ngram
listlengths
0
67.8k
[ "try: if self.label == '': self.fqdn = self.domain.name else: self.fqdn = \"{0}.{1}\".format(self.label, self.domain.name)", "class to provide the ``get_absolute_url``, ``get_edit_url``, and ``get_delete_url`` functions. This class does validation", "blank=True, null=True, validators=[validate_label]) fqdn = models.CharField(max_length=255, blank=True, null=True, validators=[validate_name]) # fqdn = label", "validate_label, validate_name from cyder.settings import CYDNS_BASE_URL class CydnsRecord(models.Model, ObjectUrlMixin): \"\"\" This class provides", "can just search the ``obj.fqdn`` field. As of commit 7b2fd19f, the build scripts", "# fqdn = label + domain.name <--- see set_fqdn class Meta: abstract =", "self.set_fqdn() self.check_TLD_condition() def save(self, *args, **kwargs): if kwargs.has_key('no_build'): no_build = kwargs.pop('no_build') # Removes", "not self.domain.delegated: return if not self.pk: # We don't exist yet. raise ValidationError(\"No", "import validate_label, validate_name from cyder.settings import CYDNS_BASE_URL class CydnsRecord(models.Model, ObjectUrlMixin): \"\"\" This class", "DNS record classes share. This includes a foreign key to the ``domain`` table", "= \"{0}.{1}\".format(self.label, self.domain.name) except ObjectDoesNotExist: return def check_for_cname(self): \"\"\"\"If a CNAME RR is", "models.CharField(max_length=255, blank=True, null=True, validators=[validate_name]) # fqdn = label + domain.name <--- see set_fqdn", "records much easier. Instead of looking at ``obj.label`` together with ``obj.domain.name``, you can", "7b2fd19f, the build scripts do not care about ``fqdn``. This could change. \"the", "explicitly if you need them to. ``CydnsRecord`` will not enforce uniqueness for you.", "preent at a node, no other data should be present; this ensures that", "+ domain.name <--- see set_fqdn class Meta: abstract = True def clean(self): self.set_fqdn()", "``ValidationError``. 
If you plan on using the ``unique_together`` constraint on a Model that", "objects created in them. \"\"\" if not self.domain.delegated: return if not self.pk: #", "import ObjectUrlMixin from cyder.cydns.validation import validate_label, validate_name from cyder.settings import CYDNS_BASE_URL class CydnsRecord(models.Model,", "field makes searching for records much easier. Instead of looking at ``obj.label`` together", "Meta: abstract = True def clean(self): self.set_fqdn() self.check_TLD_condition() def save(self, *args, **kwargs): if", "models.CharField(max_length=100, blank=True, null=True, validators=[validate_label]) fqdn = models.CharField(max_length=255, blank=True, null=True, validators=[validate_name]) # fqdn =", "of commit 7b2fd19f, the build scripts do not care about ``fqdn``. This could", "aliases cannot be different.\" -- `RFC 1034 <http://tools.ietf.org/html/rfc1034>`_ Call this function in models", "if you need them to. ``CydnsRecord`` will not enforce uniqueness for you. All", "As of commit 7b2fd19f, the build scripts do not care about ``fqdn``. This", "def save(self, *args, **kwargs): if kwargs.has_key('no_build'): no_build = kwargs.pop('no_build') # Removes key. else:", "be rebuilt. self.domain.dirty = True self.domain.save() def set_fqdn(self): try: if self.label == '':", "cyder.cydns.validation import validate_label, validate_name from cyder.settings import CYDNS_BASE_URL class CydnsRecord(models.Model, ObjectUrlMixin): \"\"\" This", "(i.e., the sum of all label octets and label lengths) is limited to", "to be changed. Delegated domains cannot have objects created in them. \"\"\" if", "octets and label lengths) is limited to 255\" - RFC 4471 \"\"\" domain", "import models import cyder from cyder.cydns.domain.models import Domain, _check_TLD_condition from cyder.cydns.mixins import ObjectUrlMixin", "dirty so it can be rebuilt. 
self.domain.dirty = True self.domain.save() def set_fqdn(self): try:", "return def check_for_cname(self): \"\"\"\"If a CNAME RR is preent at a node, no", "validators=[validate_name]) # fqdn = label + domain.name <--- see set_fqdn class Meta: abstract", "as dirty so it can be rebuilt. self.domain.dirty = True self.domain.save() def set_fqdn(self):", "RFC 4471 \"\"\" domain = models.ForeignKey(Domain, null=False) label = models.CharField(max_length=100, blank=True, null=True, validators=[validate_label])", "do not care about ``fqdn``. This could change. \"the total number of octets", "This could change. \"the total number of octets that represent a name (i.e.,", "and ``get_delete_url`` functions. This class does validation on the ``label`` field. Call ``clean_all``", "objects can be created in the {0}\" \"domain. It is delegated.\" .format(self.domain.name)) def", "the ``get_absolute_url``, ``get_edit_url``, and ``get_delete_url`` functions. This class does validation on the ``label``", "in models that can't overlap with an existing CNAME. \"\"\" CNAME = cyder.cydns.cname.models.CNAME", "data should be present; this ensures that the data for a canonical name", "canonical name and its aliases cannot be different.\" -- `RFC 1034 <http://tools.ietf.org/html/rfc1034>`_ Call", "abstract = True def clean(self): self.set_fqdn() self.check_TLD_condition() def save(self, *args, **kwargs): if kwargs.has_key('no_build'):", "null=True, validators=[validate_label]) fqdn = models.CharField(max_length=255, blank=True, null=True, validators=[validate_name]) # fqdn = label +", "This field makes searching for records much easier. 
Instead of looking at ``obj.label``", "<http://tools.ietf.org/html/rfc1034>`_ Call this function in models that can't overlap with an existing CNAME.", "no_build: pass else: # Mark the domain as dirty so it can be", "self.domain.save() def set_fqdn(self): try: if self.label == '': self.fqdn = self.domain.name else: self.fqdn", "provides common functionality that many DNS record classes share. This includes a foreign", "from cyder.settings import CYDNS_BASE_URL class CydnsRecord(models.Model, ObjectUrlMixin): \"\"\" This class provides common functionality", "import CYDNS_BASE_URL class CydnsRecord(models.Model, ObjectUrlMixin): \"\"\" This class provides common functionality that many", "self.domain.name) except ObjectDoesNotExist: return def check_for_cname(self): \"\"\"\"If a CNAME RR is preent at", "from cyder.cydns.validation import validate_label, validate_name from cyder.settings import CYDNS_BASE_URL class CydnsRecord(models.Model, ObjectUrlMixin): \"\"\"", "records have a ``fqdn`` field. This field is updated every time the object", "This field is updated every time the object is saved:: fqdn = name", "on the ``label`` field. Call ``clean_all`` to trigger the validation functions. Failure to", "a ``fqdn`` field. This field is updated every time the object is saved::", "name + domain.name or if name == '' fqdn = domain.name This field", "``get_absolute_url``, ``get_edit_url``, and ``get_delete_url`` functions. This class does validation on the ``label`` field.", "existing CNAME. \"\"\" CNAME = cyder.cydns.cname.models.CNAME if CNAME.objects.filter(fqdn=self.fqdn).exists(): raise ValidationError(\"A CNAME with this", "inherits from ``CydnsRecord``, you must include ``domain`` and ``label`` explicitly if you need", "together with ``obj.domain.name``, you can just search the ``obj.fqdn`` field. 
As of commit", "if CNAME.objects.filter(fqdn=self.fqdn).exists(): raise ValidationError(\"A CNAME with this name already exists.\") def check_for_delegation(self): \"\"\"If", "will raise a ``ValidationError``. If you plan on using the ``unique_together`` constraint on", "this function in models that can't overlap with an existing CNAME. \"\"\" CNAME", "= False # We are rebuilding super(CydnsRecord, self).save(*args, **kwargs) if no_build: pass else:", "CNAME. \"\"\" CNAME = cyder.cydns.cname.models.CNAME if CNAME.objects.filter(fqdn=self.fqdn).exists(): raise ValidationError(\"A CNAME with this name", "``domain`` table and a ``label`` CharField. This class also inherits from the ``ObjectUrlMixin``", "This class also inherits from the ``ObjectUrlMixin`` class to provide the ``get_absolute_url``, ``get_edit_url``,", "def check_for_delegation(self): \"\"\"If an object's domain is delegated it should not be able", "clean(self): self.set_fqdn() self.check_TLD_condition() def save(self, *args, **kwargs): if kwargs.has_key('no_build'): no_build = kwargs.pop('no_build') #", "the sum of all label octets and label lengths) is limited to 255\"", "and a ``label`` CharField. This class also inherits from the ``ObjectUrlMixin`` class to", "def check_for_cname(self): \"\"\"\"If a CNAME RR is preent at a node, no other", "# We don't exist yet. raise ValidationError(\"No objects can be created in the", "saved:: fqdn = name + domain.name or if name == '' fqdn =", "a canonical name and its aliases cannot be different.\" -- `RFC 1034 <http://tools.ietf.org/html/rfc1034>`_", "class provides common functionality that many DNS record classes share. This includes a", "or if name == '' fqdn = domain.name This field makes searching for", "key to the ``domain`` table and a ``label`` CharField. This class also inherits", "yet. raise ValidationError(\"No objects can be created in the {0}\" \"domain. It is", "is delegated it should not be able to be changed. 
Delegated domains cannot", "if not self.pk: # We don't exist yet. raise ValidationError(\"No objects can be", "at ``obj.label`` together with ``obj.domain.name``, you can just search the ``obj.fqdn`` field. As", "that many DNS record classes share. This includes a foreign key to the", "classes share. This includes a foreign key to the ``domain`` table and a", "CydnsRecord(models.Model, ObjectUrlMixin): \"\"\" This class provides common functionality that many DNS record classes", "common records have a ``fqdn`` field. This field is updated every time the", "no_build = kwargs.pop('no_build') # Removes key. else: no_build = False # We are", "is limited to 255\" - RFC 4471 \"\"\" domain = models.ForeignKey(Domain, null=False) label", "from django.db import models import cyder from cyder.cydns.domain.models import Domain, _check_TLD_condition from cyder.cydns.mixins", "many DNS record classes share. This includes a foreign key to the ``domain``", "\"\"\" if not self.domain.delegated: return if not self.pk: # We don't exist yet.", "key. else: no_build = False # We are rebuilding super(CydnsRecord, self).save(*args, **kwargs) if", "django.db import models import cyder from cyder.cydns.domain.models import Domain, _check_TLD_condition from cyder.cydns.mixins import", "field. As of commit 7b2fd19f, the build scripts do not care about ``fqdn``.", "cyder.cydns.cname.models.CNAME if CNAME.objects.filter(fqdn=self.fqdn).exists(): raise ValidationError(\"A CNAME with this name already exists.\") def check_for_delegation(self):", "of looking at ``obj.label`` together with ``obj.domain.name``, you can just search the ``obj.fqdn``", "**kwargs): if kwargs.has_key('no_build'): no_build = kwargs.pop('no_build') # Removes key. else: no_build = False", "``obj.domain.name``, you can just search the ``obj.fqdn`` field. 
As of commit 7b2fd19f, the", "= cyder.cydns.cname.models.CNAME if CNAME.objects.filter(fqdn=self.fqdn).exists(): raise ValidationError(\"A CNAME with this name already exists.\") def", "``fqdn``. This could change. \"the total number of octets that represent a name", "= self.domain.name else: self.fqdn = \"{0}.{1}\".format(self.label, self.domain.name) except ObjectDoesNotExist: return def check_for_cname(self): \"\"\"\"If", "``get_edit_url``, and ``get_delete_url`` functions. This class does validation on the ``label`` field. Call", "fqdn = label + domain.name <--- see set_fqdn class Meta: abstract = True", "inherits from the ``ObjectUrlMixin`` class to provide the ``get_absolute_url``, ``get_edit_url``, and ``get_delete_url`` functions.", "from the ``ObjectUrlMixin`` class to provide the ``get_absolute_url``, ``get_edit_url``, and ``get_delete_url`` functions. This", "number of octets that represent a name (i.e., the sum of all label", "to trigger the validation functions. Failure to validate will raise a ``ValidationError``. If", "CNAME RR is preent at a node, no other data should be present;", "field is updated every time the object is saved:: fqdn = name +", "on using the ``unique_together`` constraint on a Model that inherits from ``CydnsRecord``, you", "changed. Delegated domains cannot have objects created in them. \"\"\" if not self.domain.delegated:", "easier. Instead of looking at ``obj.label`` together with ``obj.domain.name``, you can just search", "class Meta: abstract = True def clean(self): self.set_fqdn() self.check_TLD_condition() def save(self, *args, **kwargs):", "have objects created in them. \"\"\" if not self.domain.delegated: return if not self.pk:", "does validation on the ``label`` field. Call ``clean_all`` to trigger the validation functions.", "if not self.domain.delegated: return if not self.pk: # We don't exist yet. 
raise", "rebuilding super(CydnsRecord, self).save(*args, **kwargs) if no_build: pass else: # Mark the domain as", "kwargs.pop('no_build') # Removes key. else: no_build = False # We are rebuilding super(CydnsRecord,", "functions. This class does validation on the ``label`` field. Call ``clean_all`` to trigger", "class does validation on the ``label`` field. Call ``clean_all`` to trigger the validation", "If you plan on using the ``unique_together`` constraint on a Model that inherits", "Model that inherits from ``CydnsRecord``, you must include ``domain`` and ``label`` explicitly if", "set_fqdn class Meta: abstract = True def clean(self): self.set_fqdn() self.check_TLD_condition() def save(self, *args,", "else: # Mark the domain as dirty so it can be rebuilt. self.domain.dirty", "not enforce uniqueness for you. All common records have a ``fqdn`` field. This", "\"\"\"\"If a CNAME RR is preent at a node, no other data should", "CNAME = cyder.cydns.cname.models.CNAME if CNAME.objects.filter(fqdn=self.fqdn).exists(): raise ValidationError(\"A CNAME with this name already exists.\")", "you plan on using the ``unique_together`` constraint on a Model that inherits from", "plan on using the ``unique_together`` constraint on a Model that inherits from ``CydnsRecord``,", "them. \"\"\" if not self.domain.delegated: return if not self.pk: # We don't exist", "except ObjectDoesNotExist: return def check_for_cname(self): \"\"\"\"If a CNAME RR is preent at a", "== '' fqdn = domain.name This field makes searching for records much easier.", "``obj.label`` together with ``obj.domain.name``, you can just search the ``obj.fqdn`` field. As of", "function in models that can't overlap with an existing CNAME. 
\"\"\" CNAME =", "label lengths) is limited to 255\" - RFC 4471 \"\"\" domain = models.ForeignKey(Domain,", "every time the object is saved:: fqdn = name + domain.name or if", "cyder.cydns.domain.models import Domain, _check_TLD_condition from cyder.cydns.mixins import ObjectUrlMixin from cyder.cydns.validation import validate_label, validate_name", "Mark the domain as dirty so it can be rebuilt. self.domain.dirty = True", "``label`` explicitly if you need them to. ``CydnsRecord`` will not enforce uniqueness for", "at a node, no other data should be present; this ensures that the", "rebuilt. self.domain.dirty = True self.domain.save() def set_fqdn(self): try: if self.label == '': self.fqdn", "them to. ``CydnsRecord`` will not enforce uniqueness for you. All common records have", "be present; this ensures that the data for a canonical name and its", "from ``CydnsRecord``, you must include ``domain`` and ``label`` explicitly if you need them", "domain.name This field makes searching for records much easier. Instead of looking at", "for a canonical name and its aliases cannot be different.\" -- `RFC 1034", "_check_TLD_condition from cyder.cydns.mixins import ObjectUrlMixin from cyder.cydns.validation import validate_label, validate_name from cyder.settings import", "ValidationError(\"No objects can be created in the {0}\" \"domain. It is delegated.\" .format(self.domain.name))", "if self.label == '': self.fqdn = self.domain.name else: self.fqdn = \"{0}.{1}\".format(self.label, self.domain.name) except", "share. This includes a foreign key to the ``domain`` table and a ``label``", "represent a name (i.e., the sum of all label octets and label lengths)", "``ObjectUrlMixin`` class to provide the ``get_absolute_url``, ``get_edit_url``, and ``get_delete_url`` functions. This class does", "``label`` field. Call ``clean_all`` to trigger the validation functions. 
Failure to validate will", "domain = models.ForeignKey(Domain, null=False) label = models.CharField(max_length=100, blank=True, null=True, validators=[validate_label]) fqdn = models.CharField(max_length=255,", "self.fqdn = self.domain.name else: self.fqdn = \"{0}.{1}\".format(self.label, self.domain.name) except ObjectDoesNotExist: return def check_for_cname(self):", "be created in the {0}\" \"domain. It is delegated.\" .format(self.domain.name)) def check_TLD_condition(self): _check_TLD_condition(self)", "validation on the ``label`` field. Call ``clean_all`` to trigger the validation functions. Failure", "created in them. \"\"\" if not self.domain.delegated: return if not self.pk: # We", "the ``unique_together`` constraint on a Model that inherits from ``CydnsRecord``, you must include", "import Domain, _check_TLD_condition from cyder.cydns.mixins import ObjectUrlMixin from cyder.cydns.validation import validate_label, validate_name from", "exists.\") def check_for_delegation(self): \"\"\"If an object's domain is delegated it should not be", "CYDNS_BASE_URL class CydnsRecord(models.Model, ObjectUrlMixin): \"\"\" This class provides common functionality that many DNS", "validation functions. Failure to validate will raise a ``ValidationError``. If you plan on", "``label`` CharField. This class also inherits from the ``ObjectUrlMixin`` class to provide the", "= kwargs.pop('no_build') # Removes key. else: no_build = False # We are rebuilding", "CNAME.objects.filter(fqdn=self.fqdn).exists(): raise ValidationError(\"A CNAME with this name already exists.\") def check_for_delegation(self): \"\"\"If an", "if no_build: pass else: # Mark the domain as dirty so it can", "common functionality that many DNS record classes share. This includes a foreign key", "name already exists.\") def check_for_delegation(self): \"\"\"If an object's domain is delegated it should", "don't exist yet. 
raise ValidationError(\"No objects can be created in the {0}\" \"domain.", "cannot have objects created in them. \"\"\" if not self.domain.delegated: return if not", "need them to. ``CydnsRecord`` will not enforce uniqueness for you. All common records", "to the ``domain`` table and a ``label`` CharField. This class also inherits from", "= models.CharField(max_length=100, blank=True, null=True, validators=[validate_label]) fqdn = models.CharField(max_length=255, blank=True, null=True, validators=[validate_name]) # fqdn", "of octets that represent a name (i.e., the sum of all label octets", "functionality that many DNS record classes share. This includes a foreign key to", "This class provides common functionality that many DNS record classes share. This includes", "the ``ObjectUrlMixin`` class to provide the ``get_absolute_url``, ``get_edit_url``, and ``get_delete_url`` functions. This class", "to provide the ``get_absolute_url``, ``get_edit_url``, and ``get_delete_url`` functions. This class does validation on", "- RFC 4471 \"\"\" domain = models.ForeignKey(Domain, null=False) label = models.CharField(max_length=100, blank=True, null=True,", "build scripts do not care about ``fqdn``. This could change. \"the total number", "import cyder from cyder.cydns.domain.models import Domain, _check_TLD_condition from cyder.cydns.mixins import ObjectUrlMixin from cyder.cydns.validation", "makes searching for records much easier. Instead of looking at ``obj.label`` together with", "django.core.exceptions import ObjectDoesNotExist, ValidationError from django.db import models import cyder from cyder.cydns.domain.models import", "fqdn = models.CharField(max_length=255, blank=True, null=True, validators=[validate_name]) # fqdn = label + domain.name <---", "``get_delete_url`` functions. This class does validation on the ``label`` field. 
Call ``clean_all`` to", "def set_fqdn(self): try: if self.label == '': self.fqdn = self.domain.name else: self.fqdn =", "is preent at a node, no other data should be present; this ensures", "= domain.name This field makes searching for records much easier. Instead of looking", "Removes key. else: no_build = False # We are rebuilding super(CydnsRecord, self).save(*args, **kwargs)", "raise ValidationError(\"A CNAME with this name already exists.\") def check_for_delegation(self): \"\"\"If an object's", "its aliases cannot be different.\" -- `RFC 1034 <http://tools.ietf.org/html/rfc1034>`_ Call this function in", "include ``domain`` and ``label`` explicitly if you need them to. ``CydnsRecord`` will not", "that inherits from ``CydnsRecord``, you must include ``domain`` and ``label`` explicitly if you", "else: self.fqdn = \"{0}.{1}\".format(self.label, self.domain.name) except ObjectDoesNotExist: return def check_for_cname(self): \"\"\"\"If a CNAME", "the domain as dirty so it can be rebuilt. self.domain.dirty = True self.domain.save()", "ObjectUrlMixin): \"\"\" This class provides common functionality that many DNS record classes share.", "that can't overlap with an existing CNAME. \"\"\" CNAME = cyder.cydns.cname.models.CNAME if CNAME.objects.filter(fqdn=self.fqdn).exists():", "fqdn = domain.name This field makes searching for records much easier. Instead of", "the ``label`` field. Call ``clean_all`` to trigger the validation functions. Failure to validate", "should not be able to be changed. Delegated domains cannot have objects created", "ValidationError from django.db import models import cyder from cyder.cydns.domain.models import Domain, _check_TLD_condition from", "field. This field is updated every time the object is saved:: fqdn =", "the build scripts do not care about ``fqdn``. This could change. \"the total", "Call ``clean_all`` to trigger the validation functions. Failure to validate will raise a", "you can just search the ``obj.fqdn`` field. 
As of commit 7b2fd19f, the build", "with an existing CNAME. \"\"\" CNAME = cyder.cydns.cname.models.CNAME if CNAME.objects.filter(fqdn=self.fqdn).exists(): raise ValidationError(\"A CNAME", "care about ``fqdn``. This could change. \"the total number of octets that represent", "\"{0}.{1}\".format(self.label, self.domain.name) except ObjectDoesNotExist: return def check_for_cname(self): \"\"\"\"If a CNAME RR is preent", "check_for_delegation(self): \"\"\"If an object's domain is delegated it should not be able to", "from cyder.cydns.domain.models import Domain, _check_TLD_condition from cyder.cydns.mixins import ObjectUrlMixin from cyder.cydns.validation import validate_label,", "field. Call ``clean_all`` to trigger the validation functions. Failure to validate will raise", "True def clean(self): self.set_fqdn() self.check_TLD_condition() def save(self, *args, **kwargs): if kwargs.has_key('no_build'): no_build =", "= True def clean(self): self.set_fqdn() self.check_TLD_condition() def save(self, *args, **kwargs): if kwargs.has_key('no_build'): no_build", "self.check_TLD_condition() def save(self, *args, **kwargs): if kwargs.has_key('no_build'): no_build = kwargs.pop('no_build') # Removes key.", "label octets and label lengths) is limited to 255\" - RFC 4471 \"\"\"", "be different.\" -- `RFC 1034 <http://tools.ietf.org/html/rfc1034>`_ Call this function in models that can't", "no other data should be present; this ensures that the data for a", "you. All common records have a ``fqdn`` field. This field is updated every", "1034 <http://tools.ietf.org/html/rfc1034>`_ Call this function in models that can't overlap with an existing", "with ``obj.domain.name``, you can just search the ``obj.fqdn`` field. As of commit 7b2fd19f,", "cyder from cyder.cydns.domain.models import Domain, _check_TLD_condition from cyder.cydns.mixins import ObjectUrlMixin from cyder.cydns.validation import", "able to be changed. Delegated domains cannot have objects created in them. 
\"\"\"", "be changed. Delegated domains cannot have objects created in them. \"\"\" if not", "on a Model that inherits from ``CydnsRecord``, you must include ``domain`` and ``label``", "This includes a foreign key to the ``domain`` table and a ``label`` CharField.", "and ``label`` explicitly if you need them to. ``CydnsRecord`` will not enforce uniqueness", "\"the total number of octets that represent a name (i.e., the sum of", "search the ``obj.fqdn`` field. As of commit 7b2fd19f, the build scripts do not", "not self.pk: # We don't exist yet. raise ValidationError(\"No objects can be created", "``CydnsRecord``, you must include ``domain`` and ``label`` explicitly if you need them to.", "\"\"\"If an object's domain is delegated it should not be able to be", "a node, no other data should be present; this ensures that the data", "to validate will raise a ``ValidationError``. If you plan on using the ``unique_together``", "trigger the validation functions. Failure to validate will raise a ``ValidationError``. If you", "domain.name <--- see set_fqdn class Meta: abstract = True def clean(self): self.set_fqdn() self.check_TLD_condition()", "label + domain.name <--- see set_fqdn class Meta: abstract = True def clean(self):", "name (i.e., the sum of all label octets and label lengths) is limited", "domain is delegated it should not be able to be changed. Delegated domains", "``fqdn`` field. This field is updated every time the object is saved:: fqdn", "exist yet. raise ValidationError(\"No objects can be created in the {0}\" \"domain. 
It", "from django.core.exceptions import ObjectDoesNotExist, ValidationError from django.db import models import cyder from cyder.cydns.domain.models", "a CNAME RR is preent at a node, no other data should be", "False # We are rebuilding super(CydnsRecord, self).save(*args, **kwargs) if no_build: pass else: #", "name and its aliases cannot be different.\" -- `RFC 1034 <http://tools.ietf.org/html/rfc1034>`_ Call this", "fqdn = name + domain.name or if name == '' fqdn = domain.name", "cannot be different.\" -- `RFC 1034 <http://tools.ietf.org/html/rfc1034>`_ Call this function in models that", "lengths) is limited to 255\" - RFC 4471 \"\"\" domain = models.ForeignKey(Domain, null=False)", "null=False) label = models.CharField(max_length=100, blank=True, null=True, validators=[validate_label]) fqdn = models.CharField(max_length=255, blank=True, null=True, validators=[validate_name])", "self.domain.name else: self.fqdn = \"{0}.{1}\".format(self.label, self.domain.name) except ObjectDoesNotExist: return def check_for_cname(self): \"\"\"\"If a", "an object's domain is delegated it should not be able to be changed.", "the ``domain`` table and a ``label`` CharField. 
This class also inherits from the", "domain.name or if name == '' fqdn = domain.name This field makes searching", "total number of octets that represent a name (i.e., the sum of all", "different.\" -- `RFC 1034 <http://tools.ietf.org/html/rfc1034>`_ Call this function in models that can't overlap", "and label lengths) is limited to 255\" - RFC 4471 \"\"\" domain =", "self.label == '': self.fqdn = self.domain.name else: self.fqdn = \"{0}.{1}\".format(self.label, self.domain.name) except ObjectDoesNotExist:", "import ObjectDoesNotExist, ValidationError from django.db import models import cyder from cyder.cydns.domain.models import Domain,", "pass else: # Mark the domain as dirty so it can be rebuilt.", "ObjectDoesNotExist: return def check_for_cname(self): \"\"\"\"If a CNAME RR is preent at a node,", "name == '' fqdn = domain.name This field makes searching for records much", "to 255\" - RFC 4471 \"\"\" domain = models.ForeignKey(Domain, null=False) label = models.CharField(max_length=100,", "null=True, validators=[validate_name]) # fqdn = label + domain.name <--- see set_fqdn class Meta:", "ensures that the data for a canonical name and its aliases cannot be", "RR is preent at a node, no other data should be present; this", "of all label octets and label lengths) is limited to 255\" - RFC", "-- `RFC 1034 <http://tools.ietf.org/html/rfc1034>`_ Call this function in models that can't overlap with", "Delegated domains cannot have objects created in them. \"\"\" if not self.domain.delegated: return", "includes a foreign key to the ``domain`` table and a ``label`` CharField. This", "ValidationError(\"A CNAME with this name already exists.\") def check_for_delegation(self): \"\"\"If an object's domain", "self).save(*args, **kwargs) if no_build: pass else: # Mark the domain as dirty so", "``clean_all`` to trigger the validation functions. 
Failure to validate will raise a ``ValidationError``.", "else: no_build = False # We are rebuilding super(CydnsRecord, self).save(*args, **kwargs) if no_build:", "be able to be changed. Delegated domains cannot have objects created in them.", "have a ``fqdn`` field. This field is updated every time the object is", "validators=[validate_label]) fqdn = models.CharField(max_length=255, blank=True, null=True, validators=[validate_name]) # fqdn = label + domain.name", "# We are rebuilding super(CydnsRecord, self).save(*args, **kwargs) if no_build: pass else: # Mark", "*args, **kwargs): if kwargs.has_key('no_build'): no_build = kwargs.pop('no_build') # Removes key. else: no_build =", "models.ForeignKey(Domain, null=False) label = models.CharField(max_length=100, blank=True, null=True, validators=[validate_label]) fqdn = models.CharField(max_length=255, blank=True, null=True,", "record classes share. This includes a foreign key to the ``domain`` table and", "is saved:: fqdn = name + domain.name or if name == '' fqdn", "return if not self.pk: # We don't exist yet. raise ValidationError(\"No objects can", "255\" - RFC 4471 \"\"\" domain = models.ForeignKey(Domain, null=False) label = models.CharField(max_length=100, blank=True,", "already exists.\") def check_for_delegation(self): \"\"\"If an object's domain is delegated it should not", "enforce uniqueness for you. All common records have a ``fqdn`` field. This field", "+ domain.name or if name == '' fqdn = domain.name This field makes", "the validation functions. Failure to validate will raise a ``ValidationError``. If you plan", "ObjectDoesNotExist, ValidationError from django.db import models import cyder from cyder.cydns.domain.models import Domain, _check_TLD_condition", "\"\"\" domain = models.ForeignKey(Domain, null=False) label = models.CharField(max_length=100, blank=True, null=True, validators=[validate_label]) fqdn =", "kwargs.has_key('no_build'): no_build = kwargs.pop('no_build') # Removes key. 
else: no_build = False # We", "is updated every time the object is saved:: fqdn = name + domain.name", "== '': self.fqdn = self.domain.name else: self.fqdn = \"{0}.{1}\".format(self.label, self.domain.name) except ObjectDoesNotExist: return", "\"\"\" CNAME = cyder.cydns.cname.models.CNAME if CNAME.objects.filter(fqdn=self.fqdn).exists(): raise ValidationError(\"A CNAME with this name already", "not be able to be changed. Delegated domains cannot have objects created in", "and its aliases cannot be different.\" -- `RFC 1034 <http://tools.ietf.org/html/rfc1034>`_ Call this function", "True self.domain.save() def set_fqdn(self): try: if self.label == '': self.fqdn = self.domain.name else:", "raise a ``ValidationError``. If you plan on using the ``unique_together`` constraint on a", "= name + domain.name or if name == '' fqdn = domain.name This", "octets that represent a name (i.e., the sum of all label octets and", "def clean(self): self.set_fqdn() self.check_TLD_condition() def save(self, *args, **kwargs): if kwargs.has_key('no_build'): no_build = kwargs.pop('no_build')", "Instead of looking at ``obj.label`` together with ``obj.domain.name``, you can just search the", "\"\"\" This class provides common functionality that many DNS record classes share. This", "delegated it should not be able to be changed. Delegated domains cannot have", "a foreign key to the ``domain`` table and a ``label`` CharField. This class", "``unique_together`` constraint on a Model that inherits from ``CydnsRecord``, you must include ``domain``", "= label + domain.name <--- see set_fqdn class Meta: abstract = True def", "about ``fqdn``. This could change. \"the total number of octets that represent a", "We are rebuilding super(CydnsRecord, self).save(*args, **kwargs) if no_build: pass else: # Mark the", "self.domain.delegated: return if not self.pk: # We don't exist yet. raise ValidationError(\"No objects", "'' fqdn = domain.name This field makes searching for records much easier. 
Instead", "**kwargs) if no_build: pass else: # Mark the domain as dirty so it", "All common records have a ``fqdn`` field. This field is updated every time", "looking at ``obj.label`` together with ``obj.domain.name``, you can just search the ``obj.fqdn`` field.", "must include ``domain`` and ``label`` explicitly if you need them to. ``CydnsRecord`` will", "functions. Failure to validate will raise a ``ValidationError``. If you plan on using", "for you. All common records have a ``fqdn`` field. This field is updated", "raise ValidationError(\"No objects can be created in the {0}\" \"domain. It is delegated.\"", "self.domain.dirty = True self.domain.save() def set_fqdn(self): try: if self.label == '': self.fqdn =", "ObjectUrlMixin from cyder.cydns.validation import validate_label, validate_name from cyder.settings import CYDNS_BASE_URL class CydnsRecord(models.Model, ObjectUrlMixin):", "`RFC 1034 <http://tools.ietf.org/html/rfc1034>`_ Call this function in models that can't overlap with an", "it should not be able to be changed. Delegated domains cannot have objects", "should be present; this ensures that the data for a canonical name and", "scripts do not care about ``fqdn``. This could change. \"the total number of", "``CydnsRecord`` will not enforce uniqueness for you. All common records have a ``fqdn``", "save(self, *args, **kwargs): if kwargs.has_key('no_build'): no_build = kwargs.pop('no_build') # Removes key. else: no_build", "that represent a name (i.e., the sum of all label octets and label", "= models.CharField(max_length=255, blank=True, null=True, validators=[validate_name]) # fqdn = label + domain.name <--- see", "so it can be rebuilt. 
self.domain.dirty = True self.domain.save() def set_fqdn(self): try: if", "other data should be present; this ensures that the data for a canonical", "Domain, _check_TLD_condition from cyder.cydns.mixins import ObjectUrlMixin from cyder.cydns.validation import validate_label, validate_name from cyder.settings", "it can be rebuilt. self.domain.dirty = True self.domain.save() def set_fqdn(self): try: if self.label", "using the ``unique_together`` constraint on a Model that inherits from ``CydnsRecord``, you must", "Call this function in models that can't overlap with an existing CNAME. \"\"\"", "the data for a canonical name and its aliases cannot be different.\" --", "cyder.cydns.mixins import ObjectUrlMixin from cyder.cydns.validation import validate_label, validate_name from cyder.settings import CYDNS_BASE_URL class", "you must include ``domain`` and ``label`` explicitly if you need them to. ``CydnsRecord``", "table and a ``label`` CharField. This class also inherits from the ``ObjectUrlMixin`` class", "domains cannot have objects created in them. \"\"\" if not self.domain.delegated: return if", "We don't exist yet. raise ValidationError(\"No objects can be created in the {0}\"", "a ``label`` CharField. This class also inherits from the ``ObjectUrlMixin`` class to provide", "object's domain is delegated it should not be able to be changed. Delegated", "cyder.settings import CYDNS_BASE_URL class CydnsRecord(models.Model, ObjectUrlMixin): \"\"\" This class provides common functionality that", "foreign key to the ``domain`` table and a ``label`` CharField. This class also", "# Removes key. else: no_build = False # We are rebuilding super(CydnsRecord, self).save(*args,", "can't overlap with an existing CNAME. 
\"\"\" CNAME = cyder.cydns.cname.models.CNAME if CNAME.objects.filter(fqdn=self.fqdn).exists(): raise", "= models.ForeignKey(Domain, null=False) label = models.CharField(max_length=100, blank=True, null=True, validators=[validate_label]) fqdn = models.CharField(max_length=255, blank=True,", "models that can't overlap with an existing CNAME. \"\"\" CNAME = cyder.cydns.cname.models.CNAME if", "object is saved:: fqdn = name + domain.name or if name == ''", "set_fqdn(self): try: if self.label == '': self.fqdn = self.domain.name else: self.fqdn = \"{0}.{1}\".format(self.label,", "present; this ensures that the data for a canonical name and its aliases", "provide the ``get_absolute_url``, ``get_edit_url``, and ``get_delete_url`` functions. This class does validation on the", "could change. \"the total number of octets that represent a name (i.e., the", "also inherits from the ``ObjectUrlMixin`` class to provide the ``get_absolute_url``, ``get_edit_url``, and ``get_delete_url``", "will not enforce uniqueness for you. All common records have a ``fqdn`` field.", "data for a canonical name and its aliases cannot be different.\" -- `RFC", "in them. \"\"\" if not self.domain.delegated: return if not self.pk: # We don't", "domain as dirty so it can be rebuilt. self.domain.dirty = True self.domain.save() def", "a ``ValidationError``. If you plan on using the ``unique_together`` constraint on a Model", "for records much easier. Instead of looking at ``obj.label`` together with ``obj.domain.name``, you", "self.pk: # We don't exist yet. raise ValidationError(\"No objects can be created in", "'': self.fqdn = self.domain.name else: self.fqdn = \"{0}.{1}\".format(self.label, self.domain.name) except ObjectDoesNotExist: return def", "This class does validation on the ``label`` field. Call ``clean_all`` to trigger the", "to. ``CydnsRecord`` will not enforce uniqueness for you. All common records have a", "just search the ``obj.fqdn`` field. 
As of commit 7b2fd19f, the build scripts do", "can be created in the {0}\" \"domain. It is delegated.\" .format(self.domain.name)) def check_TLD_condition(self):", "a Model that inherits from ``CydnsRecord``, you must include ``domain`` and ``label`` explicitly", "``domain`` and ``label`` explicitly if you need them to. ``CydnsRecord`` will not enforce", "not care about ``fqdn``. This could change. \"the total number of octets that", "change. \"the total number of octets that represent a name (i.e., the sum", "node, no other data should be present; this ensures that the data for", "commit 7b2fd19f, the build scripts do not care about ``fqdn``. This could change.", "sum of all label octets and label lengths) is limited to 255\" -", "time the object is saved:: fqdn = name + domain.name or if name", "class also inherits from the ``ObjectUrlMixin`` class to provide the ``get_absolute_url``, ``get_edit_url``, and", "<--- see set_fqdn class Meta: abstract = True def clean(self): self.set_fqdn() self.check_TLD_condition() def", "no_build = False # We are rebuilding super(CydnsRecord, self).save(*args, **kwargs) if no_build: pass", "all label octets and label lengths) is limited to 255\" - RFC 4471", "see set_fqdn class Meta: abstract = True def clean(self): self.set_fqdn() self.check_TLD_condition() def save(self,", "self.fqdn = \"{0}.{1}\".format(self.label, self.domain.name) except ObjectDoesNotExist: return def check_for_cname(self): \"\"\"\"If a CNAME RR", "overlap with an existing CNAME. \"\"\" CNAME = cyder.cydns.cname.models.CNAME if CNAME.objects.filter(fqdn=self.fqdn).exists(): raise ValidationError(\"A", "an existing CNAME. \"\"\" CNAME = cyder.cydns.cname.models.CNAME if CNAME.objects.filter(fqdn=self.fqdn).exists(): raise ValidationError(\"A CNAME with", "``obj.fqdn`` field. As of commit 7b2fd19f, the build scripts do not care about", "# Mark the domain as dirty so it can be rebuilt. 
self.domain.dirty =", "models import cyder from cyder.cydns.domain.models import Domain, _check_TLD_condition from cyder.cydns.mixins import ObjectUrlMixin from", "uniqueness for you. All common records have a ``fqdn`` field. This field is", "you need them to. ``CydnsRecord`` will not enforce uniqueness for you. All common", "much easier. Instead of looking at ``obj.label`` together with ``obj.domain.name``, you can just", "that the data for a canonical name and its aliases cannot be different.\"", "if kwargs.has_key('no_build'): no_build = kwargs.pop('no_build') # Removes key. else: no_build = False #", "from cyder.cydns.mixins import ObjectUrlMixin from cyder.cydns.validation import validate_label, validate_name from cyder.settings import CYDNS_BASE_URL", "blank=True, null=True, validators=[validate_name]) # fqdn = label + domain.name <--- see set_fqdn class", "the object is saved:: fqdn = name + domain.name or if name ==", "limited to 255\" - RFC 4471 \"\"\" domain = models.ForeignKey(Domain, null=False) label =", "updated every time the object is saved:: fqdn = name + domain.name or", "check_for_cname(self): \"\"\"\"If a CNAME RR is preent at a node, no other data", "searching for records much easier. Instead of looking at ``obj.label`` together with ``obj.domain.name``,", "constraint on a Model that inherits from ``CydnsRecord``, you must include ``domain`` and", "with this name already exists.\") def check_for_delegation(self): \"\"\"If an object's domain is delegated", "CharField. This class also inherits from the ``ObjectUrlMixin`` class to provide the ``get_absolute_url``,", "validate will raise a ``ValidationError``. If you plan on using the ``unique_together`` constraint", "the ``obj.fqdn`` field. 
As of commit 7b2fd19f, the build scripts do not care", "4471 \"\"\" domain = models.ForeignKey(Domain, null=False) label = models.CharField(max_length=100, blank=True, null=True, validators=[validate_label]) fqdn", "this name already exists.\") def check_for_delegation(self): \"\"\"If an object's domain is delegated it", "Failure to validate will raise a ``ValidationError``. If you plan on using the", "are rebuilding super(CydnsRecord, self).save(*args, **kwargs) if no_build: pass else: # Mark the domain", "validate_name from cyder.settings import CYDNS_BASE_URL class CydnsRecord(models.Model, ObjectUrlMixin): \"\"\" This class provides common", "class CydnsRecord(models.Model, ObjectUrlMixin): \"\"\" This class provides common functionality that many DNS record", "super(CydnsRecord, self).save(*args, **kwargs) if no_build: pass else: # Mark the domain as dirty", "can be rebuilt. self.domain.dirty = True self.domain.save() def set_fqdn(self): try: if self.label ==", "this ensures that the data for a canonical name and its aliases cannot", "= True self.domain.save() def set_fqdn(self): try: if self.label == '': self.fqdn = self.domain.name", "CNAME with this name already exists.\") def check_for_delegation(self): \"\"\"If an object's domain is", "label = models.CharField(max_length=100, blank=True, null=True, validators=[validate_label]) fqdn = models.CharField(max_length=255, blank=True, null=True, validators=[validate_name]) #", "if name == '' fqdn = domain.name This field makes searching for records", "a name (i.e., the sum of all label octets and label lengths) is" ]
[ "\"\"\"Version template tag\"\"\" from django import template from django.utils.version import get_version from bounca", "django.utils.version import get_version from bounca import VERSION register = template.Library() @register.simple_tag def bounca_version():", "template from django.utils.version import get_version from bounca import VERSION register = template.Library() @register.simple_tag", "import template from django.utils.version import get_version from bounca import VERSION register = template.Library()", "<gh_stars>0 \"\"\"Version template tag\"\"\" from django import template from django.utils.version import get_version from", "tag\"\"\" from django import template from django.utils.version import get_version from bounca import VERSION", "django import template from django.utils.version import get_version from bounca import VERSION register =", "from django import template from django.utils.version import get_version from bounca import VERSION register", "template tag\"\"\" from django import template from django.utils.version import get_version from bounca import", "from django.utils.version import get_version from bounca import VERSION register = template.Library() @register.simple_tag def", "import get_version from bounca import VERSION register = template.Library() @register.simple_tag def bounca_version(): return", "get_version from bounca import VERSION register = template.Library() @register.simple_tag def bounca_version(): return str(get_version(VERSION))" ]
[ "__hash__(self): return hash((self.n,self.s)) ll = Pessoa('Lugão','Ricardo') lulu = Pessoa('Lugão','Ricardinho') print(hash(ll)) # True print(hash(lulu))", "self.n = n self.s = s def __hash__(self): return hash((self.n,self.s)) ll = Pessoa('Lugão','Ricardo')", "= n self.s = s def __hash__(self): return hash((self.n,self.s)) ll = Pessoa('Lugão','Ricardo') lulu", "s): self.n = n self.s = s def __hash__(self): return hash((self.n,self.s)) ll =", "= s def __hash__(self): return hash((self.n,self.s)) ll = Pessoa('Lugão','Ricardo') lulu = Pessoa('Lugão','Ricardinho') print(hash(ll))", "return hash((self.n,self.s)) ll = Pessoa('Lugão','Ricardo') lulu = Pessoa('Lugão','Ricardinho') print(hash(ll)) # True print(hash(lulu)) #", "self.s = s def __hash__(self): return hash((self.n,self.s)) ll = Pessoa('Lugão','Ricardo') lulu = Pessoa('Lugão','Ricardinho')", "def __hash__(self): return hash((self.n,self.s)) ll = Pessoa('Lugão','Ricardo') lulu = Pessoa('Lugão','Ricardinho') print(hash(ll)) # True", "__init__(self, n, s): self.n = n self.s = s def __hash__(self): return hash((self.n,self.s))", "class Pessoa: def __init__(self, n, s): self.n = n self.s = s def", "Pessoa: def __init__(self, n, s): self.n = n self.s = s def __hash__(self):", "s def __hash__(self): return hash((self.n,self.s)) ll = Pessoa('Lugão','Ricardo') lulu = Pessoa('Lugão','Ricardinho') print(hash(ll)) #", "n, s): self.n = n self.s = s def __hash__(self): return hash((self.n,self.s)) ll", "n self.s = s def __hash__(self): return hash((self.n,self.s)) ll = Pessoa('Lugão','Ricardo') lulu =", "<reponame>cassiasamp/live-de-python class Pessoa: def __init__(self, n, s): self.n = n self.s = s", "def __init__(self, n, s): self.n = n self.s = s def __hash__(self): return", "hash((self.n,self.s)) ll = Pessoa('Lugão','Ricardo') lulu = Pessoa('Lugão','Ricardinho') print(hash(ll)) # True print(hash(lulu)) # True" ]
[ "'r') except FileNotFoundError: f = open('f.txt', 'w') else: text = f.read() print(text) finally:", "f = open('f.txt', 'r') except FileNotFoundError: f = open('f.txt', 'w') else: text =", "try: f = open('f.txt', 'r') except FileNotFoundError: f = open('f.txt', 'w') else: text", "except FileNotFoundError: f = open('f.txt', 'w') else: text = f.read() print(text) finally: f.close()", "= open('f.txt', 'r') except FileNotFoundError: f = open('f.txt', 'w') else: text = f.read()", "open('f.txt', 'r') except FileNotFoundError: f = open('f.txt', 'w') else: text = f.read() print(text)" ]
[ "\"sflix_sys\"] def recursiveCrawler(path, project=\"\", serie=\"\", staffel=\"\", folge=\"\", filelist={}, depth=0): if depth == 0:", "project = path.split(\"\\\\\")[-1] filelist.setdefault(project, {}) elif depth == 2: serie = path.split(\"\\\\\")[-1] filelist[project].setdefault(serie,", "\"/\").replace(\"None\", \"null\") + \";\") fileWriter.close() print(\"OK\") except: print(\"Fehler\") print(\"\".center(50, \"=\")) print(\"Update abgeschlossen\".center(50)) print(\"\".center(50,", "elif depth == 1: project = path.split(\"\\\\\")[-1] filelist.setdefault(project, {}) elif depth == 2:", "recursiveCrawler(path, project=\"\", serie=\"\", staffel=\"\", folge=\"\", filelist={}, depth=0): if depth == 0: pass elif", "fileWriter.close() print(\"OK\") except: print(\"Fehler\") print(\"\".center(50, \"=\")) print(\"Update abgeschlossen\".center(50)) print(\"\".center(50, \"=\")) print() input(\"Enter zum", "{}) elif depth == 3: staffel = path.split(\"\\\\\")[-1] filelist[project][serie].setdefault(staffel, {}) elif depth ==", "= os.path.join(path, item)[len(homeDir):] filelist[project][serie][staffel][folge].setdefault(os.path.join(\".\", relPath), None) elif os.path.isdir(os.path.join(path, item)): filelist = recursiveCrawler(os.path.join(path, item),", "depth == 4: folge = path.split(\"\\\\\")[-1] filelist[project][serie][staffel].setdefault(folge, {}) # print(f\"{project} {serie} {staffel}\") folderContent", "None) elif depth > 3: relPath = os.path.join(path, item)[len(homeDir):] filelist[project][serie][staffel][folge].setdefault(os.path.join(\".\", relPath), None) elif", "recursiveCrawler(homeDir) print(\"OK\") except: print(\"Fehler\") # fileWriter = open(os.path.join(homeDir, \"output.txt\"), \"w\", encoding=\"utf-8\") # fileWriter.write(str(filelist).replace(\"\\\\\\\\\",", "staffel, folge, filelist, depth+1) return filelist print(\"Durchsuche Ordner...\".ljust(40), end=\"\") try: filelist = recursiveCrawler(homeDir)", "except: print(\"Fehler\") # fileWriter = 
open(os.path.join(homeDir, \"output.txt\"), \"w\", encoding=\"utf-8\") # fileWriter.write(str(filelist).replace(\"\\\\\\\\\", \"/\").replace(\"None\", \"null\"))", "encoding=\"utf-8\") # fileWriter.write(str(filelist).replace(\"\\\\\\\\\", \"/\").replace(\"None\", \"null\")) # fileWriter.close() try: print(\"Erstelle Backup...\".ljust(40), end=\"\") if os.path.exists(os.path.join(homeDir,", "moveFile(os.path.join(homeDir, \"sflix_sys\", \"data.js\"), os.path.join(homeDir, \"sflix_sys\", \"data.js.bak\")) print(\"OK\") except: print(\"Fehler\") try: print(\"Speichere neue Version...\".ljust(40),", "fileWriter.write(str(filelist).replace(\"\\\\\\\\\", \"/\").replace(\"None\", \"null\")) # fileWriter.close() try: print(\"Erstelle Backup...\".ljust(40), end=\"\") if os.path.exists(os.path.join(homeDir, \"sflix_sys\", \"data.js.bak\")):", "2: serie = path.split(\"\\\\\")[-1] filelist[project].setdefault(serie, {}) elif depth == 3: staffel = path.split(\"\\\\\")[-1]", "1: project = path.split(\"\\\\\")[-1] filelist.setdefault(project, {}) elif depth == 2: serie = path.split(\"\\\\\")[-1]", "== 0: pass elif depth == 1: project = path.split(\"\\\\\")[-1] filelist.setdefault(project, {}) elif", "str(filelist).replace(\"\\\\\\\\\", \"/\").replace(\"None\", \"null\") + \";\") fileWriter.close() print(\"OK\") except: print(\"Fehler\") print(\"\".center(50, \"=\")) print(\"Update abgeschlossen\".center(50))", "depth == 1: relPath = os.path.join(path, item)[len(homeDir):] filelist[project].setdefault(os.path.join(\".\", relPath)) elif depth == 2:", "def recursiveCrawler(path, project=\"\", serie=\"\", staffel=\"\", folge=\"\", filelist={}, depth=0): if depth == 0: pass", "depth == 2: serie = path.split(\"\\\\\")[-1] filelist[project].setdefault(serie, {}) elif depth == 3: staffel", "os.remove(os.path.join(homeDir, \"sflix_sys\", \"data.js.bak\")) moveFile(os.path.join(homeDir, \"sflix_sys\", \"data.js\"), os.path.join(homeDir, \"sflix_sys\", \"data.js.bak\")) print(\"OK\") 
except: print(\"Fehler\") try:", "as moveFile os.chdir(os.getcwd()) print(\"\".center(50, \"=\")) print(\"Update STEFFLIX-Daten\".center(50)) print(\"\".center(50, \"=\")) homeDir = os.getcwd() allowedFileTypes", "if os.path.exists(os.path.join(homeDir, \"sflix_sys\", \"data.js.bak\")): os.remove(os.path.join(homeDir, \"sflix_sys\", \"data.js.bak\")) moveFile(os.path.join(homeDir, \"sflix_sys\", \"data.js\"), os.path.join(homeDir, \"sflix_sys\", \"data.js.bak\"))", "print(\"Durchsuche Ordner...\".ljust(40), end=\"\") try: filelist = recursiveCrawler(homeDir) print(\"OK\") except: print(\"Fehler\") # fileWriter =", "if extension in allowedFileTypes: if depth == 1: relPath = os.path.join(path, item)[len(homeDir):] filelist[project].setdefault(os.path.join(\".\",", "filelist[project][serie][staffel].setdefault(os.path.join(\".\", relPath), None) elif depth > 3: relPath = os.path.join(path, item)[len(homeDir):] filelist[project][serie][staffel][folge].setdefault(os.path.join(\".\", relPath),", "os.path.join(homeDir, \"sflix_sys\", \"data.js.bak\")) print(\"OK\") except: print(\"Fehler\") try: print(\"Speichere neue Version...\".ljust(40), end=\"\") fileWriter =", "extension = item.split(\".\")[-1] if extension in allowedFileTypes: if depth == 1: relPath =", "\"sflix_sys\", \"data.js.bak\")) moveFile(os.path.join(homeDir, \"sflix_sys\", \"data.js\"), os.path.join(homeDir, \"sflix_sys\", \"data.js.bak\")) print(\"OK\") except: print(\"Fehler\") try: print(\"Speichere", "path.split(\"\\\\\")[-1] filelist[project].setdefault(serie, {}) elif depth == 3: staffel = path.split(\"\\\\\")[-1] filelist[project][serie].setdefault(staffel, {}) elif", "{}) elif depth == 4: folge = path.split(\"\\\\\")[-1] filelist[project][serie][staffel].setdefault(folge, {}) # print(f\"{project} {serie}", "if os.path.isfile(os.path.join(path, item)): extension = item.split(\".\")[-1] if extension in allowedFileTypes: if depth ==", "= recursiveCrawler(os.path.join(path, item), project, serie, 
staffel, folge, filelist, depth+1) return filelist print(\"Durchsuche Ordner...\".ljust(40),", "\"mp4\", \"mp3\", \"png\"] diallowedItems = [\"System Volume Information\", \"$RECYCLE.BIN\", \".vscode\", \"sflix_sys\"] def recursiveCrawler(path,", "# fileWriter.close() try: print(\"Erstelle Backup...\".ljust(40), end=\"\") if os.path.exists(os.path.join(homeDir, \"sflix_sys\", \"data.js.bak\")): os.remove(os.path.join(homeDir, \"sflix_sys\", \"data.js.bak\"))", "os.chdir(os.getcwd()) print(\"\".center(50, \"=\")) print(\"Update STEFFLIX-Daten\".center(50)) print(\"\".center(50, \"=\")) homeDir = os.getcwd() allowedFileTypes = [\"jpg\",", "in folderContent: if not item in diallowedItems: if os.path.isfile(os.path.join(path, item)): extension = item.split(\".\")[-1]", "serie, staffel, folge, filelist, depth+1) return filelist print(\"Durchsuche Ordner...\".ljust(40), end=\"\") try: filelist =", "fileWriter = open(os.path.join(homeDir, \"output.txt\"), \"w\", encoding=\"utf-8\") # fileWriter.write(str(filelist).replace(\"\\\\\\\\\", \"/\").replace(\"None\", \"null\")) # fileWriter.close() try:", "item)[len(homeDir):] filelist[project][serie][staffel][folge].setdefault(os.path.join(\".\", relPath), None) elif os.path.isdir(os.path.join(path, item)): filelist = recursiveCrawler(os.path.join(path, item), project, serie,", "return filelist print(\"Durchsuche Ordner...\".ljust(40), end=\"\") try: filelist = recursiveCrawler(homeDir) print(\"OK\") except: print(\"Fehler\") #", "\"w\", encoding=\"utf-8\") # fileWriter.write(str(filelist).replace(\"\\\\\\\\\", \"/\").replace(\"None\", \"null\")) # fileWriter.close() try: print(\"Erstelle Backup...\".ljust(40), end=\"\") if", "{}) # print(f\"{project} {serie} {staffel}\") folderContent = os.listdir(path) for item in folderContent: if", "= path.split(\"\\\\\")[-1] filelist.setdefault(project, {}) elif depth == 2: serie = path.split(\"\\\\\")[-1] filelist[project].setdefault(serie, {})", 
"filelist[project][serie][staffel][folge].setdefault(os.path.join(\".\", relPath), None) elif os.path.isdir(os.path.join(path, item)): filelist = recursiveCrawler(os.path.join(path, item), project, serie, staffel,", "print(f\"{project} {serie} {staffel}\") folderContent = os.listdir(path) for item in folderContent: if not item", "= os.path.join(path, item)[len(homeDir):] filelist[project][serie][staffel].setdefault(os.path.join(\".\", relPath), None) elif depth > 3: relPath = os.path.join(path,", "relPath = os.path.join(path, item)[len(homeDir):] filelist[project][serie][staffel][folge].setdefault(os.path.join(\".\", relPath), None) elif os.path.isdir(os.path.join(path, item)): filelist = recursiveCrawler(os.path.join(path,", "\"mp3\", \"png\"] diallowedItems = [\"System Volume Information\", \"$RECYCLE.BIN\", \".vscode\", \"sflix_sys\"] def recursiveCrawler(path, project=\"\",", "== 2: serie = path.split(\"\\\\\")[-1] filelist[project].setdefault(serie, {}) elif depth == 3: staffel =", "relPath = os.path.join(path, item)[len(homeDir):] filelist[project][serie][staffel].setdefault(os.path.join(\".\", relPath), None) elif depth > 3: relPath =", "= open(os.path.join(homeDir, \"sflix_sys\", \"data.js\"), \"w\", encoding=\"utf-8\") fileWriter.write(\"var data = \" + str(filelist).replace(\"\\\\\\\\\", \"/\").replace(\"None\",", "\"w\", encoding=\"utf-8\") fileWriter.write(\"var data = \" + str(filelist).replace(\"\\\\\\\\\", \"/\").replace(\"None\", \"null\") + \";\") fileWriter.close()", "elif depth == 3: staffel = path.split(\"\\\\\")[-1] filelist[project][serie].setdefault(staffel, {}) elif depth == 4:", "in allowedFileTypes: if depth == 1: relPath = os.path.join(path, item)[len(homeDir):] filelist[project].setdefault(os.path.join(\".\", relPath)) elif", "print(\"Fehler\") # fileWriter = open(os.path.join(homeDir, \"output.txt\"), \"w\", encoding=\"utf-8\") # fileWriter.write(str(filelist).replace(\"\\\\\\\\\", \"/\").replace(\"None\", \"null\")) #", 
"print(\"\".center(50, \"=\")) print(\"Update STEFFLIX-Daten\".center(50)) print(\"\".center(50, \"=\")) homeDir = os.getcwd() allowedFileTypes = [\"jpg\", \"jpeg\",", "homeDir = os.getcwd() allowedFileTypes = [\"jpg\", \"jpeg\", \"mp4\", \"mp3\", \"png\"] diallowedItems = [\"System", "open(os.path.join(homeDir, \"output.txt\"), \"w\", encoding=\"utf-8\") # fileWriter.write(str(filelist).replace(\"\\\\\\\\\", \"/\").replace(\"None\", \"null\")) # fileWriter.close() try: print(\"Erstelle Backup...\".ljust(40),", "filelist = recursiveCrawler(homeDir) print(\"OK\") except: print(\"Fehler\") # fileWriter = open(os.path.join(homeDir, \"output.txt\"), \"w\", encoding=\"utf-8\")", "relPath = os.path.join(path, item)[len(homeDir):] filelist[project][serie].setdefault(os.path.join(\".\", relPath)) elif depth == 3: relPath = os.path.join(path,", "open(os.path.join(homeDir, \"sflix_sys\", \"data.js\"), \"w\", encoding=\"utf-8\") fileWriter.write(\"var data = \" + str(filelist).replace(\"\\\\\\\\\", \"/\").replace(\"None\", \"null\")", "path.split(\"\\\\\")[-1] filelist[project][serie][staffel].setdefault(folge, {}) # print(f\"{project} {serie} {staffel}\") folderContent = os.listdir(path) for item in", "diallowedItems: if os.path.isfile(os.path.join(path, item)): extension = item.split(\".\")[-1] if extension in allowedFileTypes: if depth", "extension in allowedFileTypes: if depth == 1: relPath = os.path.join(path, item)[len(homeDir):] filelist[project].setdefault(os.path.join(\".\", relPath))", "elif os.path.isdir(os.path.join(path, item)): filelist = recursiveCrawler(os.path.join(path, item), project, serie, staffel, folge, filelist, depth+1)", "elif depth == 4: folge = path.split(\"\\\\\")[-1] filelist[project][serie][staffel].setdefault(folge, {}) # print(f\"{project} {serie} {staffel}\")", "None) elif os.path.isdir(os.path.join(path, item)): filelist = recursiveCrawler(os.path.join(path, item), project, serie, staffel, folge, filelist,", "= [\"jpg\", \"jpeg\", \"mp4\", 
\"mp3\", \"png\"] diallowedItems = [\"System Volume Information\", \"$RECYCLE.BIN\", \".vscode\",", "folge=\"\", filelist={}, depth=0): if depth == 0: pass elif depth == 1: project", "== 1: project = path.split(\"\\\\\")[-1] filelist.setdefault(project, {}) elif depth == 2: serie =", "\"sflix_sys\", \"data.js\"), \"w\", encoding=\"utf-8\") fileWriter.write(\"var data = \" + str(filelist).replace(\"\\\\\\\\\", \"/\").replace(\"None\", \"null\") +", "= os.path.join(path, item)[len(homeDir):] filelist[project][serie].setdefault(os.path.join(\".\", relPath)) elif depth == 3: relPath = os.path.join(path, item)[len(homeDir):]", "depth+1) return filelist print(\"Durchsuche Ordner...\".ljust(40), end=\"\") try: filelist = recursiveCrawler(homeDir) print(\"OK\") except: print(\"Fehler\")", "staffel=\"\", folge=\"\", filelist={}, depth=0): if depth == 0: pass elif depth == 1:", "\";\") fileWriter.close() print(\"OK\") except: print(\"Fehler\") print(\"\".center(50, \"=\")) print(\"Update abgeschlossen\".center(50)) print(\"\".center(50, \"=\")) print() input(\"Enter", "folderContent: if not item in diallowedItems: if os.path.isfile(os.path.join(path, item)): extension = item.split(\".\")[-1] if", "= path.split(\"\\\\\")[-1] filelist[project][serie].setdefault(staffel, {}) elif depth == 4: folge = path.split(\"\\\\\")[-1] filelist[project][serie][staffel].setdefault(folge, {})", "filelist[project][serie].setdefault(os.path.join(\".\", relPath)) elif depth == 3: relPath = os.path.join(path, item)[len(homeDir):] filelist[project][serie][staffel].setdefault(os.path.join(\".\", relPath), None)", "move as moveFile os.chdir(os.getcwd()) print(\"\".center(50, \"=\")) print(\"Update STEFFLIX-Daten\".center(50)) print(\"\".center(50, \"=\")) homeDir = os.getcwd()", "STEFFLIX-Daten\".center(50)) print(\"\".center(50, \"=\")) homeDir = os.getcwd() allowedFileTypes = [\"jpg\", \"jpeg\", \"mp4\", \"mp3\", \"png\"]", "depth > 3: relPath = os.path.join(path, item)[len(homeDir):] 
filelist[project][serie][staffel][folge].setdefault(os.path.join(\".\", relPath), None) elif os.path.isdir(os.path.join(path, item)):", "item), project, serie, staffel, folge, filelist, depth+1) return filelist print(\"Durchsuche Ordner...\".ljust(40), end=\"\") try:", "end=\"\") try: filelist = recursiveCrawler(homeDir) print(\"OK\") except: print(\"Fehler\") # fileWriter = open(os.path.join(homeDir, \"output.txt\"),", "Volume Information\", \"$RECYCLE.BIN\", \".vscode\", \"sflix_sys\"] def recursiveCrawler(path, project=\"\", serie=\"\", staffel=\"\", folge=\"\", filelist={}, depth=0):", "try: filelist = recursiveCrawler(homeDir) print(\"OK\") except: print(\"Fehler\") # fileWriter = open(os.path.join(homeDir, \"output.txt\"), \"w\",", "end=\"\") if os.path.exists(os.path.join(homeDir, \"sflix_sys\", \"data.js.bak\")): os.remove(os.path.join(homeDir, \"sflix_sys\", \"data.js.bak\")) moveFile(os.path.join(homeDir, \"sflix_sys\", \"data.js\"), os.path.join(homeDir, \"sflix_sys\",", "\"sflix_sys\", \"data.js.bak\")): os.remove(os.path.join(homeDir, \"sflix_sys\", \"data.js.bak\")) moveFile(os.path.join(homeDir, \"sflix_sys\", \"data.js\"), os.path.join(homeDir, \"sflix_sys\", \"data.js.bak\")) print(\"OK\") except:", "filelist[project][serie].setdefault(staffel, {}) elif depth == 4: folge = path.split(\"\\\\\")[-1] filelist[project][serie][staffel].setdefault(folge, {}) # print(f\"{project}", "= path.split(\"\\\\\")[-1] filelist[project].setdefault(serie, {}) elif depth == 3: staffel = path.split(\"\\\\\")[-1] filelist[project][serie].setdefault(staffel, {})", "{serie} {staffel}\") folderContent = os.listdir(path) for item in folderContent: if not item in", "folge, filelist, depth+1) return filelist print(\"Durchsuche Ordner...\".ljust(40), end=\"\") try: filelist = recursiveCrawler(homeDir) print(\"OK\")", "{staffel}\") folderContent = os.listdir(path) for item in folderContent: if not item in diallowedItems:", "= item.split(\".\")[-1] if extension in 
allowedFileTypes: if depth == 1: relPath = os.path.join(path,", "print(\"OK\") except: print(\"Fehler\") print(\"\".center(50, \"=\")) print(\"Update abgeschlossen\".center(50)) print(\"\".center(50, \"=\")) print() input(\"Enter zum Beenden\")", "== 3: staffel = path.split(\"\\\\\")[-1] filelist[project][serie].setdefault(staffel, {}) elif depth == 4: folge =", "not item in diallowedItems: if os.path.isfile(os.path.join(path, item)): extension = item.split(\".\")[-1] if extension in", "\"data.js.bak\")) print(\"OK\") except: print(\"Fehler\") try: print(\"Speichere neue Version...\".ljust(40), end=\"\") fileWriter = open(os.path.join(homeDir, \"sflix_sys\",", "elif depth == 3: relPath = os.path.join(path, item)[len(homeDir):] filelist[project][serie][staffel].setdefault(os.path.join(\".\", relPath), None) elif depth", "in diallowedItems: if os.path.isfile(os.path.join(path, item)): extension = item.split(\".\")[-1] if extension in allowedFileTypes: if", "item in diallowedItems: if os.path.isfile(os.path.join(path, item)): extension = item.split(\".\")[-1] if extension in allowedFileTypes:", "# fileWriter.write(str(filelist).replace(\"\\\\\\\\\", \"/\").replace(\"None\", \"null\")) # fileWriter.close() try: print(\"Erstelle Backup...\".ljust(40), end=\"\") if os.path.exists(os.path.join(homeDir, \"sflix_sys\",", "+ \";\") fileWriter.close() print(\"OK\") except: print(\"Fehler\") print(\"\".center(50, \"=\")) print(\"Update abgeschlossen\".center(50)) print(\"\".center(50, \"=\")) print()", "= os.getcwd() allowedFileTypes = [\"jpg\", \"jpeg\", \"mp4\", \"mp3\", \"png\"] diallowedItems = [\"System Volume", "import move as moveFile os.chdir(os.getcwd()) print(\"\".center(50, \"=\")) print(\"Update STEFFLIX-Daten\".center(50)) print(\"\".center(50, \"=\")) homeDir =", "2: relPath = os.path.join(path, item)[len(homeDir):] filelist[project][serie].setdefault(os.path.join(\".\", relPath)) elif depth == 3: relPath =", "data = \" + str(filelist).replace(\"\\\\\\\\\", 
\"/\").replace(\"None\", \"null\") + \";\") fileWriter.close() print(\"OK\") except: print(\"Fehler\")", "filelist[project].setdefault(serie, {}) elif depth == 3: staffel = path.split(\"\\\\\")[-1] filelist[project][serie].setdefault(staffel, {}) elif depth", "+ str(filelist).replace(\"\\\\\\\\\", \"/\").replace(\"None\", \"null\") + \";\") fileWriter.close() print(\"OK\") except: print(\"Fehler\") print(\"\".center(50, \"=\")) print(\"Update", "elif depth == 2: relPath = os.path.join(path, item)[len(homeDir):] filelist[project][serie].setdefault(os.path.join(\".\", relPath)) elif depth ==", "depth == 0: pass elif depth == 1: project = path.split(\"\\\\\")[-1] filelist.setdefault(project, {})", "path.split(\"\\\\\")[-1] filelist.setdefault(project, {}) elif depth == 2: serie = path.split(\"\\\\\")[-1] filelist[project].setdefault(serie, {}) elif", "\"jpeg\", \"mp4\", \"mp3\", \"png\"] diallowedItems = [\"System Volume Information\", \"$RECYCLE.BIN\", \".vscode\", \"sflix_sys\"] def", "item in folderContent: if not item in diallowedItems: if os.path.isfile(os.path.join(path, item)): extension =", "3: staffel = path.split(\"\\\\\")[-1] filelist[project][serie].setdefault(staffel, {}) elif depth == 4: folge = path.split(\"\\\\\")[-1]", "item)): filelist = recursiveCrawler(os.path.join(path, item), project, serie, staffel, folge, filelist, depth+1) return filelist", "\"data.js\"), os.path.join(homeDir, \"sflix_sys\", \"data.js.bak\")) print(\"OK\") except: print(\"Fehler\") try: print(\"Speichere neue Version...\".ljust(40), end=\"\") fileWriter", "== 1: relPath = os.path.join(path, item)[len(homeDir):] filelist[project].setdefault(os.path.join(\".\", relPath)) elif depth == 2: relPath", "> 3: relPath = os.path.join(path, item)[len(homeDir):] filelist[project][serie][staffel][folge].setdefault(os.path.join(\".\", relPath), None) elif os.path.isdir(os.path.join(path, item)): filelist", "from shutil import move as moveFile os.chdir(os.getcwd()) print(\"\".center(50, 
\"=\")) print(\"Update STEFFLIX-Daten\".center(50)) print(\"\".center(50, \"=\"))", "= os.path.join(path, item)[len(homeDir):] filelist[project].setdefault(os.path.join(\".\", relPath)) elif depth == 2: relPath = os.path.join(path, item)[len(homeDir):]", "folge = path.split(\"\\\\\")[-1] filelist[project][serie][staffel].setdefault(folge, {}) # print(f\"{project} {serie} {staffel}\") folderContent = os.listdir(path) for", "# fileWriter = open(os.path.join(homeDir, \"output.txt\"), \"w\", encoding=\"utf-8\") # fileWriter.write(str(filelist).replace(\"\\\\\\\\\", \"/\").replace(\"None\", \"null\")) # fileWriter.close()", "print(\"OK\") except: print(\"Fehler\") try: print(\"Speichere neue Version...\".ljust(40), end=\"\") fileWriter = open(os.path.join(homeDir, \"sflix_sys\", \"data.js\"),", "0: pass elif depth == 1: project = path.split(\"\\\\\")[-1] filelist.setdefault(project, {}) elif depth", "Version...\".ljust(40), end=\"\") fileWriter = open(os.path.join(homeDir, \"sflix_sys\", \"data.js\"), \"w\", encoding=\"utf-8\") fileWriter.write(\"var data = \"", "filelist[project].setdefault(os.path.join(\".\", relPath)) elif depth == 2: relPath = os.path.join(path, item)[len(homeDir):] filelist[project][serie].setdefault(os.path.join(\".\", relPath)) elif", "encoding=\"utf-8\") fileWriter.write(\"var data = \" + str(filelist).replace(\"\\\\\\\\\", \"/\").replace(\"None\", \"null\") + \";\") fileWriter.close() print(\"OK\")", "neue Version...\".ljust(40), end=\"\") fileWriter = open(os.path.join(homeDir, \"sflix_sys\", \"data.js\"), \"w\", encoding=\"utf-8\") fileWriter.write(\"var data =", "diallowedItems = [\"System Volume Information\", \"$RECYCLE.BIN\", \".vscode\", \"sflix_sys\"] def recursiveCrawler(path, project=\"\", serie=\"\", staffel=\"\",", "if depth == 1: relPath = os.path.join(path, item)[len(homeDir):] filelist[project].setdefault(os.path.join(\".\", relPath)) elif depth ==", "os.path.join(path, item)[len(homeDir):] 
filelist[project][serie].setdefault(os.path.join(\".\", relPath)) elif depth == 3: relPath = os.path.join(path, item)[len(homeDir):] filelist[project][serie][staffel].setdefault(os.path.join(\".\",", "import os from shutil import move as moveFile os.chdir(os.getcwd()) print(\"\".center(50, \"=\")) print(\"Update STEFFLIX-Daten\".center(50))", "== 4: folge = path.split(\"\\\\\")[-1] filelist[project][serie][staffel].setdefault(folge, {}) # print(f\"{project} {serie} {staffel}\") folderContent =", "item.split(\".\")[-1] if extension in allowedFileTypes: if depth == 1: relPath = os.path.join(path, item)[len(homeDir):]", "item)[len(homeDir):] filelist[project][serie].setdefault(os.path.join(\".\", relPath)) elif depth == 3: relPath = os.path.join(path, item)[len(homeDir):] filelist[project][serie][staffel].setdefault(os.path.join(\".\", relPath),", "end=\"\") fileWriter = open(os.path.join(homeDir, \"sflix_sys\", \"data.js\"), \"w\", encoding=\"utf-8\") fileWriter.write(\"var data = \" +", "os.path.exists(os.path.join(homeDir, \"sflix_sys\", \"data.js.bak\")): os.remove(os.path.join(homeDir, \"sflix_sys\", \"data.js.bak\")) moveFile(os.path.join(homeDir, \"sflix_sys\", \"data.js\"), os.path.join(homeDir, \"sflix_sys\", \"data.js.bak\")) print(\"OK\")", "if not item in diallowedItems: if os.path.isfile(os.path.join(path, item)): extension = item.split(\".\")[-1] if extension", "\"sflix_sys\", \"data.js.bak\")) print(\"OK\") except: print(\"Fehler\") try: print(\"Speichere neue Version...\".ljust(40), end=\"\") fileWriter = open(os.path.join(homeDir,", "\"=\")) print(\"Update STEFFLIX-Daten\".center(50)) print(\"\".center(50, \"=\")) homeDir = os.getcwd() allowedFileTypes = [\"jpg\", \"jpeg\", \"mp4\",", "os.path.isfile(os.path.join(path, item)): extension = item.split(\".\")[-1] if extension in allowedFileTypes: if depth == 1:", "serie = path.split(\"\\\\\")[-1] filelist[project].setdefault(serie, {}) elif depth == 3: staffel = path.split(\"\\\\\")[-1] 
filelist[project][serie].setdefault(staffel,", "relPath)) elif depth == 2: relPath = os.path.join(path, item)[len(homeDir):] filelist[project][serie].setdefault(os.path.join(\".\", relPath)) elif depth", "for item in folderContent: if not item in diallowedItems: if os.path.isfile(os.path.join(path, item)): extension", "3: relPath = os.path.join(path, item)[len(homeDir):] filelist[project][serie][staffel][folge].setdefault(os.path.join(\".\", relPath), None) elif os.path.isdir(os.path.join(path, item)): filelist =", "\"null\") + \";\") fileWriter.close() print(\"OK\") except: print(\"Fehler\") print(\"\".center(50, \"=\")) print(\"Update abgeschlossen\".center(50)) print(\"\".center(50, \"=\"))", "Information\", \"$RECYCLE.BIN\", \".vscode\", \"sflix_sys\"] def recursiveCrawler(path, project=\"\", serie=\"\", staffel=\"\", folge=\"\", filelist={}, depth=0): if", "\"=\")) homeDir = os.getcwd() allowedFileTypes = [\"jpg\", \"jpeg\", \"mp4\", \"mp3\", \"png\"] diallowedItems =", "[\"System Volume Information\", \"$RECYCLE.BIN\", \".vscode\", \"sflix_sys\"] def recursiveCrawler(path, project=\"\", serie=\"\", staffel=\"\", folge=\"\", filelist={},", "depth == 1: project = path.split(\"\\\\\")[-1] filelist.setdefault(project, {}) elif depth == 2: serie", "# print(f\"{project} {serie} {staffel}\") folderContent = os.listdir(path) for item in folderContent: if not", "\"sflix_sys\", \"data.js\"), os.path.join(homeDir, \"sflix_sys\", \"data.js.bak\")) print(\"OK\") except: print(\"Fehler\") try: print(\"Speichere neue Version...\".ljust(40), end=\"\")", "\"/\").replace(\"None\", \"null\")) # fileWriter.close() try: print(\"Erstelle Backup...\".ljust(40), end=\"\") if os.path.exists(os.path.join(homeDir, \"sflix_sys\", \"data.js.bak\")): os.remove(os.path.join(homeDir,", "filelist, depth+1) return filelist print(\"Durchsuche Ordner...\".ljust(40), end=\"\") try: filelist = recursiveCrawler(homeDir) print(\"OK\") except:", "os.path.join(path, item)[len(homeDir):] 
filelist[project][serie][staffel][folge].setdefault(os.path.join(\".\", relPath), None) elif os.path.isdir(os.path.join(path, item)): filelist = recursiveCrawler(os.path.join(path, item), project,", "project=\"\", serie=\"\", staffel=\"\", folge=\"\", filelist={}, depth=0): if depth == 0: pass elif depth", "depth == 3: staffel = path.split(\"\\\\\")[-1] filelist[project][serie].setdefault(staffel, {}) elif depth == 4: folge", "fileWriter.close() try: print(\"Erstelle Backup...\".ljust(40), end=\"\") if os.path.exists(os.path.join(homeDir, \"sflix_sys\", \"data.js.bak\")): os.remove(os.path.join(homeDir, \"sflix_sys\", \"data.js.bak\")) moveFile(os.path.join(homeDir,", "item)): extension = item.split(\".\")[-1] if extension in allowedFileTypes: if depth == 1: relPath", "item)[len(homeDir):] filelist[project][serie][staffel].setdefault(os.path.join(\".\", relPath), None) elif depth > 3: relPath = os.path.join(path, item)[len(homeDir):] filelist[project][serie][staffel][folge].setdefault(os.path.join(\".\",", "fileWriter = open(os.path.join(homeDir, \"sflix_sys\", \"data.js\"), \"w\", encoding=\"utf-8\") fileWriter.write(\"var data = \" + str(filelist).replace(\"\\\\\\\\\",", "os.path.join(path, item)[len(homeDir):] filelist[project][serie][staffel].setdefault(os.path.join(\".\", relPath), None) elif depth > 3: relPath = os.path.join(path, item)[len(homeDir):]", "folderContent = os.listdir(path) for item in folderContent: if not item in diallowedItems: if", "filelist print(\"Durchsuche Ordner...\".ljust(40), end=\"\") try: filelist = recursiveCrawler(homeDir) print(\"OK\") except: print(\"Fehler\") # fileWriter", "{}) elif depth == 2: serie = path.split(\"\\\\\")[-1] filelist[project].setdefault(serie, {}) elif depth ==", "if depth == 0: pass elif depth == 1: project = path.split(\"\\\\\")[-1] filelist.setdefault(project,", "relPath), None) elif os.path.isdir(os.path.join(path, item)): filelist = recursiveCrawler(os.path.join(path, item), project, serie, staffel, 
folge,", "4: folge = path.split(\"\\\\\")[-1] filelist[project][serie][staffel].setdefault(folge, {}) # print(f\"{project} {serie} {staffel}\") folderContent = os.listdir(path)", "filelist[project][serie][staffel].setdefault(folge, {}) # print(f\"{project} {serie} {staffel}\") folderContent = os.listdir(path) for item in folderContent:", "depth == 2: relPath = os.path.join(path, item)[len(homeDir):] filelist[project][serie].setdefault(os.path.join(\".\", relPath)) elif depth == 3:", "\".vscode\", \"sflix_sys\"] def recursiveCrawler(path, project=\"\", serie=\"\", staffel=\"\", folge=\"\", filelist={}, depth=0): if depth ==", "os from shutil import move as moveFile os.chdir(os.getcwd()) print(\"\".center(50, \"=\")) print(\"Update STEFFLIX-Daten\".center(50)) print(\"\".center(50,", "= path.split(\"\\\\\")[-1] filelist[project][serie][staffel].setdefault(folge, {}) # print(f\"{project} {serie} {staffel}\") folderContent = os.listdir(path) for item", "relPath = os.path.join(path, item)[len(homeDir):] filelist[project].setdefault(os.path.join(\".\", relPath)) elif depth == 2: relPath = os.path.join(path,", "print(\"Fehler\") try: print(\"Speichere neue Version...\".ljust(40), end=\"\") fileWriter = open(os.path.join(homeDir, \"sflix_sys\", \"data.js\"), \"w\", encoding=\"utf-8\")", "\" + str(filelist).replace(\"\\\\\\\\\", \"/\").replace(\"None\", \"null\") + \";\") fileWriter.close() print(\"OK\") except: print(\"Fehler\") print(\"\".center(50, \"=\"))", "== 3: relPath = os.path.join(path, item)[len(homeDir):] filelist[project][serie][staffel].setdefault(os.path.join(\".\", relPath), None) elif depth > 3:", "path.split(\"\\\\\")[-1] filelist[project][serie].setdefault(staffel, {}) elif depth == 4: folge = path.split(\"\\\\\")[-1] filelist[project][serie][staffel].setdefault(folge, {}) #", "\"null\")) # fileWriter.close() try: print(\"Erstelle Backup...\".ljust(40), end=\"\") if os.path.exists(os.path.join(homeDir, \"sflix_sys\", \"data.js.bak\")): 
os.remove(os.path.join(homeDir, \"sflix_sys\",", "shutil import move as moveFile os.chdir(os.getcwd()) print(\"\".center(50, \"=\")) print(\"Update STEFFLIX-Daten\".center(50)) print(\"\".center(50, \"=\")) homeDir", "[\"jpg\", \"jpeg\", \"mp4\", \"mp3\", \"png\"] diallowedItems = [\"System Volume Information\", \"$RECYCLE.BIN\", \".vscode\", \"sflix_sys\"]", "print(\"Update STEFFLIX-Daten\".center(50)) print(\"\".center(50, \"=\")) homeDir = os.getcwd() allowedFileTypes = [\"jpg\", \"jpeg\", \"mp4\", \"mp3\",", "Ordner...\".ljust(40), end=\"\") try: filelist = recursiveCrawler(homeDir) print(\"OK\") except: print(\"Fehler\") # fileWriter = open(os.path.join(homeDir,", "item)[len(homeDir):] filelist[project].setdefault(os.path.join(\".\", relPath)) elif depth == 2: relPath = os.path.join(path, item)[len(homeDir):] filelist[project][serie].setdefault(os.path.join(\".\", relPath))", "fileWriter.write(\"var data = \" + str(filelist).replace(\"\\\\\\\\\", \"/\").replace(\"None\", \"null\") + \";\") fileWriter.close() print(\"OK\") except:", "= open(os.path.join(homeDir, \"output.txt\"), \"w\", encoding=\"utf-8\") # fileWriter.write(str(filelist).replace(\"\\\\\\\\\", \"/\").replace(\"None\", \"null\")) # fileWriter.close() try: print(\"Erstelle", "filelist = recursiveCrawler(os.path.join(path, item), project, serie, staffel, folge, filelist, depth+1) return filelist print(\"Durchsuche", "\"$RECYCLE.BIN\", \".vscode\", \"sflix_sys\"] def recursiveCrawler(path, project=\"\", serie=\"\", staffel=\"\", folge=\"\", filelist={}, depth=0): if depth", "elif depth > 3: relPath = os.path.join(path, item)[len(homeDir):] filelist[project][serie][staffel][folge].setdefault(os.path.join(\".\", relPath), None) elif os.path.isdir(os.path.join(path,", "1: relPath = os.path.join(path, item)[len(homeDir):] filelist[project].setdefault(os.path.join(\".\", relPath)) elif depth == 2: relPath =", "print(\"Speichere neue Version...\".ljust(40), end=\"\") fileWriter = 
open(os.path.join(homeDir, \"sflix_sys\", \"data.js\"), \"w\", encoding=\"utf-8\") fileWriter.write(\"var data", "depth=0): if depth == 0: pass elif depth == 1: project = path.split(\"\\\\\")[-1]", "staffel = path.split(\"\\\\\")[-1] filelist[project][serie].setdefault(staffel, {}) elif depth == 4: folge = path.split(\"\\\\\")[-1] filelist[project][serie][staffel].setdefault(folge,", "serie=\"\", staffel=\"\", folge=\"\", filelist={}, depth=0): if depth == 0: pass elif depth ==", "relPath), None) elif depth > 3: relPath = os.path.join(path, item)[len(homeDir):] filelist[project][serie][staffel][folge].setdefault(os.path.join(\".\", relPath), None)", "3: relPath = os.path.join(path, item)[len(homeDir):] filelist[project][serie][staffel].setdefault(os.path.join(\".\", relPath), None) elif depth > 3: relPath", "elif depth == 2: serie = path.split(\"\\\\\")[-1] filelist[project].setdefault(serie, {}) elif depth == 3:", "os.path.isdir(os.path.join(path, item)): filelist = recursiveCrawler(os.path.join(path, item), project, serie, staffel, folge, filelist, depth+1) return", "allowedFileTypes = [\"jpg\", \"jpeg\", \"mp4\", \"mp3\", \"png\"] diallowedItems = [\"System Volume Information\", \"$RECYCLE.BIN\",", "os.path.join(path, item)[len(homeDir):] filelist[project].setdefault(os.path.join(\".\", relPath)) elif depth == 2: relPath = os.path.join(path, item)[len(homeDir):] filelist[project][serie].setdefault(os.path.join(\".\",", "try: print(\"Erstelle Backup...\".ljust(40), end=\"\") if os.path.exists(os.path.join(homeDir, \"sflix_sys\", \"data.js.bak\")): os.remove(os.path.join(homeDir, \"sflix_sys\", \"data.js.bak\")) moveFile(os.path.join(homeDir, \"sflix_sys\",", "\"png\"] diallowedItems = [\"System Volume Information\", \"$RECYCLE.BIN\", \".vscode\", \"sflix_sys\"] def recursiveCrawler(path, project=\"\", serie=\"\",", "except: print(\"Fehler\") try: print(\"Speichere neue Version...\".ljust(40), end=\"\") fileWriter = open(os.path.join(homeDir, \"sflix_sys\", 
\"data.js\"), \"w\",", "try: print(\"Speichere neue Version...\".ljust(40), end=\"\") fileWriter = open(os.path.join(homeDir, \"sflix_sys\", \"data.js\"), \"w\", encoding=\"utf-8\") fileWriter.write(\"var", "moveFile os.chdir(os.getcwd()) print(\"\".center(50, \"=\")) print(\"Update STEFFLIX-Daten\".center(50)) print(\"\".center(50, \"=\")) homeDir = os.getcwd() allowedFileTypes =", "depth == 3: relPath = os.path.join(path, item)[len(homeDir):] filelist[project][serie][staffel].setdefault(os.path.join(\".\", relPath), None) elif depth >", "filelist.setdefault(project, {}) elif depth == 2: serie = path.split(\"\\\\\")[-1] filelist[project].setdefault(serie, {}) elif depth", "== 2: relPath = os.path.join(path, item)[len(homeDir):] filelist[project][serie].setdefault(os.path.join(\".\", relPath)) elif depth == 3: relPath", "print(\"\".center(50, \"=\")) homeDir = os.getcwd() allowedFileTypes = [\"jpg\", \"jpeg\", \"mp4\", \"mp3\", \"png\"] diallowedItems", "\"data.js.bak\")): os.remove(os.path.join(homeDir, \"sflix_sys\", \"data.js.bak\")) moveFile(os.path.join(homeDir, \"sflix_sys\", \"data.js\"), os.path.join(homeDir, \"sflix_sys\", \"data.js.bak\")) print(\"OK\") except: print(\"Fehler\")", "recursiveCrawler(os.path.join(path, item), project, serie, staffel, folge, filelist, depth+1) return filelist print(\"Durchsuche Ordner...\".ljust(40), end=\"\")", "\"data.js.bak\")) moveFile(os.path.join(homeDir, \"sflix_sys\", \"data.js\"), os.path.join(homeDir, \"sflix_sys\", \"data.js.bak\")) print(\"OK\") except: print(\"Fehler\") try: print(\"Speichere neue", "print(\"Erstelle Backup...\".ljust(40), end=\"\") if os.path.exists(os.path.join(homeDir, \"sflix_sys\", \"data.js.bak\")): os.remove(os.path.join(homeDir, \"sflix_sys\", \"data.js.bak\")) moveFile(os.path.join(homeDir, \"sflix_sys\", \"data.js\"),", "filelist={}, depth=0): if depth == 0: pass elif depth == 1: project =", "\"output.txt\"), \"w\", encoding=\"utf-8\") # 
fileWriter.write(str(filelist).replace(\"\\\\\\\\\", \"/\").replace(\"None\", \"null\")) # fileWriter.close() try: print(\"Erstelle Backup...\".ljust(40), end=\"\")", "= \" + str(filelist).replace(\"\\\\\\\\\", \"/\").replace(\"None\", \"null\") + \";\") fileWriter.close() print(\"OK\") except: print(\"Fehler\") print(\"\".center(50,", "print(\"OK\") except: print(\"Fehler\") # fileWriter = open(os.path.join(homeDir, \"output.txt\"), \"w\", encoding=\"utf-8\") # fileWriter.write(str(filelist).replace(\"\\\\\\\\\", \"/\").replace(\"None\",", "= os.listdir(path) for item in folderContent: if not item in diallowedItems: if os.path.isfile(os.path.join(path,", "\"data.js\"), \"w\", encoding=\"utf-8\") fileWriter.write(\"var data = \" + str(filelist).replace(\"\\\\\\\\\", \"/\").replace(\"None\", \"null\") + \";\")", "= [\"System Volume Information\", \"$RECYCLE.BIN\", \".vscode\", \"sflix_sys\"] def recursiveCrawler(path, project=\"\", serie=\"\", staffel=\"\", folge=\"\",", "relPath)) elif depth == 3: relPath = os.path.join(path, item)[len(homeDir):] filelist[project][serie][staffel].setdefault(os.path.join(\".\", relPath), None) elif", "= recursiveCrawler(homeDir) print(\"OK\") except: print(\"Fehler\") # fileWriter = open(os.path.join(homeDir, \"output.txt\"), \"w\", encoding=\"utf-8\") #", "os.getcwd() allowedFileTypes = [\"jpg\", \"jpeg\", \"mp4\", \"mp3\", \"png\"] diallowedItems = [\"System Volume Information\",", "allowedFileTypes: if depth == 1: relPath = os.path.join(path, item)[len(homeDir):] filelist[project].setdefault(os.path.join(\".\", relPath)) elif depth", "project, serie, staffel, folge, filelist, depth+1) return filelist print(\"Durchsuche Ordner...\".ljust(40), end=\"\") try: filelist", "Backup...\".ljust(40), end=\"\") if os.path.exists(os.path.join(homeDir, \"sflix_sys\", \"data.js.bak\")): os.remove(os.path.join(homeDir, \"sflix_sys\", \"data.js.bak\")) moveFile(os.path.join(homeDir, \"sflix_sys\", \"data.js\"), os.path.join(homeDir,", 
"os.listdir(path) for item in folderContent: if not item in diallowedItems: if os.path.isfile(os.path.join(path, item)):", "pass elif depth == 1: project = path.split(\"\\\\\")[-1] filelist.setdefault(project, {}) elif depth ==" ]
[ "ljoy_h, ljoy_l, buttons, ext): # send output self.ser.write('\\xFF') self.ser.write(chr(rjoy_h)) self.ser.write(chr(rjoy_l)) self.ser.write(chr(ljoy_h)) self.ser.write(chr(ljoy_l)) self.ser.write(chr(buttons))", "WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF MERCHANTABILITY AND", "our first if ord(d) == 0xff: #print \"Oxff found\" return self.readPacket(inst, 1) else:", "AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL", "= -1): d = self.ser.read() if d == '': #print \"Fail Read\" return", "power >= -1.0: self.extInstruction(0x70 + int(power*10)) def setDigital(self, id, direction, value): \"\"\" Set", "LABS BE LIABLE FOR ANY DIRECT, INDIRECT, # INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL", "found, restart: \" + str(ord(d)) return self.readPacket(inst, 0) elif mode == 2: #", "== 0xff: #print \"Oxff found\" return self.readPacket(inst, 1) else: #print \"Oxff NOT found,", "without # modification, are permitted provided that the following conditions are met: #", "!= 255: #print \"Checksum ERROR\" return -1 return value # fail return -1", "\"\"\" self.extInstruction(0x1B) return self.readPacket(0x1B) def motorsOff(self): self.extInstruction(0x40) def leftMotor(self, power): \"\"\" Set left", "name of the Vanadium Labs LLC nor the names of its # contributors", "return -1 def extInstruction(self, inst): self.sendPacket(self.pan>>8,self.pan%256,self.tilt>>8,self.tilt%256, 0, inst) def readAnalog(self, id): \"\"\" Read", "ANY THEORY OF # LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING", "software without specific prior written permission. 
# # THIS SOFTWARE IS PROVIDED BY", "byte if mode == 0: # get our first if ord(d) == 0xff:", "self.readPacket(0x1B) def motorsOff(self): self.extInstruction(0x40) def leftMotor(self, power): \"\"\" Set left motor power, -1", "= 1 LOW = 0 HIGH = 1 class CommanderEXT(): NO_ACTION = 0x08", "d == '': #print \"Fail Read\" return -1 # now process our byte", "self.readPacket(0x10 + id) def readDigital(self): \"\"\" Read all 8 digital ports as a", "return self.readPacket(inst, 3, ord(d)) elif mode == 3: # get checksum #print \"Checksum", "OUTPUT, HIGH) if i > 2: c.setDigital(i-2, INPUT, LOW) time.sleep(0.25) c.setDigital(4, OUTPUT, HIGH)", "def motorsOff(self): self.extInstruction(0x40) def leftMotor(self, power): \"\"\" Set left motor power, -1 to", "and power >= -1.0: self.extInstruction(0x50 + int(power*10)) def rightMotor(self, power): \"\"\" Set right", "Cycle digital ports using extended mode for i in range(8): c.setDigital(i, OUTPUT, HIGH)", "distribution. # * Neither the name of the Vanadium Labs LLC nor the", "Copyright (c) 2008-2010 Vanadium Labs LLC. All right reserved. # # Redistribution and", "= port self.ser.timeout = 0.5 self.ser.open() def sendPacket(self, rjoy_h, rjoy_l, ljoy_h, ljoy_l, buttons,", "id, direction, value): \"\"\" Set a digital pin value. id is 0 to", "byte. \"\"\" self.extInstruction(0x1B) return self.readPacket(0x1B) def motorsOff(self): self.extInstruction(0x40) def leftMotor(self, power): \"\"\" Set", "direction, value): \"\"\" Set a digital pin value. 
id is 0 to 7.", "elif mode == 3: # get checksum #print \"Checksum found: \" + str(ord(d))", "# Read digital inputs print \"Digital:\", c.readDigital() # Exercise turret for i in", "BUT_R3 = 4 BUT_L4 = 8 BUT_L5 = 16 BUT_L6 = 32 BUT_RT", "CAUSED AND ON ANY THEORY OF # LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,", "* Redistributions of source code must retain the above copyright # notice, this", "THE USE OF THIS SOFTWARE, EVEN IF # ADVISED OF THE POSSIBILITY OF", "power): \"\"\" Set right motor power, -1 to 1. \"\"\" if power <=", "# Copyright (c) 2008-2010 Vanadium Labs LLC. All right reserved. # # Redistribution", "c.setDigital(i, OUTPUT, HIGH) if i > 2: c.setDigital(i-2, INPUT, LOW) time.sleep(0.25) c.setDigital(4, OUTPUT,", "-1.0: self.extInstruction(0x50 + int(power*10)) def rightMotor(self, power): \"\"\" Set right motor power, -1", "0.5 self.ser.open() def sendPacket(self, rjoy_h, rjoy_l, ljoy_h, ljoy_l, buttons, ext): # send output", "are permitted provided that the following conditions are met: # * Redistributions of", "HOWEVER CAUSED AND ON ANY THEORY OF # LIABILITY, WHETHER IN CONTRACT, STRICT", "of conditions and the following disclaimer. 
# * Redistributions in binary form must", "\"\"\" self.extInstruction(0x80 + 4*id + direction*2 + value) if __name__ == \"__main__\": #", "conditions and the following disclaimer in the # documentation and/or other materials provided", "now process our byte if mode == 0: # get our first if", "port): self.ser = serial.Serial() self.ser.baudrate = 38400 self.ser.port = port self.ser.timeout = 0.5", "1.0 and power >= -1.0: self.extInstruction(0x50 + int(power*10)) def rightMotor(self, power): \"\"\" Set", "16 BUT_L6 = 32 BUT_RT = 64 BUT_LT = 128 INPUT = 0", "found, restart: \" + str(ord(d)) return self.readPacket(inst, 0) elif mode == 1: #", "#print \"Oxff NOT found, restart: \" + str(ord(d)) return self.readPacket(inst, 0) elif mode", "c.pan = 512 for i in range(20): c.tilt = 312 + i*20 c.extInstruction(c.NO_ACTION)", "LOSS OF USE, DATA, # OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND", "SOFTWARE, EVEN IF # ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import time,", "c.extInstruction(c.NO_ACTION) time.sleep(.2) c.pan = 512 for i in range(20): c.tilt = 312 +", "\" + str(ord(d)) return self.readPacket(inst, 0) elif mode == 2: # get value", "to 7. \"\"\" self.extInstruction(0x10 + id) return self.readPacket(0x10 + id) def readDigital(self): \"\"\"", "str(ord(d)) return self.readPacket(inst, 0) elif mode == 2: # get value return self.readPacket(inst,", "# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO,", "3: # get checksum #print \"Checksum found: \" + str(ord(d)) checksum = inst", "Neither the name of the Vanadium Labs LLC nor the names of its", "WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF # ADVISED OF", "notice, this list of conditions and the following disclaimer in the # documentation", "Vanadium Labs LLC. All right reserved. # # Redistribution and use in source", "met: # * Redistributions of source code must retain the above copyright #", "0 or 1. 
\"\"\" self.extInstruction(0x80 + 4*id + direction*2 + value) if __name__", "'': #print \"Fail Read\" return -1 # now process our byte if mode", "found\" return self.readPacket(inst, 1) else: #print \"Oxff NOT found, restart: \" + str(ord(d))", "this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED", "CONTRIBUTORS \"AS IS\" AND # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT", "extInstruction(self, inst): self.sendPacket(self.pan>>8,self.pan%256,self.tilt>>8,self.tilt%256, 0, inst) def readAnalog(self, id): \"\"\" Read an analog port,", "elif mode == 2: # get value return self.readPacket(inst, 3, ord(d)) elif mode", "= 1 BUT_R2 = 2 BUT_R3 = 4 BUT_L4 = 8 BUT_L5 =", "this list of conditions and the following disclaimer. # * Redistributions in binary", "OR SERVICES; LOSS OF USE, DATA, # OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER", "readPacket(self, inst, mode = 0, value = -1): d = self.ser.read() if d", "NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,", "LOW = 0 HIGH = 1 class CommanderEXT(): NO_ACTION = 0x08 pan =", "the following conditions are met: # * Redistributions of source code must retain", "512 tilt = 512 def __init__(self, port): self.ser = serial.Serial() self.ser.baudrate = 38400", "checksum = inst + value + ord(d) #print \"Checksum computed: \" + str(checksum)", "256 != 255: #print \"Checksum ERROR\" return -1 return value # fail return", "with or without # modification, are permitted provided that the following conditions are", "digital ports using extended mode for i in range(8): c.setDigital(i, OUTPUT, HIGH) if", "for i in range(8): print c.readAnalog(i) # Read digital inputs print \"Digital:\", c.readDigital()", "LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE # OR OTHERWISE)", "\"\"\" Set right motor power, -1 to 1. \"\"\" if power <= 1.0", "conditions and the following disclaimer. 
# * Redistributions in binary form must reproduce", "OUTPUT, HIGH) c.setDigital(6, OUTPUT, HIGH) # Read analog inputs for i in range(8):", "i > 2: c.setDigital(i-2, INPUT, LOW) time.sleep(0.25) c.setDigital(4, OUTPUT, HIGH) c.setDigital(6, OUTPUT, HIGH)", "Commander Extended Instruction Set Example # Copyright (c) 2008-2010 Vanadium Labs LLC. All", "1) else: #print \"Oxff NOT found, restart: \" + str(ord(d)) return self.readPacket(inst, 0)", "if ord(d) == 0xff: #print \"Oxff found\" return self.readPacket(inst, 1) else: #print \"Oxff", "return -1 # now process our byte if mode == 0: # get", "\" + str(ord(d)) return self.readPacket(inst, 0) elif mode == 1: # get our", "#print \"Checksum ERROR\" return -1 return value # fail return -1 def extInstruction(self,", "readDigital(self): \"\"\" Read all 8 digital ports as a single byte. \"\"\" self.extInstruction(0x1B)", "\"\"\" Set left motor power, -1 to 1. \"\"\" if power <= 1.0", "int(power*10)) def setDigital(self, id, direction, value): \"\"\" Set a digital pin value. id", "* Neither the name of the Vanadium Labs LLC nor the names of", "if __name__ == \"__main__\": # commanderEXT.py <serialport> c = CommanderEXT(sys.argv[1]) # Cycle digital", "# from this software without specific prior written permission. # # THIS SOFTWARE", "\"Instruction NOT found, restart: \" + str(ord(d)) return self.readPacket(inst, 0) elif mode ==", "digital pin value. id is 0 to 7. value and direction are 0", "#print \"Instruction NOT found, restart: \" + str(ord(d)) return self.readPacket(inst, 0) elif mode", "following disclaimer. 
#!/usr/bin/env python
# CommEXT.py - ArbotiX Commander Extended Instruction Set Example
# Copyright (c) 2008-2010 Vanadium Labs LLC.  All right reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#   * Redistributions of source code must retain the above copyright
#     notice, this list of conditions and the following disclaimer.
#   * Redistributions in binary form must reproduce the above copyright
#     notice, this list of conditions and the following disclaimer in the
#     documentation and/or other materials provided with the distribution.
#   * Neither the name of the Vanadium Labs LLC nor the names of its
#     contributors may be used to endorse or promote products derived
#     from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED.  IN NO EVENT SHALL VANADIUM LABS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.

import time
import sys

# Commander button bitmask definitions
BUT_R1 = 1
BUT_R2 = 2
BUT_R3 = 4
BUT_L4 = 8
BUT_L5 = 16
BUT_L6 = 32
BUT_RT = 64
BUT_LT = 128

# Digital pin direction and level constants for setDigital()
INPUT = 0
OUTPUT = 1
LOW = 0
HIGH = 1


class CommanderEXT():
    """Speak the ArbotiX Commander extended instruction set over a serial link.

    Every outgoing packet carries the current ``pan``/``tilt`` turret values
    plus one extended-instruction byte; replies (where the protocol defines
    one) are 4-byte frames: 0xFF header, instruction echo, value, checksum.
    """

    NO_ACTION = 0x08   # extended instruction meaning "no action"
    pan = 512          # turret pan position, sent with every packet
    tilt = 512         # turret tilt position, sent with every packet

    def __init__(self, port):
        """Open *port* at 38400 baud with a 0.5 s read timeout."""
        # Lazy import: only the constructor needs pyserial, so the module
        # itself can be imported (e.g. for testing) without it installed.
        import serial
        self.ser = serial.Serial()
        self.ser.baudrate = 38400
        self.ser.port = port
        self.ser.timeout = 0.5
        self.ser.open()

    def sendPacket(self, rjoy_h, rjoy_l, ljoy_h, ljoy_l, buttons, ext):
        """Send one 8-byte Commander packet: 0xFF header, 6 payload bytes,
        then a checksum chosen so payload + checksum == 255 (mod 256)."""
        self.ser.write('\xFF')
        self.ser.write(chr(rjoy_h))
        self.ser.write(chr(rjoy_l))
        self.ser.write(chr(ljoy_h))
        self.ser.write(chr(ljoy_l))
        self.ser.write(chr(buttons))
        self.ser.write(chr(ext))
        self.ser.write(chr(255 - ((rjoy_h+rjoy_l+ljoy_h+ljoy_l+buttons+ext)%256)))

    def readPacket(self, inst, mode=0, value=-1):
        """Read a reply frame for instruction *inst*.

        Scans the stream for a 0xFF header followed by the instruction echo,
        then reads the value and checksum bytes.  Returns the value byte on
        a valid checksum, or -1 on read timeout or checksum failure.

        Rewritten as an iterative state machine: the original recursed once
        per incoming byte, so a noisy stream could exhaust the recursion
        limit before the serial timeout fired.  *mode*/*value* keep their
        original meaning (parser state / value byte) for compatibility.
        """
        while True:
            d = self.ser.read()
            if d == '':
                return -1                  # timeout: nothing more to read
            b = ord(d)
            if mode == 0:                  # hunting for the 0xFF header
                if b == 0xff:
                    mode = 1
            elif mode == 1:                # expect the instruction echo
                # on mismatch, restart the hunt (mismatched byte discarded,
                # exactly as the original recursion did)
                mode = 2 if b == inst else 0
            elif mode == 2:                # value byte
                value = b
                mode = 3
            elif mode == 3:                # checksum byte
                if (inst + value + b) % 256 != 255:
                    return -1              # corrupt frame
                return value
            else:
                return -1                  # unknown state: fail

    def extInstruction(self, inst):
        """Send extended instruction *inst* with the current pan/tilt."""
        self.sendPacket(self.pan>>8, self.pan%256, self.tilt>>8, self.tilt%256, 0, inst)

    def readAnalog(self, id):
        """ Read an analog port, id is 0 to 7. """
        self.extInstruction(0x10 + id)
        return self.readPacket(0x10 + id)

    def readDigital(self):
        """ Read all 8 digital ports as a single byte. """
        self.extInstruction(0x1B)
        return self.readPacket(0x1B)

    def motorsOff(self):
        """Stop both drive motors."""
        self.extInstruction(0x40)

    def leftMotor(self, power):
        """ Set left motor power, -1 to 1. """
        # power is quantized to tenths; out-of-range values are ignored
        if -1.0 <= power <= 1.0:
            self.extInstruction(0x50 + int(power*10))

    def rightMotor(self, power):
        """ Set right motor power, -1 to 1. """
        if -1.0 <= power <= 1.0:
            self.extInstruction(0x70 + int(power*10))

    def setDigital(self, id, direction, value):
        """ Set a digital pin value. id is 0 to 7. value and direction are 0 or 1. """
        self.extInstruction(0x80 + 4*id + direction*2 + value)


if __name__ == "__main__":
    # usage: commanderEXT.py <serialport>
    c = CommanderEXT(sys.argv[1])
    # Cycle digital ports using extended mode.
    for i in range(8):
        c.setDigital(i, OUTPUT, HIGH)
        if i > 2:
            c.setDigital(i-2, INPUT, LOW)
        time.sleep(0.25)
    c.setDigital(4, OUTPUT, HIGH)
    c.setDigital(6, OUTPUT, HIGH)
    # Read analog inputs.  (print() calls are Python 2/3 compatible and
    # produce the same output as the original print statements.)
    for i in range(8):
        print(c.readAnalog(i))
    # Read digital inputs.
    print("Digital: %s" % c.readDigital())
    # Exercise the turret.
    for i in range(20):
        c.pan = 312 + i*20
        c.extInstruction(c.NO_ACTION)
        time.sleep(.2)
    c.pan = 512
    for i in range(20):
        c.tilt = 312 + i*20
        c.extInstruction(c.NO_ACTION)
        time.sleep(.2)
# * Redistributions in binary form must reproduce the above", "0: # get our first if ord(d) == 0xff: #print \"Oxff found\" return", "# commanderEXT.py <serialport> c = CommanderEXT(sys.argv[1]) # Cycle digital ports using extended mode", ">= -1.0: self.extInstruction(0x70 + int(power*10)) def setDigital(self, id, direction, value): \"\"\" Set a", "CommanderEXT(): NO_ACTION = 0x08 pan = 512 tilt = 512 def __init__(self, port):", "# Commander definitions BUT_R1 = 1 BUT_R2 = 2 BUT_R3 = 4 BUT_L4", "OF SUCH DAMAGE. import time, sys, serial # Commander definitions BUT_R1 = 1", "MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT", "list of conditions and the following disclaimer in the # documentation and/or other", "time.sleep(0.25) c.setDigital(4, OUTPUT, HIGH) c.setDigital(6, OUTPUT, HIGH) # Read analog inputs for i", "DIRECT, INDIRECT, # INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT #", "- ((rjoy_h+rjoy_l+ljoy_h+ljoy_l+buttons+ext)%256))) def readPacket(self, inst, mode = 0, value = -1): d =", "right reserved. # # Redistribution and use in source and binary forms, with", "the names of its # contributors may be used to endorse or promote", "= 0x08 pan = 512 tilt = 512 def __init__(self, port): self.ser =", "if d == '': #print \"Fail Read\" return -1 # now process our", "following conditions are met: # * Redistributions of source code must retain the", "class CommanderEXT(): NO_ACTION = 0x08 pan = 512 tilt = 512 def __init__(self,", "self.ser.write(chr(ext)) self.ser.write(chr(255 - ((rjoy_h+rjoy_l+ljoy_h+ljoy_l+buttons+ext)%256))) def readPacket(self, inst, mode = 0, value = -1):", "retain the above copyright # notice, this list of conditions and the following", "38400 self.ser.port = port self.ser.timeout = 0.5 self.ser.open() def sendPacket(self, rjoy_h, rjoy_l, ljoy_h,", "SUCH DAMAGE. 
import time, sys, serial # Commander definitions BUT_R1 = 1 BUT_R2", "return self.readPacket(inst, 0) elif mode == 2: # get value return self.readPacket(inst, 3,", "\" + str(checksum) if checksum % 256 != 255: #print \"Checksum ERROR\" return", "0x08 pan = 512 tilt = 512 def __init__(self, port): self.ser = serial.Serial()", "-1 return value # fail return -1 def extInstruction(self, inst): self.sendPacket(self.pan>>8,self.pan%256,self.tilt>>8,self.tilt%256, 0, inst)", "312 + i*20 c.extInstruction(c.NO_ACTION) time.sleep(.2) c.pan = 512 for i in range(20): c.tilt", "reserved. # # Redistribution and use in source and binary forms, with or", "send output self.ser.write('\\xFF') self.ser.write(chr(rjoy_h)) self.ser.write(chr(rjoy_l)) self.ser.write(chr(ljoy_h)) self.ser.write(chr(ljoy_l)) self.ser.write(chr(buttons)) self.ser.write(chr(ext)) self.ser.write(chr(255 - ((rjoy_h+rjoy_l+ljoy_h+ljoy_l+buttons+ext)%256))) def", "# Redistribution and use in source and binary forms, with or without #", "= CommanderEXT(sys.argv[1]) # Cycle digital ports using extended mode for i in range(8):", "inputs for i in range(8): print c.readAnalog(i) # Read digital inputs print \"Digital:\",", "setDigital(self, id, direction, value): \"\"\" Set a digital pin value. id is 0", "CommEXT.py - ArbotiX Commander Extended Instruction Set Example # Copyright (c) 2008-2010 Vanadium", "0xff: #print \"Oxff found\" return self.readPacket(inst, 1) else: #print \"Oxff NOT found, restart:", "def readAnalog(self, id): \"\"\" Read an analog port, id is 0 to 7.", "Set a digital pin value. id is 0 to 7. value and direction", "i in range(8): print c.readAnalog(i) # Read digital inputs print \"Digital:\", c.readDigital() #", "nor the names of its # contributors may be used to endorse or", "in the # documentation and/or other materials provided with the distribution. 
# *", "= 1 class CommanderEXT(): NO_ACTION = 0x08 pan = 512 tilt = 512", "DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;", "materials provided with the distribution. # * Neither the name of the Vanadium", "def sendPacket(self, rjoy_h, rjoy_l, ljoy_h, ljoy_l, buttons, ext): # send output self.ser.write('\\xFF') self.ser.write(chr(rjoy_h))", "mode == 2: # get value return self.readPacket(inst, 3, ord(d)) elif mode ==", "IN NO EVENT SHALL VANADIUM LABS BE LIABLE FOR ANY DIRECT, INDIRECT, #", "\"\"\" Set a digital pin value. id is 0 to 7. value and", "i in range(8): c.setDigital(i, OUTPUT, HIGH) if i > 2: c.setDigital(i-2, INPUT, LOW)", "SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF", "OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, # OR PROFITS; OR", "Read\" return -1 # now process our byte if mode == 0: #", "print c.readAnalog(i) # Read digital inputs print \"Digital:\", c.readDigital() # Exercise turret for", "self.ser.write(chr(255 - ((rjoy_h+rjoy_l+ljoy_h+ljoy_l+buttons+ext)%256))) def readPacket(self, inst, mode = 0, value = -1): d", "value): \"\"\" Set a digital pin value. id is 0 to 7. 
value", "self.sendPacket(self.pan>>8,self.pan%256,self.tilt>>8,self.tilt%256, 0, inst) def readAnalog(self, id): \"\"\" Read an analog port, id is", "ord(d) == inst: #print \"Instruction found\" return self.readPacket(inst, 2) else: #print \"Instruction NOT", "motorsOff(self): self.extInstruction(0x40) def leftMotor(self, power): \"\"\" Set left motor power, -1 to 1.", "# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE # OR", "NO EVENT SHALL VANADIUM LABS BE LIABLE FOR ANY DIRECT, INDIRECT, # INCIDENTAL,", "binary forms, with or without # modification, are permitted provided that the following", "of source code must retain the above copyright # notice, this list of", "c.pan = 312 + i*20 c.extInstruction(c.NO_ACTION) time.sleep(.2) c.pan = 512 for i in", "python # CommEXT.py - ArbotiX Commander Extended Instruction Set Example # Copyright (c)", "#!/usr/bin/env python # CommEXT.py - ArbotiX Commander Extended Instruction Set Example # Copyright", "print \"Digital:\", c.readDigital() # Exercise turret for i in range(20): c.pan = 312", "from this software without specific prior written permission. # # THIS SOFTWARE IS", "self.ser.write(chr(ljoy_l)) self.ser.write(chr(buttons)) self.ser.write(chr(ext)) self.ser.write(chr(255 - ((rjoy_h+rjoy_l+ljoy_h+ljoy_l+buttons+ext)%256))) def readPacket(self, inst, mode = 0, value", "is 0 to 7. \"\"\" self.extInstruction(0x10 + id) return self.readPacket(0x10 + id) def", "\"\"\" Read all 8 digital ports as a single byte. \"\"\" self.extInstruction(0x1B) return", "# notice, this list of conditions and the following disclaimer. # * Redistributions", "self.extInstruction(0x80 + 4*id + direction*2 + value) if __name__ == \"__main__\": # commanderEXT.py", "Read analog inputs for i in range(8): print c.readAnalog(i) # Read digital inputs", "id is 0 to 7. value and direction are 0 or 1. 
\"\"\"", "# # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS", "IS\" AND # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,", "= serial.Serial() self.ser.baudrate = 38400 self.ser.port = port self.ser.timeout = 0.5 self.ser.open() def", "DAMAGE. import time, sys, serial # Commander definitions BUT_R1 = 1 BUT_R2 =", "value return self.readPacket(inst, 3, ord(d)) elif mode == 3: # get checksum #print", "\"\"\" if power <= 1.0 and power >= -1.0: self.extInstruction(0x70 + int(power*10)) def", "direction are 0 or 1. \"\"\" self.extInstruction(0x80 + 4*id + direction*2 + value)", "-1 # now process our byte if mode == 0: # get our", "+ str(checksum) if checksum % 256 != 255: #print \"Checksum ERROR\" return -1", "inputs print \"Digital:\", c.readDigital() # Exercise turret for i in range(20): c.pan =", "promote products derived # from this software without specific prior written permission. #", "== 2: # get value return self.readPacket(inst, 3, ord(d)) elif mode == 3:", "-1): d = self.ser.read() if d == '': #print \"Fail Read\" return -1", "self.readPacket(inst, 3, ord(d)) elif mode == 3: # get checksum #print \"Checksum found:", "+ value + ord(d) #print \"Checksum computed: \" + str(checksum) if checksum %", ">= -1.0: self.extInstruction(0x50 + int(power*10)) def rightMotor(self, power): \"\"\" Set right motor power,", "the above copyright # notice, this list of conditions and the following disclaimer.", "if i > 2: c.setDigital(i-2, INPUT, LOW) time.sleep(0.25) c.setDigital(4, OUTPUT, HIGH) c.setDigital(6, OUTPUT,", "self.readPacket(inst, 2) else: #print \"Instruction NOT found, restart: \" + str(ord(d)) return self.readPacket(inst,", "2: # get value return self.readPacket(inst, 3, ord(d)) elif mode == 3: #", "to 1. 
\"\"\" if power <= 1.0 and power >= -1.0: self.extInstruction(0x70 +", "THEORY OF # LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE", "SHALL VANADIUM LABS BE LIABLE FOR ANY DIRECT, INDIRECT, # INCIDENTAL, SPECIAL, EXEMPLARY,", "\" + str(ord(d)) checksum = inst + value + ord(d) #print \"Checksum computed:", "power >= -1.0: self.extInstruction(0x50 + int(power*10)) def rightMotor(self, power): \"\"\" Set right motor", "WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN", "\"Instruction found\" return self.readPacket(inst, 2) else: #print \"Instruction NOT found, restart: \" +", "IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND # ANY", "FOR ANY DIRECT, INDIRECT, # INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT", "0 to 7. \"\"\" self.extInstruction(0x10 + id) return self.readPacket(0x10 + id) def readDigital(self):", "self.ser.read() if d == '': #print \"Fail Read\" return -1 # now process", "our instruction if ord(d) == inst: #print \"Instruction found\" return self.readPacket(inst, 2) else:", "TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, # OR", "self.extInstruction(0x50 + int(power*10)) def rightMotor(self, power): \"\"\" Set right motor power, -1 to", "== 3: # get checksum #print \"Checksum found: \" + str(ord(d)) checksum =", "checksum #print \"Checksum found: \" + str(ord(d)) checksum = inst + value +", "4*id + direction*2 + value) if __name__ == \"__main__\": # commanderEXT.py <serialport> c", "self.ser.write(chr(rjoy_l)) self.ser.write(chr(ljoy_h)) self.ser.write(chr(ljoy_l)) self.ser.write(chr(buttons)) self.ser.write(chr(ext)) self.ser.write(chr(255 - ((rjoy_h+rjoy_l+ljoy_h+ljoy_l+buttons+ext)%256))) def readPacket(self, inst, mode =", "permitted provided that the following conditions are met: # * Redistributions of source", "ext): # send output self.ser.write('\\xFF') self.ser.write(chr(rjoy_h)) self.ser.write(chr(rjoy_l)) 
self.ser.write(chr(ljoy_h)) self.ser.write(chr(ljoy_l)) self.ser.write(chr(buttons)) self.ser.write(chr(ext)) self.ser.write(chr(255 -", "+ 4*id + direction*2 + value) if __name__ == \"__main__\": # commanderEXT.py <serialport>", "written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND", "disclaimer. # * Redistributions in binary form must reproduce the above copyright #", "the # documentation and/or other materials provided with the distribution. # * Neither", "ON ANY THEORY OF # LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT", "== '': #print \"Fail Read\" return -1 # now process our byte if", "LIABILITY, OR TORT (INCLUDING NEGLIGENCE # OR OTHERWISE) ARISING IN ANY WAY OUT", "get checksum #print \"Checksum found: \" + str(ord(d)) checksum = inst + value", "LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, #", "that the following conditions are met: # * Redistributions of source code must", "# send output self.ser.write('\\xFF') self.ser.write(chr(rjoy_h)) self.ser.write(chr(rjoy_l)) self.ser.write(chr(ljoy_h)) self.ser.write(chr(ljoy_l)) self.ser.write(chr(buttons)) self.ser.write(chr(ext)) self.ser.write(chr(255 - ((rjoy_h+rjoy_l+ljoy_h+ljoy_l+buttons+ext)%256)))", "copyright # notice, this list of conditions and the following disclaimer in the", "+ str(ord(d)) checksum = inst + value + ord(d) #print \"Checksum computed: \"", "\"Checksum found: \" + str(ord(d)) checksum = inst + value + ord(d) #print", "an analog port, id is 0 to 7. \"\"\" self.extInstruction(0x10 + id) return", "LLC nor the names of its # contributors may be used to endorse", "specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT", "provided with the distribution. # * Neither the name of the Vanadium Labs", "copyright # notice, this list of conditions and the following disclaimer. # *" ]
[ "except ImportError as e: print('Roboschool environments excluded, import error') try: from .opensim_envs import", "* register( id='BanditsX2-v0', kwargs = {'num_bandits' : 2}, entry_point='torch_rl.envs:BanditEnv', ) register( id='BanditsX4-v0', kwargs", "import * from .envs import * register( id='BanditsX2-v0', kwargs = {'num_bandits' : 2},", "register( id='OsimArm3D-v1', kwargs={'visualize': False}, entry_point='osim.env:Arm3DEnv' ) register( id='OsimRun3D-v1', kwargs={'visualize': False}, entry_point='osim.env:Run3DEnv' ) except", "id='OsimArm2D-v1', kwargs={'visualize': False}, entry_point='osim.env:Arm2DEnv' ) register( id='OsimArm3D-v1', kwargs={'visualize': False}, entry_point='osim.env:Arm3DEnv' ) register( id='OsimRun3D-v1',", "kwargs = {}, entry_point='torch_rl.envs:RoboschoolReacher', max_episode_steps=150, reward_threshold=18.0, tags={ \"pg_complexity\": 1*1000000 }, ) except ImportError", "register( id='BanditsX2-v0', kwargs = {'num_bandits' : 2}, entry_point='torch_rl.envs:BanditEnv', ) register( id='BanditsX4-v0', kwargs =", "import * register( id='TRLRoboschoolReacher-v1', kwargs = {}, entry_point='torch_rl.envs:RoboschoolReacher', max_episode_steps=150, reward_threshold=18.0, tags={ \"pg_complexity\": 1*1000000", "entry_point='torch_rl.envs:BanditEnv', ) try: from .roboschool_envs import * register( id='TRLRoboschoolReacher-v1', kwargs = {}, entry_point='torch_rl.envs:RoboschoolReacher',", "kwargs={'visualize': False}, entry_point='osim.env:Arm2DEnv' ) register( id='OsimArm3D-v1', kwargs={'visualize': False}, entry_point='osim.env:Arm3DEnv' ) register( id='OsimRun3D-v1', kwargs={'visualize':", "import * register( id='OsimArm2D-v1', kwargs={'visualize': False}, entry_point='osim.env:Arm2DEnv' ) register( id='OsimArm3D-v1', kwargs={'visualize': False}, entry_point='osim.env:Arm3DEnv'", "register( id='BanditsX8-v0', kwargs = {'num_bandits' : 8}, entry_point='torch_rl.envs:BanditEnv', ) try: from .roboschool_envs import", ") register( 
id='OsimRun3D-v1', kwargs={'visualize': False}, entry_point='osim.env:Run3DEnv' ) except ImportError as e: print('Opensim environments", "kwargs={'visualize': False}, entry_point='osim.env:Run3DEnv' ) except ImportError as e: print('Opensim environments excluded, import error", "kwargs={'visualize': False}, entry_point='osim.env:Arm3DEnv' ) register( id='OsimRun3D-v1', kwargs={'visualize': False}, entry_point='osim.env:Run3DEnv' ) except ImportError as", "False}, entry_point='osim.env:Arm3DEnv' ) register( id='OsimRun3D-v1', kwargs={'visualize': False}, entry_point='osim.env:Run3DEnv' ) except ImportError as e:", "\"pg_complexity\": 1*1000000 }, ) except ImportError as e: print('Roboschool environments excluded, import error')", "= {'num_bandits' : 2}, entry_point='torch_rl.envs:BanditEnv', ) register( id='BanditsX4-v0', kwargs = {'num_bandits' : 4},", "try: from .opensim_envs import * register( id='OsimArm2D-v1', kwargs={'visualize': False}, entry_point='osim.env:Arm2DEnv' ) register( id='OsimArm3D-v1',", ") register( id='OsimArm3D-v1', kwargs={'visualize': False}, entry_point='osim.env:Arm3DEnv' ) register( id='OsimRun3D-v1', kwargs={'visualize': False}, entry_point='osim.env:Run3DEnv' )", "from .logger import * from .envs import * register( id='BanditsX2-v0', kwargs = {'num_bandits'", "reward_threshold=18.0, tags={ \"pg_complexity\": 1*1000000 }, ) except ImportError as e: print('Roboschool environments excluded,", "4}, entry_point='torch_rl.envs:BanditEnv', ) register( id='BanditsX8-v0', kwargs = {'num_bandits' : 8}, entry_point='torch_rl.envs:BanditEnv', ) try:", ".opensim_envs import * register( id='OsimArm2D-v1', kwargs={'visualize': False}, entry_point='osim.env:Arm2DEnv' ) register( id='OsimArm3D-v1', kwargs={'visualize': False},", "entry_point='osim.env:Arm3DEnv' ) register( id='OsimRun3D-v1', kwargs={'visualize': False}, entry_point='osim.env:Run3DEnv' ) except ImportError as e: print('Opensim", "import * from .logger import * from .envs import * 
register( id='BanditsX2-v0', kwargs", "max_episode_steps=150, reward_threshold=18.0, tags={ \"pg_complexity\": 1*1000000 }, ) except ImportError as e: print('Roboschool environments", "}, ) except ImportError as e: print('Roboschool environments excluded, import error') try: from", ") try: from .roboschool_envs import * register( id='TRLRoboschoolReacher-v1', kwargs = {}, entry_point='torch_rl.envs:RoboschoolReacher', max_episode_steps=150,", ".logger import * from .envs import * register( id='BanditsX2-v0', kwargs = {'num_bandits' :", "ImportError as e: print('Roboschool environments excluded, import error') try: from .opensim_envs import *", "gym.envs.registration import register from .wrappers import * from .logger import * from .envs", "from .envs import * register( id='BanditsX2-v0', kwargs = {'num_bandits' : 2}, entry_point='torch_rl.envs:BanditEnv', )", "print('Roboschool environments excluded, import error') try: from .opensim_envs import * register( id='OsimArm2D-v1', kwargs={'visualize':", "id='BanditsX8-v0', kwargs = {'num_bandits' : 8}, entry_point='torch_rl.envs:BanditEnv', ) try: from .roboschool_envs import *", "import error') try: from .opensim_envs import * register( id='OsimArm2D-v1', kwargs={'visualize': False}, entry_point='osim.env:Arm2DEnv' )", "kwargs = {'num_bandits' : 8}, entry_point='torch_rl.envs:BanditEnv', ) try: from .roboschool_envs import * register(", "register from .wrappers import * from .logger import * from .envs import *", "{'num_bandits' : 4}, entry_point='torch_rl.envs:BanditEnv', ) register( id='BanditsX8-v0', kwargs = {'num_bandits' : 8}, entry_point='torch_rl.envs:BanditEnv',", "import register from .wrappers import * from .logger import * from .envs import", ") except ImportError as e: print('Roboschool environments excluded, import error') try: from .opensim_envs", "error') try: from .opensim_envs import * register( id='OsimArm2D-v1', kwargs={'visualize': False}, entry_point='osim.env:Arm2DEnv' ) register(", "8}, 
entry_point='torch_rl.envs:BanditEnv', ) try: from .roboschool_envs import * register( id='TRLRoboschoolReacher-v1', kwargs = {},", "False}, entry_point='osim.env:Arm2DEnv' ) register( id='OsimArm3D-v1', kwargs={'visualize': False}, entry_point='osim.env:Arm3DEnv' ) register( id='OsimRun3D-v1', kwargs={'visualize': False},", "* register( id='OsimArm2D-v1', kwargs={'visualize': False}, entry_point='osim.env:Arm2DEnv' ) register( id='OsimArm3D-v1', kwargs={'visualize': False}, entry_point='osim.env:Arm3DEnv' )", "register( id='BanditsX4-v0', kwargs = {'num_bandits' : 4}, entry_point='torch_rl.envs:BanditEnv', ) register( id='BanditsX8-v0', kwargs =", "import * register( id='BanditsX2-v0', kwargs = {'num_bandits' : 2}, entry_point='torch_rl.envs:BanditEnv', ) register( id='BanditsX4-v0',", "* from .envs import * register( id='BanditsX2-v0', kwargs = {'num_bandits' : 2}, entry_point='torch_rl.envs:BanditEnv',", "from .wrappers import * from .logger import * from .envs import * register(", "id='OsimRun3D-v1', kwargs={'visualize': False}, entry_point='osim.env:Run3DEnv' ) except ImportError as e: print('Opensim environments excluded, import", "entry_point='osim.env:Run3DEnv' ) except ImportError as e: print('Opensim environments excluded, import error ', e)", ": 4}, entry_point='torch_rl.envs:BanditEnv', ) register( id='BanditsX8-v0', kwargs = {'num_bandits' : 8}, entry_point='torch_rl.envs:BanditEnv', )", ": 2}, entry_point='torch_rl.envs:BanditEnv', ) register( id='BanditsX4-v0', kwargs = {'num_bandits' : 4}, entry_point='torch_rl.envs:BanditEnv', )", "* register( id='TRLRoboschoolReacher-v1', kwargs = {}, entry_point='torch_rl.envs:RoboschoolReacher', max_episode_steps=150, reward_threshold=18.0, tags={ \"pg_complexity\": 1*1000000 },", "{'num_bandits' : 8}, entry_point='torch_rl.envs:BanditEnv', ) try: from .roboschool_envs import * register( id='TRLRoboschoolReacher-v1', kwargs", "False}, entry_point='osim.env:Run3DEnv' ) except ImportError as e: print('Opensim 
environments excluded, import error ',", "= {'num_bandits' : 4}, entry_point='torch_rl.envs:BanditEnv', ) register( id='BanditsX8-v0', kwargs = {'num_bandits' : 8},", "* from .logger import * from .envs import * register( id='BanditsX2-v0', kwargs =", "id='BanditsX2-v0', kwargs = {'num_bandits' : 2}, entry_point='torch_rl.envs:BanditEnv', ) register( id='BanditsX4-v0', kwargs = {'num_bandits'", "tags={ \"pg_complexity\": 1*1000000 }, ) except ImportError as e: print('Roboschool environments excluded, import", ".wrappers import * from .logger import * from .envs import * register( id='BanditsX2-v0',", ") register( id='BanditsX4-v0', kwargs = {'num_bandits' : 4}, entry_point='torch_rl.envs:BanditEnv', ) register( id='BanditsX8-v0', kwargs", "from gym.envs.registration import register from .wrappers import * from .logger import * from", "from .roboschool_envs import * register( id='TRLRoboschoolReacher-v1', kwargs = {}, entry_point='torch_rl.envs:RoboschoolReacher', max_episode_steps=150, reward_threshold=18.0, tags={", "register( id='OsimRun3D-v1', kwargs={'visualize': False}, entry_point='osim.env:Run3DEnv' ) except ImportError as e: print('Opensim environments excluded,", "= {}, entry_point='torch_rl.envs:RoboschoolReacher', max_episode_steps=150, reward_threshold=18.0, tags={ \"pg_complexity\": 1*1000000 }, ) except ImportError as", "entry_point='torch_rl.envs:BanditEnv', ) register( id='BanditsX8-v0', kwargs = {'num_bandits' : 8}, entry_point='torch_rl.envs:BanditEnv', ) try: from", "environments excluded, import error') try: from .opensim_envs import * register( id='OsimArm2D-v1', kwargs={'visualize': False},", "{'num_bandits' : 2}, entry_point='torch_rl.envs:BanditEnv', ) register( id='BanditsX4-v0', kwargs = {'num_bandits' : 4}, entry_point='torch_rl.envs:BanditEnv',", "id='OsimArm3D-v1', kwargs={'visualize': False}, entry_point='osim.env:Arm3DEnv' ) register( id='OsimRun3D-v1', kwargs={'visualize': False}, entry_point='osim.env:Run3DEnv' ) except 
ImportError", "entry_point='torch_rl.envs:BanditEnv', ) register( id='BanditsX4-v0', kwargs = {'num_bandits' : 4}, entry_point='torch_rl.envs:BanditEnv', ) register( id='BanditsX8-v0',", ".roboschool_envs import * register( id='TRLRoboschoolReacher-v1', kwargs = {}, entry_point='torch_rl.envs:RoboschoolReacher', max_episode_steps=150, reward_threshold=18.0, tags={ \"pg_complexity\":", "= {'num_bandits' : 8}, entry_point='torch_rl.envs:BanditEnv', ) try: from .roboschool_envs import * register( id='TRLRoboschoolReacher-v1',", "id='TRLRoboschoolReacher-v1', kwargs = {}, entry_point='torch_rl.envs:RoboschoolReacher', max_episode_steps=150, reward_threshold=18.0, tags={ \"pg_complexity\": 1*1000000 }, ) except", "as e: print('Roboschool environments excluded, import error') try: from .opensim_envs import * register(", ".envs import * register( id='BanditsX2-v0', kwargs = {'num_bandits' : 2}, entry_point='torch_rl.envs:BanditEnv', ) register(", "2}, entry_point='torch_rl.envs:BanditEnv', ) register( id='BanditsX4-v0', kwargs = {'num_bandits' : 4}, entry_point='torch_rl.envs:BanditEnv', ) register(", "try: from .roboschool_envs import * register( id='TRLRoboschoolReacher-v1', kwargs = {}, entry_point='torch_rl.envs:RoboschoolReacher', max_episode_steps=150, reward_threshold=18.0,", "excluded, import error') try: from .opensim_envs import * register( id='OsimArm2D-v1', kwargs={'visualize': False}, entry_point='osim.env:Arm2DEnv'", "register( id='TRLRoboschoolReacher-v1', kwargs = {}, entry_point='torch_rl.envs:RoboschoolReacher', max_episode_steps=150, reward_threshold=18.0, tags={ \"pg_complexity\": 1*1000000 }, )", "from .opensim_envs import * register( id='OsimArm2D-v1', kwargs={'visualize': False}, entry_point='osim.env:Arm2DEnv' ) register( id='OsimArm3D-v1', kwargs={'visualize':", "register( id='OsimArm2D-v1', kwargs={'visualize': False}, entry_point='osim.env:Arm2DEnv' ) register( id='OsimArm3D-v1', kwargs={'visualize': False}, entry_point='osim.env:Arm3DEnv' ) 
register(", "kwargs = {'num_bandits' : 2}, entry_point='torch_rl.envs:BanditEnv', ) register( id='BanditsX4-v0', kwargs = {'num_bandits' :", "id='BanditsX4-v0', kwargs = {'num_bandits' : 4}, entry_point='torch_rl.envs:BanditEnv', ) register( id='BanditsX8-v0', kwargs = {'num_bandits'", "1*1000000 }, ) except ImportError as e: print('Roboschool environments excluded, import error') try:", ") register( id='BanditsX8-v0', kwargs = {'num_bandits' : 8}, entry_point='torch_rl.envs:BanditEnv', ) try: from .roboschool_envs", "kwargs = {'num_bandits' : 4}, entry_point='torch_rl.envs:BanditEnv', ) register( id='BanditsX8-v0', kwargs = {'num_bandits' :", "entry_point='torch_rl.envs:RoboschoolReacher', max_episode_steps=150, reward_threshold=18.0, tags={ \"pg_complexity\": 1*1000000 }, ) except ImportError as e: print('Roboschool", "{}, entry_point='torch_rl.envs:RoboschoolReacher', max_episode_steps=150, reward_threshold=18.0, tags={ \"pg_complexity\": 1*1000000 }, ) except ImportError as e:", "e: print('Roboschool environments excluded, import error') try: from .opensim_envs import * register( id='OsimArm2D-v1',", "entry_point='osim.env:Arm2DEnv' ) register( id='OsimArm3D-v1', kwargs={'visualize': False}, entry_point='osim.env:Arm3DEnv' ) register( id='OsimRun3D-v1', kwargs={'visualize': False}, entry_point='osim.env:Run3DEnv'", ": 8}, entry_point='torch_rl.envs:BanditEnv', ) try: from .roboschool_envs import * register( id='TRLRoboschoolReacher-v1', kwargs =" ]
[ "\"search/searched.html\" def get_context_data(self, *args, **kwargs): context = super(SearchProductView, self).get_context_data(*args, **kwargs) print(context) context['query'] =", "*args, **kwargs): context = super(SearchProductView, self).get_context_data(*args, **kwargs) print(context) context['query'] = self.request.GET.get('q') return context", "return context def get_queryset(self, *args, **kwargs): request = self.request dict = request.GET query", "django.shortcuts import render from products.models import Product from django.views.generic.list import ListView from django.db.models", "*args, **kwargs): request = self.request dict = request.GET query = dict.get('q', None) if", "self.request dict = request.GET query = dict.get('q', None) if query is not None:", "products.models import Product from django.views.generic.list import ListView from django.db.models import Q class SearchProductView(ListView):", "context def get_queryset(self, *args, **kwargs): request = self.request dict = request.GET query =", "def get_queryset(self, *args, **kwargs): request = self.request dict = request.GET query = dict.get('q',", "Product from django.views.generic.list import ListView from django.db.models import Q class SearchProductView(ListView): queryset =", "Product.objects.all() template_name = \"search/searched.html\" def get_context_data(self, *args, **kwargs): context = super(SearchProductView, self).get_context_data(*args, **kwargs)", "import Q class SearchProductView(ListView): queryset = Product.objects.all() template_name = \"search/searched.html\" def get_context_data(self, *args,", "get_context_data(self, *args, **kwargs): context = super(SearchProductView, self).get_context_data(*args, **kwargs) print(context) context['query'] = self.request.GET.get('q') return", "template_name = \"search/searched.html\" def get_context_data(self, *args, **kwargs): context = super(SearchProductView, self).get_context_data(*args, **kwargs) print(context)", "get_queryset(self, 
*args, **kwargs): request = self.request dict = request.GET query = dict.get('q', None)", "= Product.objects.all() template_name = \"search/searched.html\" def get_context_data(self, *args, **kwargs): context = super(SearchProductView, self).get_context_data(*args,", "= request.GET query = dict.get('q', None) if query is not None: return Product.objects.search(query)", "query = dict.get('q', None) if query is not None: return Product.objects.search(query) return Product.objects.featured()", "def get_context_data(self, *args, **kwargs): context = super(SearchProductView, self).get_context_data(*args, **kwargs) print(context) context['query'] = self.request.GET.get('q')", "super(SearchProductView, self).get_context_data(*args, **kwargs) print(context) context['query'] = self.request.GET.get('q') return context def get_queryset(self, *args, **kwargs):", "print(context) context['query'] = self.request.GET.get('q') return context def get_queryset(self, *args, **kwargs): request = self.request", "= super(SearchProductView, self).get_context_data(*args, **kwargs) print(context) context['query'] = self.request.GET.get('q') return context def get_queryset(self, *args,", "**kwargs): request = self.request dict = request.GET query = dict.get('q', None) if query", "request.GET query = dict.get('q', None) if query is not None: return Product.objects.search(query) return", "**kwargs) print(context) context['query'] = self.request.GET.get('q') return context def get_queryset(self, *args, **kwargs): request =", "from django.shortcuts import render from products.models import Product from django.views.generic.list import ListView from", "dict = request.GET query = dict.get('q', None) if query is not None: return", "context = super(SearchProductView, self).get_context_data(*args, **kwargs) print(context) context['query'] = self.request.GET.get('q') return context def get_queryset(self,", "import render from products.models import Product from django.views.generic.list import 
ListView from django.db.models import", "= self.request dict = request.GET query = dict.get('q', None) if query is not", "= \"search/searched.html\" def get_context_data(self, *args, **kwargs): context = super(SearchProductView, self).get_context_data(*args, **kwargs) print(context) context['query']", "SearchProductView(ListView): queryset = Product.objects.all() template_name = \"search/searched.html\" def get_context_data(self, *args, **kwargs): context =", "django.db.models import Q class SearchProductView(ListView): queryset = Product.objects.all() template_name = \"search/searched.html\" def get_context_data(self,", "self).get_context_data(*args, **kwargs) print(context) context['query'] = self.request.GET.get('q') return context def get_queryset(self, *args, **kwargs): request", "ListView from django.db.models import Q class SearchProductView(ListView): queryset = Product.objects.all() template_name = \"search/searched.html\"", "from products.models import Product from django.views.generic.list import ListView from django.db.models import Q class", "render from products.models import Product from django.views.generic.list import ListView from django.db.models import Q", "from django.db.models import Q class SearchProductView(ListView): queryset = Product.objects.all() template_name = \"search/searched.html\" def", "**kwargs): context = super(SearchProductView, self).get_context_data(*args, **kwargs) print(context) context['query'] = self.request.GET.get('q') return context def", "context['query'] = self.request.GET.get('q') return context def get_queryset(self, *args, **kwargs): request = self.request dict", "import ListView from django.db.models import Q class SearchProductView(ListView): queryset = Product.objects.all() template_name =", "Q class SearchProductView(ListView): queryset = Product.objects.all() template_name = \"search/searched.html\" def get_context_data(self, *args, **kwargs):", "queryset = Product.objects.all() template_name = 
\"search/searched.html\" def get_context_data(self, *args, **kwargs): context = super(SearchProductView,", "= self.request.GET.get('q') return context def get_queryset(self, *args, **kwargs): request = self.request dict =", "request = self.request dict = request.GET query = dict.get('q', None) if query is", "from django.views.generic.list import ListView from django.db.models import Q class SearchProductView(ListView): queryset = Product.objects.all()", "django.views.generic.list import ListView from django.db.models import Q class SearchProductView(ListView): queryset = Product.objects.all() template_name", "import Product from django.views.generic.list import ListView from django.db.models import Q class SearchProductView(ListView): queryset", "self.request.GET.get('q') return context def get_queryset(self, *args, **kwargs): request = self.request dict = request.GET", "class SearchProductView(ListView): queryset = Product.objects.all() template_name = \"search/searched.html\" def get_context_data(self, *args, **kwargs): context" ]
[ "from covid_api import check_covid_version logging.basicConfig(filename='pysys.log',level=logging.INFO, format='%(asctime)s %(levelname)-8s%(message)s', datefmt='%Y-%m-%d %H:%M:%S') def test_api() -> bool:", "news_api import check_news_version from weather_api import check_weather_version from covid_api import check_covid_version logging.basicConfig(filename='pysys.log',level=logging.INFO, format='%(asctime)s", "abort the program\"\"\" weather = False news = False covid = False if", "to date (check_news_version())\") news = True else: logging.info(\"News API version is not up", "return bool(weather and news and covid) if __name__ == '__main__': logging.info(\"Test API Module", "logging.info(\"Covid-19 API version is not up to date (check_covid_version()) - ACTION REQUIRED\") return", "to date (check_weather_version()) - ACTION REQUIRED\") if check_news_version(): logging.info(\"News API version is up", "set up and if there is an error, it is logged and the", "API version is not up to date (check_news_version()) - ACTION REQUIRED\") if check_covid_version():", "see if each API can be properly set up and if there is", "False covid = False if check_weather_version(): logging.info(\"Weather API version is up to date", "version is not up to date (check_weather_version()) - ACTION REQUIRED\") if check_news_version(): logging.info(\"News", "is up to date (check_news_version())\") news = True else: logging.info(\"News API version is", "covid = True else: logging.info(\"Covid-19 API version is not up to date (check_covid_version())", "to date (check_news_version()) - ACTION REQUIRED\") if check_covid_version(): logging.info(\"Covid-19 API version is up", "= True else: logging.info(\"Weather API version is not up to date (check_weather_version()) -", "API version is not up to date (check_covid_version()) - ACTION REQUIRED\") return bool(weather", "to date (check_covid_version())\") covid = True else: logging.info(\"Covid-19 API version is not up", "news = False covid = 
False if check_weather_version(): logging.info(\"Weather API version is up", "ACTION REQUIRED\") if check_news_version(): logging.info(\"News API version is up to date (check_news_version())\") news", "(check_news_version()) - ACTION REQUIRED\") if check_covid_version(): logging.info(\"Covid-19 API version is up to date", "\"\"\"this module is designed to test the version of the APIs required to", "the program\"\"\" weather = False news = False covid = False if check_weather_version():", "API version is not up to date (check_weather_version()) - ACTION REQUIRED\") if check_news_version():", "REQUIRED\") if check_covid_version(): logging.info(\"Covid-19 API version is up to date (check_covid_version())\") covid =", "not up to date (check_covid_version()) - ACTION REQUIRED\") return bool(weather and news and", "there is an error, it is logged and the user is told to", "covid_api import check_covid_version logging.basicConfig(filename='pysys.log',level=logging.INFO, format='%(asctime)s %(levelname)-8s%(message)s', datefmt='%Y-%m-%d %H:%M:%S') def test_api() -> bool: \"\"\"this", "%H:%M:%S') def test_api() -> bool: \"\"\"this function checks to see if each API", "True else: logging.info(\"Covid-19 API version is not up to date (check_covid_version()) - ACTION", "be run\"\"\" import logging from news_api import check_news_version from weather_api import check_weather_version from", "date (check_covid_version()) - ACTION REQUIRED\") return bool(weather and news and covid) if __name__", "REQUIRED\") return bool(weather and news and covid) if __name__ == '__main__': logging.info(\"Test API", "datefmt='%Y-%m-%d %H:%M:%S') def test_api() -> bool: \"\"\"this function checks to see if each", "the user is told to abort the program\"\"\" weather = False news =", "def test_api() -> bool: \"\"\"this function checks to see if each API can", "the APIs required to see if they are up to date so the", "- ACTION REQUIRED\") if check_news_version(): logging.info(\"News API version is up 
to date (check_news_version())\")", "- ACTION REQUIRED\") if check_covid_version(): logging.info(\"Covid-19 API version is up to date (check_covid_version())\")", "up to date (check_news_version())\") news = True else: logging.info(\"News API version is not", "up to date (check_weather_version()) - ACTION REQUIRED\") if check_news_version(): logging.info(\"News API version is", "bool(weather and news and covid) if __name__ == '__main__': logging.info(\"Test API Module Tested\")", "is logged and the user is told to abort the program\"\"\" weather =", "be properly set up and if there is an error, it is logged", "can be run\"\"\" import logging from news_api import check_news_version from weather_api import check_weather_version", "API version is up to date (check_news_version())\") news = True else: logging.info(\"News API", "= False news = False covid = False if check_weather_version(): logging.info(\"Weather API version", "module is designed to test the version of the APIs required to see", "if check_news_version(): logging.info(\"News API version is up to date (check_news_version())\") news = True", "True else: logging.info(\"News API version is not up to date (check_news_version()) - ACTION", "if each API can be properly set up and if there is an", "to see if each API can be properly set up and if there", "see if they are up to date so the program can be run\"\"\"", "= False if check_weather_version(): logging.info(\"Weather API version is up to date (check_weather_version())\") weather", "import check_covid_version logging.basicConfig(filename='pysys.log',level=logging.INFO, format='%(asctime)s %(levelname)-8s%(message)s', datefmt='%Y-%m-%d %H:%M:%S') def test_api() -> bool: \"\"\"this function", "True else: logging.info(\"Weather API version is not up to date (check_weather_version()) - ACTION", "logging.info(\"Weather API version is up to date (check_weather_version())\") weather = True else: logging.info(\"Weather", "else: logging.info(\"Weather API version 
is not up to date (check_weather_version()) - ACTION REQUIRED\")", "(check_news_version())\") news = True else: logging.info(\"News API version is not up to date", "(check_weather_version())\") weather = True else: logging.info(\"Weather API version is not up to date", "= True else: logging.info(\"Covid-19 API version is not up to date (check_covid_version()) -", "program\"\"\" weather = False news = False covid = False if check_weather_version(): logging.info(\"Weather", "False news = False covid = False if check_weather_version(): logging.info(\"Weather API version is", "from weather_api import check_weather_version from covid_api import check_covid_version logging.basicConfig(filename='pysys.log',level=logging.INFO, format='%(asctime)s %(levelname)-8s%(message)s', datefmt='%Y-%m-%d %H:%M:%S')", "logging from news_api import check_news_version from weather_api import check_weather_version from covid_api import check_covid_version", "logging.info(\"News API version is not up to date (check_news_version()) - ACTION REQUIRED\") if", "function checks to see if each API can be properly set up and", "logging.info(\"Covid-19 API version is up to date (check_covid_version())\") covid = True else: logging.info(\"Covid-19", "is not up to date (check_covid_version()) - ACTION REQUIRED\") return bool(weather and news", "- ACTION REQUIRED\") return bool(weather and news and covid) if __name__ == '__main__':", "-> bool: \"\"\"this function checks to see if each API can be properly", "check_news_version from weather_api import check_weather_version from covid_api import check_covid_version logging.basicConfig(filename='pysys.log',level=logging.INFO, format='%(asctime)s %(levelname)-8s%(message)s', datefmt='%Y-%m-%d", "weather_api import check_weather_version from covid_api import check_covid_version logging.basicConfig(filename='pysys.log',level=logging.INFO, format='%(asctime)s %(levelname)-8s%(message)s', datefmt='%Y-%m-%d %H:%M:%S') def", "check_covid_version(): 
logging.info(\"Covid-19 API version is up to date (check_covid_version())\") covid = True else:", "weather = False news = False covid = False if check_weather_version(): logging.info(\"Weather API", "ACTION REQUIRED\") if check_covid_version(): logging.info(\"Covid-19 API version is up to date (check_covid_version())\") covid", "version is up to date (check_covid_version())\") covid = True else: logging.info(\"Covid-19 API version", "up and if there is an error, it is logged and the user", "(check_covid_version()) - ACTION REQUIRED\") return bool(weather and news and covid) if __name__ ==", "covid = False if check_weather_version(): logging.info(\"Weather API version is up to date (check_weather_version())\")", "API version is up to date (check_covid_version())\") covid = True else: logging.info(\"Covid-19 API", "up to date so the program can be run\"\"\" import logging from news_api", "weather = True else: logging.info(\"Weather API version is not up to date (check_weather_version())", "error, it is logged and the user is told to abort the program\"\"\"", "to date (check_covid_version()) - ACTION REQUIRED\") return bool(weather and news and covid) if", "if they are up to date so the program can be run\"\"\" import", "checks to see if each API can be properly set up and if", "if check_weather_version(): logging.info(\"Weather API version is up to date (check_weather_version())\") weather = True", "date (check_covid_version())\") covid = True else: logging.info(\"Covid-19 API version is not up to", "date (check_weather_version()) - ACTION REQUIRED\") if check_news_version(): logging.info(\"News API version is up to", "logging.info(\"News API version is up to date (check_news_version())\") news = True else: logging.info(\"News", "logging.basicConfig(filename='pysys.log',level=logging.INFO, format='%(asctime)s %(levelname)-8s%(message)s', datefmt='%Y-%m-%d %H:%M:%S') def test_api() -> bool: \"\"\"this function checks to", "run\"\"\" import logging from news_api import 
check_news_version from weather_api import check_weather_version from covid_api", "is an error, it is logged and the user is told to abort", "and if there is an error, it is logged and the user is", "bool: \"\"\"this function checks to see if each API can be properly set", "up to date (check_news_version()) - ACTION REQUIRED\") if check_covid_version(): logging.info(\"Covid-19 API version is", "news and covid) if __name__ == '__main__': logging.info(\"Test API Module Tested\") print(test_api())#tests the", "are up to date so the program can be run\"\"\" import logging from", "date (check_news_version()) - ACTION REQUIRED\") if check_covid_version(): logging.info(\"Covid-19 API version is up to", "%(levelname)-8s%(message)s', datefmt='%Y-%m-%d %H:%M:%S') def test_api() -> bool: \"\"\"this function checks to see if", "not up to date (check_news_version()) - ACTION REQUIRED\") if check_covid_version(): logging.info(\"Covid-19 API version", "they are up to date so the program can be run\"\"\" import logging", "import check_news_version from weather_api import check_weather_version from covid_api import check_covid_version logging.basicConfig(filename='pysys.log',level=logging.INFO, format='%(asctime)s %(levelname)-8s%(message)s',", "and news and covid) if __name__ == '__main__': logging.info(\"Test API Module Tested\") print(test_api())#tests", "= False covid = False if check_weather_version(): logging.info(\"Weather API version is up to", "check_news_version(): logging.info(\"News API version is up to date (check_news_version())\") news = True else:", "check_weather_version(): logging.info(\"Weather API version is up to date (check_weather_version())\") weather = True else:", "an error, it is logged and the user is told to abort the", "version is not up to date (check_covid_version()) - ACTION REQUIRED\") return bool(weather and", "date so the program can be run\"\"\" import logging from news_api import check_news_version", "False if check_weather_version(): 
logging.info(\"Weather API version is up to date (check_weather_version())\") weather =", "of the APIs required to see if they are up to date so", "logged and the user is told to abort the program\"\"\" weather = False", "user is told to abort the program\"\"\" weather = False news = False", "import check_weather_version from covid_api import check_covid_version logging.basicConfig(filename='pysys.log',level=logging.INFO, format='%(asctime)s %(levelname)-8s%(message)s', datefmt='%Y-%m-%d %H:%M:%S') def test_api()", "API version is up to date (check_weather_version())\") weather = True else: logging.info(\"Weather API", "is told to abort the program\"\"\" weather = False news = False covid", "version is up to date (check_weather_version())\") weather = True else: logging.info(\"Weather API version", "to abort the program\"\"\" weather = False news = False covid = False", "to date (check_weather_version())\") weather = True else: logging.info(\"Weather API version is not up", "format='%(asctime)s %(levelname)-8s%(message)s', datefmt='%Y-%m-%d %H:%M:%S') def test_api() -> bool: \"\"\"this function checks to see", "is not up to date (check_news_version()) - ACTION REQUIRED\") if check_covid_version(): logging.info(\"Covid-19 API", "is not up to date (check_weather_version()) - ACTION REQUIRED\") if check_news_version(): logging.info(\"News API", "up to date (check_covid_version()) - ACTION REQUIRED\") return bool(weather and news and covid)", "and covid) if __name__ == '__main__': logging.info(\"Test API Module Tested\") print(test_api())#tests the function", "not up to date (check_weather_version()) - ACTION REQUIRED\") if check_news_version(): logging.info(\"News API version", "if check_covid_version(): logging.info(\"Covid-19 API version is up to date (check_covid_version())\") covid = True", "up to date (check_covid_version())\") covid = True else: logging.info(\"Covid-19 API version is not", "designed to test the version of the APIs required to see if they", "each 
API can be properly set up and if there is an error,", "REQUIRED\") if check_news_version(): logging.info(\"News API version is up to date (check_news_version())\") news =", "version is up to date (check_news_version())\") news = True else: logging.info(\"News API version", "from news_api import check_news_version from weather_api import check_weather_version from covid_api import check_covid_version logging.basicConfig(filename='pysys.log',level=logging.INFO,", "to see if they are up to date so the program can be", "API can be properly set up and if there is an error, it", "news = True else: logging.info(\"News API version is not up to date (check_news_version())", "else: logging.info(\"News API version is not up to date (check_news_version()) - ACTION REQUIRED\")", "\"\"\"this function checks to see if each API can be properly set up", "to date so the program can be run\"\"\" import logging from news_api import", "is up to date (check_weather_version())\") weather = True else: logging.info(\"Weather API version is", "= True else: logging.info(\"News API version is not up to date (check_news_version()) -", "is up to date (check_covid_version())\") covid = True else: logging.info(\"Covid-19 API version is", "else: logging.info(\"Covid-19 API version is not up to date (check_covid_version()) - ACTION REQUIRED\")", "logging.info(\"Weather API version is not up to date (check_weather_version()) - ACTION REQUIRED\") if", "up to date (check_weather_version())\") weather = True else: logging.info(\"Weather API version is not", "check_weather_version from covid_api import check_covid_version logging.basicConfig(filename='pysys.log',level=logging.INFO, format='%(asctime)s %(levelname)-8s%(message)s', datefmt='%Y-%m-%d %H:%M:%S') def test_api() ->", "date (check_news_version())\") news = True else: logging.info(\"News API version is not up to", "the version of the APIs required to see if they are up to", "if there is an error, it is logged and the user is told", 
"(check_covid_version())\") covid = True else: logging.info(\"Covid-19 API version is not up to date", "so the program can be run\"\"\" import logging from news_api import check_news_version from", "to test the version of the APIs required to see if they are", "version of the APIs required to see if they are up to date", "required to see if they are up to date so the program can", "test_api() -> bool: \"\"\"this function checks to see if each API can be", "is designed to test the version of the APIs required to see if", "it is logged and the user is told to abort the program\"\"\" weather", "date (check_weather_version())\") weather = True else: logging.info(\"Weather API version is not up to", "can be properly set up and if there is an error, it is", "told to abort the program\"\"\" weather = False news = False covid =", "test the version of the APIs required to see if they are up", "APIs required to see if they are up to date so the program", "version is not up to date (check_news_version()) - ACTION REQUIRED\") if check_covid_version(): logging.info(\"Covid-19", "program can be run\"\"\" import logging from news_api import check_news_version from weather_api import", "check_covid_version logging.basicConfig(filename='pysys.log',level=logging.INFO, format='%(asctime)s %(levelname)-8s%(message)s', datefmt='%Y-%m-%d %H:%M:%S') def test_api() -> bool: \"\"\"this function checks", "the program can be run\"\"\" import logging from news_api import check_news_version from weather_api", "(check_weather_version()) - ACTION REQUIRED\") if check_news_version(): logging.info(\"News API version is up to date", "import logging from news_api import check_news_version from weather_api import check_weather_version from covid_api import", "properly set up and if there is an error, it is logged and", "and the user is told to abort the program\"\"\" weather = False news", "ACTION REQUIRED\") return bool(weather and news and covid) if __name__ == '__main__': logging.info(\"Test" ]
[ "_parse_constituents(constituents): return [Constituent( name=c['name'], description=c.get('description', ''), amplitude=float(c['amplitude']), phase=float(c['phase']), speed=float(c['speed'])) for c in constituents]", "phase=float(c['phase']), speed=float(c['speed'])) for c in constituents] def to_hours(timedelta): return timedelta.total_seconds() / 3600 def", "point in time. \"\"\" assert when.tzinfo == pytz.UTC, 'datetime timezone must be UTC'", "data['height_units'] == 'metres' self.time_datum = parse_datetime(data['time_datum']) self.constituents = TidalModel._parse_constituents( data['constituents']) @staticmethod def _parse_constituents(constituents):", "time. \"\"\" assert when.tzinfo == pytz.UTC, 'datetime timezone must be UTC' t_hours =", "def __unicode__(self): return '<TidalModel, {} constituents>'.format(len(self.constituents)) def __str__(self): return self.__unicode__().encode('ascii') def _parse(self, data):", "collections import namedtuple Constituent = namedtuple( 'Constituent', 'name,description,amplitude,phase,speed') class TidalModel(object): def __init__(self, constituent_data):", "t_hours), self.constituents) return sum(amplitudes) def __unicode__(self): return '<TidalModel, {} constituents>'.format(len(self.constituents)) def __str__(self): return", "import unicode_literals import pytz import math from functools import partial from dateutil.parser import", "data): assert data['height_units'] == 'metres' self.time_datum = parse_datetime(data['time_datum']) self.constituents = TidalModel._parse_constituents( data['constituents']) @staticmethod", "given point in time. \"\"\" assert when.tzinfo == pytz.UTC, 'datetime timezone must be", "= None self._parse(constituent_data) def predict(self, when): \"\"\" Predict a tidal height at a", "[Constituent( name=c['name'], description=c.get('description', ''), amplitude=float(c['amplitude']), phase=float(c['phase']), speed=float(c['speed'])) for c in constituents] def to_hours(timedelta):", "in time. 
\"\"\" assert when.tzinfo == pytz.UTC, 'datetime timezone must be UTC' t_hours", "= namedtuple( 'Constituent', 'name,description,amplitude,phase,speed') class TidalModel(object): def __init__(self, constituent_data): self.constituents = None self.time_datum", "= TidalModel._parse_constituents( data['constituents']) @staticmethod def _parse_constituents(constituents): return [Constituent( name=c['name'], description=c.get('description', ''), amplitude=float(c['amplitude']), phase=float(c['phase']),", "be UTC' t_hours = to_hours(when - self.time_datum) amplitudes = map( partial(calculate_amplitude, t_hours), self.constituents)", "__str__(self): return self.__unicode__().encode('ascii') def _parse(self, data): assert data['height_units'] == 'metres' self.time_datum = parse_datetime(data['time_datum'])", "must be UTC' t_hours = to_hours(when - self.time_datum) amplitudes = map( partial(calculate_amplitude, t_hours),", "return [Constituent( name=c['name'], description=c.get('description', ''), amplitude=float(c['amplitude']), phase=float(c['phase']), speed=float(c['speed'])) for c in constituents] def", "parse_datetime from collections import namedtuple Constituent = namedtuple( 'Constituent', 'name,description,amplitude,phase,speed') class TidalModel(object): def", "from dateutil.parser import parse as parse_datetime from collections import namedtuple Constituent = namedtuple(", "def __str__(self): return self.__unicode__().encode('ascii') def _parse(self, data): assert data['height_units'] == 'metres' self.time_datum =", "return '<TidalModel, {} constituents>'.format(len(self.constituents)) def __str__(self): return self.__unicode__().encode('ascii') def _parse(self, data): assert data['height_units']", "for c in constituents] def to_hours(timedelta): return timedelta.total_seconds() / 3600 def calculate_amplitude(time_hours, constituent):", "functools import partial from dateutil.parser import parse as parse_datetime from collections import namedtuple", "Constituent 
= namedtuple( 'Constituent', 'name,description,amplitude,phase,speed') class TidalModel(object): def __init__(self, constituent_data): self.constituents = None", "import parse as parse_datetime from collections import namedtuple Constituent = namedtuple( 'Constituent', 'name,description,amplitude,phase,speed')", "speed=float(c['speed'])) for c in constituents] def to_hours(timedelta): return timedelta.total_seconds() / 3600 def calculate_amplitude(time_hours,", "'<TidalModel, {} constituents>'.format(len(self.constituents)) def __str__(self): return self.__unicode__().encode('ascii') def _parse(self, data): assert data['height_units'] ==", "\"\"\" Predict a tidal height at a given point in time. \"\"\" assert", "self.time_datum = parse_datetime(data['time_datum']) self.constituents = TidalModel._parse_constituents( data['constituents']) @staticmethod def _parse_constituents(constituents): return [Constituent( name=c['name'],", "dateutil.parser import parse as parse_datetime from collections import namedtuple Constituent = namedtuple( 'Constituent',", "at a given point in time. 
\"\"\" assert when.tzinfo == pytz.UTC, 'datetime timezone", "partial from dateutil.parser import parse as parse_datetime from collections import namedtuple Constituent =", "return timedelta.total_seconds() / 3600 def calculate_amplitude(time_hours, constituent): angle = math.radians((constituent.speed * time_hours) -", "import partial from dateutil.parser import parse as parse_datetime from collections import namedtuple Constituent", "TidalModel._parse_constituents( data['constituents']) @staticmethod def _parse_constituents(constituents): return [Constituent( name=c['name'], description=c.get('description', ''), amplitude=float(c['amplitude']), phase=float(c['phase']), speed=float(c['speed']))", "math from functools import partial from dateutil.parser import parse as parse_datetime from collections", "== 'metres' self.time_datum = parse_datetime(data['time_datum']) self.constituents = TidalModel._parse_constituents( data['constituents']) @staticmethod def _parse_constituents(constituents): return", "parse_datetime(data['time_datum']) self.constituents = TidalModel._parse_constituents( data['constituents']) @staticmethod def _parse_constituents(constituents): return [Constituent( name=c['name'], description=c.get('description', ''),", "self.constituents) return sum(amplitudes) def __unicode__(self): return '<TidalModel, {} constituents>'.format(len(self.constituents)) def __str__(self): return self.__unicode__().encode('ascii')", "sum(amplitudes) def __unicode__(self): return '<TidalModel, {} constituents>'.format(len(self.constituents)) def __str__(self): return self.__unicode__().encode('ascii') def _parse(self,", "unicode_literals import pytz import math from functools import partial from dateutil.parser import parse", "height at a given point in time. \"\"\" assert when.tzinfo == pytz.UTC, 'datetime", "def predict(self, when): \"\"\" Predict a tidal height at a given point in", "a tidal height at a given point in time. 
\"\"\" assert when.tzinfo ==", "calculate_amplitude(time_hours, constituent): angle = math.radians((constituent.speed * time_hours) - constituent.phase) return constituent.amplitude * math.cos(angle)", "self.time_datum = None self._parse(constituent_data) def predict(self, when): \"\"\" Predict a tidal height at", "import pytz import math from functools import partial from dateutil.parser import parse as", "'name,description,amplitude,phase,speed') class TidalModel(object): def __init__(self, constituent_data): self.constituents = None self.time_datum = None self._parse(constituent_data)", "assert data['height_units'] == 'metres' self.time_datum = parse_datetime(data['time_datum']) self.constituents = TidalModel._parse_constituents( data['constituents']) @staticmethod def", "description=c.get('description', ''), amplitude=float(c['amplitude']), phase=float(c['phase']), speed=float(c['speed'])) for c in constituents] def to_hours(timedelta): return timedelta.total_seconds()", "as parse_datetime from collections import namedtuple Constituent = namedtuple( 'Constituent', 'name,description,amplitude,phase,speed') class TidalModel(object):", "__unicode__(self): return '<TidalModel, {} constituents>'.format(len(self.constituents)) def __str__(self): return self.__unicode__().encode('ascii') def _parse(self, data): assert", "timedelta.total_seconds() / 3600 def calculate_amplitude(time_hours, constituent): angle = math.radians((constituent.speed * time_hours) - constituent.phase)", "self.constituents = TidalModel._parse_constituents( data['constituents']) @staticmethod def _parse_constituents(constituents): return [Constituent( name=c['name'], description=c.get('description', ''), amplitude=float(c['amplitude']),", "{} constituents>'.format(len(self.constituents)) def __str__(self): return self.__unicode__().encode('ascii') def _parse(self, data): assert data['height_units'] == 'metres'", "'datetime timezone must be UTC' t_hours = to_hours(when - self.time_datum) 
amplitudes = map(", "data['constituents']) @staticmethod def _parse_constituents(constituents): return [Constituent( name=c['name'], description=c.get('description', ''), amplitude=float(c['amplitude']), phase=float(c['phase']), speed=float(c['speed'])) for", "in constituents] def to_hours(timedelta): return timedelta.total_seconds() / 3600 def calculate_amplitude(time_hours, constituent): angle =", "constituent_data): self.constituents = None self.time_datum = None self._parse(constituent_data) def predict(self, when): \"\"\" Predict", "TidalModel(object): def __init__(self, constituent_data): self.constituents = None self.time_datum = None self._parse(constituent_data) def predict(self,", "from __future__ import unicode_literals import pytz import math from functools import partial from", "timezone must be UTC' t_hours = to_hours(when - self.time_datum) amplitudes = map( partial(calculate_amplitude,", "self.time_datum) amplitudes = map( partial(calculate_amplitude, t_hours), self.constituents) return sum(amplitudes) def __unicode__(self): return '<TidalModel,", "\"\"\" assert when.tzinfo == pytz.UTC, 'datetime timezone must be UTC' t_hours = to_hours(when", "partial(calculate_amplitude, t_hours), self.constituents) return sum(amplitudes) def __unicode__(self): return '<TidalModel, {} constituents>'.format(len(self.constituents)) def __str__(self):", "'metres' self.time_datum = parse_datetime(data['time_datum']) self.constituents = TidalModel._parse_constituents( data['constituents']) @staticmethod def _parse_constituents(constituents): return [Constituent(", "/ 3600 def calculate_amplitude(time_hours, constituent): angle = math.radians((constituent.speed * time_hours) - constituent.phase) return", "amplitude=float(c['amplitude']), phase=float(c['phase']), speed=float(c['speed'])) for c in constituents] def to_hours(timedelta): return timedelta.total_seconds() / 3600", "amplitudes = map( partial(calculate_amplitude, t_hours), self.constituents) return 
sum(amplitudes) def __unicode__(self): return '<TidalModel, {}", "namedtuple Constituent = namedtuple( 'Constituent', 'name,description,amplitude,phase,speed') class TidalModel(object): def __init__(self, constituent_data): self.constituents =", "= None self.time_datum = None self._parse(constituent_data) def predict(self, when): \"\"\" Predict a tidal", "- self.time_datum) amplitudes = map( partial(calculate_amplitude, t_hours), self.constituents) return sum(amplitudes) def __unicode__(self): return", "t_hours = to_hours(when - self.time_datum) amplitudes = map( partial(calculate_amplitude, t_hours), self.constituents) return sum(amplitudes)", "from functools import partial from dateutil.parser import parse as parse_datetime from collections import", "pytz import math from functools import partial from dateutil.parser import parse as parse_datetime", "self.__unicode__().encode('ascii') def _parse(self, data): assert data['height_units'] == 'metres' self.time_datum = parse_datetime(data['time_datum']) self.constituents =", "def __init__(self, constituent_data): self.constituents = None self.time_datum = None self._parse(constituent_data) def predict(self, when):", "self._parse(constituent_data) def predict(self, when): \"\"\" Predict a tidal height at a given point", "_parse(self, data): assert data['height_units'] == 'metres' self.time_datum = parse_datetime(data['time_datum']) self.constituents = TidalModel._parse_constituents( data['constituents'])", "return sum(amplitudes) def __unicode__(self): return '<TidalModel, {} constituents>'.format(len(self.constituents)) def __str__(self): return self.__unicode__().encode('ascii') def", "__future__ import unicode_literals import pytz import math from functools import partial from dateutil.parser", "return self.__unicode__().encode('ascii') def _parse(self, data): assert data['height_units'] == 'metres' self.time_datum = parse_datetime(data['time_datum']) self.constituents", "from collections import namedtuple 
Constituent = namedtuple( 'Constituent', 'name,description,amplitude,phase,speed') class TidalModel(object): def __init__(self,", "name=c['name'], description=c.get('description', ''), amplitude=float(c['amplitude']), phase=float(c['phase']), speed=float(c['speed'])) for c in constituents] def to_hours(timedelta): return", "None self._parse(constituent_data) def predict(self, when): \"\"\" Predict a tidal height at a given", "import namedtuple Constituent = namedtuple( 'Constituent', 'name,description,amplitude,phase,speed') class TidalModel(object): def __init__(self, constituent_data): self.constituents", "a given point in time. \"\"\" assert when.tzinfo == pytz.UTC, 'datetime timezone must", "parse as parse_datetime from collections import namedtuple Constituent = namedtuple( 'Constituent', 'name,description,amplitude,phase,speed') class", "namedtuple( 'Constituent', 'name,description,amplitude,phase,speed') class TidalModel(object): def __init__(self, constituent_data): self.constituents = None self.time_datum =", "to_hours(timedelta): return timedelta.total_seconds() / 3600 def calculate_amplitude(time_hours, constituent): angle = math.radians((constituent.speed * time_hours)", "@staticmethod def _parse_constituents(constituents): return [Constituent( name=c['name'], description=c.get('description', ''), amplitude=float(c['amplitude']), phase=float(c['phase']), speed=float(c['speed'])) for c", "Predict a tidal height at a given point in time. 
\"\"\" assert when.tzinfo", "3600 def calculate_amplitude(time_hours, constituent): angle = math.radians((constituent.speed * time_hours) - constituent.phase) return constituent.amplitude", "<reponame>guyt101z/tide-predictor<gh_stars>1-10 from __future__ import unicode_literals import pytz import math from functools import partial", "= parse_datetime(data['time_datum']) self.constituents = TidalModel._parse_constituents( data['constituents']) @staticmethod def _parse_constituents(constituents): return [Constituent( name=c['name'], description=c.get('description',", "= map( partial(calculate_amplitude, t_hours), self.constituents) return sum(amplitudes) def __unicode__(self): return '<TidalModel, {} constituents>'.format(len(self.constituents))", "''), amplitude=float(c['amplitude']), phase=float(c['phase']), speed=float(c['speed'])) for c in constituents] def to_hours(timedelta): return timedelta.total_seconds() /", "tidal height at a given point in time. \"\"\" assert when.tzinfo == pytz.UTC,", "c in constituents] def to_hours(timedelta): return timedelta.total_seconds() / 3600 def calculate_amplitude(time_hours, constituent): angle", "None self.time_datum = None self._parse(constituent_data) def predict(self, when): \"\"\" Predict a tidal height", "def to_hours(timedelta): return timedelta.total_seconds() / 3600 def calculate_amplitude(time_hours, constituent): angle = math.radians((constituent.speed *", "to_hours(when - self.time_datum) amplitudes = map( partial(calculate_amplitude, t_hours), self.constituents) return sum(amplitudes) def __unicode__(self):", "class TidalModel(object): def __init__(self, constituent_data): self.constituents = None self.time_datum = None self._parse(constituent_data) def", "== pytz.UTC, 'datetime timezone must be UTC' t_hours = to_hours(when - self.time_datum) amplitudes", "import math from functools import partial from dateutil.parser import parse as parse_datetime from", "predict(self, when): \"\"\" Predict a tidal height at a 
given point in time.", "map( partial(calculate_amplitude, t_hours), self.constituents) return sum(amplitudes) def __unicode__(self): return '<TidalModel, {} constituents>'.format(len(self.constituents)) def", "when.tzinfo == pytz.UTC, 'datetime timezone must be UTC' t_hours = to_hours(when - self.time_datum)", "pytz.UTC, 'datetime timezone must be UTC' t_hours = to_hours(when - self.time_datum) amplitudes =", "when): \"\"\" Predict a tidal height at a given point in time. \"\"\"", "def _parse(self, data): assert data['height_units'] == 'metres' self.time_datum = parse_datetime(data['time_datum']) self.constituents = TidalModel._parse_constituents(", "= to_hours(when - self.time_datum) amplitudes = map( partial(calculate_amplitude, t_hours), self.constituents) return sum(amplitudes) def", "UTC' t_hours = to_hours(when - self.time_datum) amplitudes = map( partial(calculate_amplitude, t_hours), self.constituents) return", "constituents] def to_hours(timedelta): return timedelta.total_seconds() / 3600 def calculate_amplitude(time_hours, constituent): angle = math.radians((constituent.speed", "assert when.tzinfo == pytz.UTC, 'datetime timezone must be UTC' t_hours = to_hours(when -", "def calculate_amplitude(time_hours, constituent): angle = math.radians((constituent.speed * time_hours) - constituent.phase) return constituent.amplitude *", "def _parse_constituents(constituents): return [Constituent( name=c['name'], description=c.get('description', ''), amplitude=float(c['amplitude']), phase=float(c['phase']), speed=float(c['speed'])) for c in", "'Constituent', 'name,description,amplitude,phase,speed') class TidalModel(object): def __init__(self, constituent_data): self.constituents = None self.time_datum = None", "__init__(self, constituent_data): self.constituents = None self.time_datum = None self._parse(constituent_data) def predict(self, when): \"\"\"", "constituents>'.format(len(self.constituents)) def __str__(self): return self.__unicode__().encode('ascii') def 
_parse(self, data): assert data['height_units'] == 'metres' self.time_datum", "self.constituents = None self.time_datum = None self._parse(constituent_data) def predict(self, when): \"\"\" Predict a" ]
[ "= pygame.font.Font(None, 75) def __init__(self, levelnumber, xmin, ymin): self.num = levelnumber self.xmin =", "import pygame class Rectangle: def collide(self, circle): x_range = range(self.xmin - circle.size, self.xmax", "Wall(Rectangle): colour = (0, 0, 0) def __init__(self, xmin, xmax, ymin, ymax): self.xmin", "__init__(self, xmin, ymin): self.xmin = xmin self.ymin = ymin self.xmax = xmin +", "= ymin + self.height def draw(self, background): pygame.draw.polygon(background, self.BLACK, [(self.xmax, self.ymax), (self.xmax, self.ymin),", "self.colour, [(self.xmax, self.ymax), (self.xmax, self.ymin), (self.xmin, self.ymin), (self.xmin, self.ymax),], 0) class Border: colour", "(0, 0), (0, self.height),], 0) pygame.draw.polygon(background, self.colour, [(self.width - self.thick, self.height), (self.width -", "self.num = levelnumber self.xmin = xmin self.ymin = ymin self.xmax = xmin +", "width = 125 height = 50 pygame.init() colour = WHITE fill = BLACK", "self.height), (self.width - self.thick, 0), (self.width, 0), (self.width, self.height),], 0) pygame.draw.polygon(background, self.colour, [(self.thick,", "self.ymin), (self.xmin, self.ymin), (self.xmin, self.ymax),], 0) class Border: colour = (0, 0, 0)", "self.fill) background.blit(renderbackbutton, (self.xmin, self.ymin)) class Wall(Rectangle): colour = (0, 0, 0) def __init__(self,", "levelnumber, xmin, ymin): self.num = levelnumber self.xmin = xmin self.ymin = ymin self.xmax", "0), (0, 0), (0, self.height),], 0) pygame.draw.polygon(background, self.colour, [(self.width - self.thick, self.height), (self.width", "colour = WHITE fill = BLACK font = pygame.font.Font(None, 50) def __init__(self, xmin,", "background.blit(renderbackbutton, (self.xmin, self.ymin)) class Wall(Rectangle): colour = (0, 0, 0) def __init__(self, xmin,", "circle): x_range = range(self.xmin - circle.size, self.xmax + circle.size) y_range = range(self.ymin -", "75 height = 75 pygame.init() colour = WHITE fill = BLACK font =", "(self.xmin, 
self.ymax),], 0) renderlevelbutton = self.font.render(str(self.num), 0, self.colour, self.fill) background.blit(renderlevelbutton, (self.xmin, self.ymin)) class", "50) def __init__(self, xmin, ymin): self.xmin = xmin self.ymin = ymin self.xmax =", "x_range = range(self.thick + circle.size, self.width - self.thick - circle.size) y_range = range(self.thick", "= xmin self.xmax = xmax self.ymin = ymin self.ymax = ymax def draw(self,", "background): pygame.draw.polygon(background, self.BLACK, [(self.xmax, self.ymax), (self.xmax, self.ymin), (self.xmin, self.ymin), (self.xmin, self.ymax),], 0) renderbackbutton", "self.xmax = xmax self.ymin = ymin self.ymax = ymax def draw(self, background): pygame.draw.polygon(background,", "(0, 0, 0) width = 125 height = 50 pygame.init() colour = WHITE", "= levelnumber self.xmin = xmin self.ymin = ymin self.xmax = xmin + self.width", "width = 75 height = 75 pygame.init() colour = WHITE fill = BLACK", "pygame.init() colour = WHITE fill = BLACK font = pygame.font.Font(None, 75) def __init__(self,", "75 pygame.init() colour = WHITE fill = BLACK font = pygame.font.Font(None, 75) def", "xmin, ymin): self.num = levelnumber self.xmin = xmin self.ymin = ymin self.xmax =", "= height self.width = width def draw(self, background): pygame.draw.polygon(background, self.colour, [(self.thick, self.thick), (self.thick,", "self.thick),], 0) pygame.draw.polygon(background, self.colour, [(self.thick, self.height), (self.thick, 0), (0, 0), (0, self.height),], 0)", "+ circle.size, self.width - self.thick - circle.size) y_range = range(self.thick + circle.size, self.height", "BLACK font = pygame.font.Font(None, 50) def __init__(self, xmin, ymin): self.xmin = xmin self.ymin", "y_range = range(self.ymin - circle.size, self.ymax + circle.size) return circle.x in x_range and", "circle.x in x_range and circle.y in y_range class LevelButton(Rectangle): WHITE = (255, 255,", "255) BLACK = (0, 0, 0) width = 75 height = 75 pygame.init()", "(self.xmax, self.ymin), 
(self.xmin, self.ymin), (self.xmin, self.ymax),], 0) class Border: colour = (0, 0,", "(self.width - self.thick, 0), (self.width, 0), (self.width, self.height),], 0) pygame.draw.polygon(background, self.colour, [(self.thick, self.height),", "def __init__(self, xmin, ymin): self.xmin = xmin self.ymin = ymin self.xmax = xmin", "(self.xmin, self.ymin), (self.xmin, self.ymax),], 0) class Border: colour = (0, 0, 0) def", "ymin): self.xmin = xmin self.ymin = ymin self.xmax = xmin + self.width self.ymax", "font = pygame.font.Font(None, 50) def __init__(self, xmin, ymin): self.xmin = xmin self.ymin =", "self.fill) background.blit(renderlevelbutton, (self.xmin, self.ymin)) class BackButton(Rectangle): WHITE = (255, 255, 255) BLACK =", "[(self.thick, self.thick), (self.thick, 0), (self.width - self.thick, 0), (self.width - self.thick, self.thick),], 0)", "= 50 pygame.init() colour = WHITE fill = BLACK font = pygame.font.Font(None, 50)", "self.ymax), (self.xmax, self.ymin), (self.xmin, self.ymin), (self.xmin, self.ymax),], 0) renderlevelbutton = self.font.render(str(self.num), 0, self.colour,", "self.thick, self.thick),], 0) pygame.draw.polygon(background, self.colour, [(self.thick, self.height), (self.thick, 0), (0, 0), (0, self.height),],", "ymin self.xmax = xmin + self.width self.ymax = ymin + self.height def draw(self,", "renderlevelbutton = self.font.render(str(self.num), 0, self.colour, self.fill) background.blit(renderlevelbutton, (self.xmin, self.ymin)) class BackButton(Rectangle): WHITE =", "- self.thick - circle.size) return not (circle.x in x_range and circle.y in y_range)", "Border: colour = (0, 0, 0) def __init__(self, thickness, width, height): self.thick =", "self.ymax + circle.size) return circle.x in x_range and circle.y in y_range class LevelButton(Rectangle):", "(self.width - self.thick, 0), (self.width - self.thick, self.thick),], 0) pygame.draw.polygon(background, self.colour, [(self.thick, self.height),", "pygame.draw.polygon(background, self.colour, 
[(self.thick, self.height), (self.thick, self.height - self.thick), (self.width - self.thick, self.height -", "self.ymax), (self.xmax, self.ymin), (self.xmin, self.ymin), (self.xmin, self.ymax),], 0) renderbackbutton = self.font.render(\"BACK\", 0, self.colour,", "= (255, 255, 255) BLACK = (0, 0, 0) width = 75 height", "self.width = width def draw(self, background): pygame.draw.polygon(background, self.colour, [(self.thick, self.thick), (self.thick, 0), (self.width", "(self.xmin, self.ymin), (self.xmin, self.ymax),], 0) renderlevelbutton = self.font.render(str(self.num), 0, self.colour, self.fill) background.blit(renderlevelbutton, (self.xmin,", "+ self.width self.ymax = ymin + self.height def draw(self, background): pygame.draw.polygon(background, self.BLACK, [(self.xmax,", "+ circle.size, self.height - self.thick - circle.size) return not (circle.x in x_range and", "self.height - self.thick), (self.width - self.thick, self.height),], 0) def collide(self, circle): x_range =", "self.colour, [(self.thick, self.height), (self.thick, 0), (0, 0), (0, self.height),], 0) pygame.draw.polygon(background, self.colour, [(self.width", "- self.thick, self.height - self.thick), (self.width - self.thick, self.height),], 0) def collide(self, circle):", "in x_range and circle.y in y_range class LevelButton(Rectangle): WHITE = (255, 255, 255)", "circle.size) y_range = range(self.ymin - circle.size, self.ymax + circle.size) return circle.x in x_range", "(self.xmin, self.ymax),], 0) class Border: colour = (0, 0, 0) def __init__(self, thickness,", "self.thick - circle.size) y_range = range(self.thick + circle.size, self.height - self.thick - circle.size)", "return circle.x in x_range and circle.y in y_range class LevelButton(Rectangle): WHITE = (255,", "circle.y in y_range class LevelButton(Rectangle): WHITE = (255, 255, 255) BLACK = (0,", "255) BLACK = (0, 0, 0) width = 125 height = 50 pygame.init()", "(self.thick, 0), (self.width - self.thick, 0), (self.width - self.thick, 
self.thick),], 0) pygame.draw.polygon(background, self.colour,", "0) renderbackbutton = self.font.render(\"BACK\", 0, self.colour, self.fill) background.blit(renderbackbutton, (self.xmin, self.ymin)) class Wall(Rectangle): colour", "self.ymax),], 0) class Border: colour = (0, 0, 0) def __init__(self, thickness, width,", "self.thick), (self.width - self.thick, self.height),], 0) def collide(self, circle): x_range = range(self.thick +", "= self.font.render(str(self.num), 0, self.colour, self.fill) background.blit(renderlevelbutton, (self.xmin, self.ymin)) class BackButton(Rectangle): WHITE = (255,", "self.font.render(str(self.num), 0, self.colour, self.fill) background.blit(renderlevelbutton, (self.xmin, self.ymin)) class BackButton(Rectangle): WHITE = (255, 255,", "self.thick, self.height), (self.width - self.thick, 0), (self.width, 0), (self.width, self.height),], 0) pygame.draw.polygon(background, self.colour,", "Rectangle: def collide(self, circle): x_range = range(self.xmin - circle.size, self.xmax + circle.size) y_range", "height = 75 pygame.init() colour = WHITE fill = BLACK font = pygame.font.Font(None,", "circle.size, self.xmax + circle.size) y_range = range(self.ymin - circle.size, self.ymax + circle.size) return", "= (0, 0, 0) width = 75 height = 75 pygame.init() colour =", "0, self.colour, self.fill) background.blit(renderlevelbutton, (self.xmin, self.ymin)) class BackButton(Rectangle): WHITE = (255, 255, 255)", "+ self.height def draw(self, background): pygame.draw.polygon(background, self.BLACK, [(self.xmax, self.ymax), (self.xmax, self.ymin), (self.xmin, self.ymin),", "WHITE = (255, 255, 255) BLACK = (0, 0, 0) width = 125", "xmin, xmax, ymin, ymax): self.xmin = xmin self.xmax = xmax self.ymin = ymin", "- self.thick, 0), (self.width, 0), (self.width, self.height),], 0) pygame.draw.polygon(background, self.colour, [(self.thick, self.height), (self.thick,", "class BackButton(Rectangle): WHITE = (255, 255, 255) BLACK = (0, 0, 0) width", "class 
Wall(Rectangle): colour = (0, 0, 0) def __init__(self, xmin, xmax, ymin, ymax):", "- self.thick, self.thick),], 0) pygame.draw.polygon(background, self.colour, [(self.thick, self.height), (self.thick, 0), (0, 0), (0,", "self.thick, self.height),], 0) def collide(self, circle): x_range = range(self.thick + circle.size, self.width -", "pygame.draw.polygon(background, self.colour, [(self.xmax, self.ymax), (self.xmax, self.ymin), (self.xmin, self.ymin), (self.xmin, self.ymax),], 0) class Border:", "font = pygame.font.Font(None, 75) def __init__(self, levelnumber, xmin, ymin): self.num = levelnumber self.xmin", "WHITE = (255, 255, 255) BLACK = (0, 0, 0) width = 75", "self.ymax), (self.xmax, self.ymin), (self.xmin, self.ymin), (self.xmin, self.ymax),], 0) class Border: colour = (0,", "= range(self.xmin - circle.size, self.xmax + circle.size) y_range = range(self.ymin - circle.size, self.ymax", "ymax): self.xmin = xmin self.xmax = xmax self.ymin = ymin self.ymax = ymax", "- self.thick), (self.width - self.thick, self.height),], 0) def collide(self, circle): x_range = range(self.thick", "y_range = range(self.thick + circle.size, self.height - self.thick - circle.size) return not (circle.x", "range(self.ymin - circle.size, self.ymax + circle.size) return circle.x in x_range and circle.y in", "- circle.size, self.ymax + circle.size) return circle.x in x_range and circle.y in y_range", "self.ymin), (self.xmin, self.ymin), (self.xmin, self.ymax),], 0) renderlevelbutton = self.font.render(str(self.num), 0, self.colour, self.fill) background.blit(renderlevelbutton,", "def collide(self, circle): x_range = range(self.thick + circle.size, self.width - self.thick - circle.size)", "- circle.size) y_range = range(self.thick + circle.size, self.height - self.thick - circle.size) return", "= WHITE fill = BLACK font = pygame.font.Font(None, 75) def __init__(self, levelnumber, xmin,", "0) width = 125 height = 50 pygame.init() colour = WHITE fill =", "0) def __init__(self, thickness, 
width, height): self.thick = thickness self.height = height self.width", "(self.width, 0), (self.width, self.height),], 0) pygame.draw.polygon(background, self.colour, [(self.thick, self.height), (self.thick, self.height - self.thick),", "self.ymin), (self.xmin, self.ymax),], 0) renderbackbutton = self.font.render(\"BACK\", 0, self.colour, self.fill) background.blit(renderbackbutton, (self.xmin, self.ymin))", "+ circle.size) y_range = range(self.ymin - circle.size, self.ymax + circle.size) return circle.x in", "self.ymin), (self.xmin, self.ymax),], 0) renderlevelbutton = self.font.render(str(self.num), 0, self.colour, self.fill) background.blit(renderlevelbutton, (self.xmin, self.ymin))", "pygame.font.Font(None, 75) def __init__(self, levelnumber, xmin, ymin): self.num = levelnumber self.xmin = xmin", "y_range class LevelButton(Rectangle): WHITE = (255, 255, 255) BLACK = (0, 0, 0)", "= ymin self.ymax = ymax def draw(self, background): pygame.draw.polygon(background, self.colour, [(self.xmax, self.ymax), (self.xmax,", "height): self.thick = thickness self.height = height self.width = width def draw(self, background):", "background): pygame.draw.polygon(background, self.colour, [(self.xmax, self.ymax), (self.xmax, self.ymin), (self.xmin, self.ymin), (self.xmin, self.ymax),], 0) class", "- self.thick, self.height),], 0) def collide(self, circle): x_range = range(self.thick + circle.size, self.width", "(self.xmin, self.ymax),], 0) renderbackbutton = self.font.render(\"BACK\", 0, self.colour, self.fill) background.blit(renderbackbutton, (self.xmin, self.ymin)) class", "ymin): self.num = levelnumber self.xmin = xmin self.ymin = ymin self.xmax = xmin", "colour = (0, 0, 0) def __init__(self, xmin, xmax, ymin, ymax): self.xmin =", "draw(self, background): pygame.draw.polygon(background, self.BLACK, [(self.xmax, self.ymax), (self.xmax, self.ymin), (self.xmin, self.ymin), (self.xmin, self.ymax),], 0)", "self.height = height self.width = width def draw(self, background): 
pygame.draw.polygon(background, self.colour, [(self.thick, self.thick),", "= (0, 0, 0) def __init__(self, thickness, width, height): self.thick = thickness self.height", "- self.thick, 0), (self.width - self.thick, self.thick),], 0) pygame.draw.polygon(background, self.colour, [(self.thick, self.height), (self.thick,", "ymin + self.height def draw(self, background): pygame.draw.polygon(background, self.BLACK, [(self.xmax, self.ymax), (self.xmax, self.ymin), (self.xmin,", "= xmin + self.width self.ymax = ymin + self.height def draw(self, background): pygame.draw.polygon(background,", "xmin + self.width self.ymax = ymin + self.height def draw(self, background): pygame.draw.polygon(background, self.BLACK,", "= range(self.ymin - circle.size, self.ymax + circle.size) return circle.x in x_range and circle.y", "- self.thick), (self.width - self.thick, self.height - self.thick), (self.width - self.thick, self.height),], 0)", "def draw(self, background): pygame.draw.polygon(background, self.colour, [(self.xmax, self.ymax), (self.xmax, self.ymin), (self.xmin, self.ymin), (self.xmin, self.ymax),],", "xmax self.ymin = ymin self.ymax = ymax def draw(self, background): pygame.draw.polygon(background, self.colour, [(self.xmax,", "x_range = range(self.xmin - circle.size, self.xmax + circle.size) y_range = range(self.ymin - circle.size,", "0), (self.width, self.height),], 0) pygame.draw.polygon(background, self.colour, [(self.thick, self.height), (self.thick, self.height - self.thick), (self.width", "self.height), (self.thick, 0), (0, 0), (0, self.height),], 0) pygame.draw.polygon(background, self.colour, [(self.width - self.thick,", "0, self.colour, self.fill) background.blit(renderbackbutton, (self.xmin, self.ymin)) class Wall(Rectangle): colour = (0, 0, 0)", "(self.width - self.thick, self.height - self.thick), (self.width - self.thick, self.height),], 0) def collide(self,", "(self.xmin, self.ymin)) class Wall(Rectangle): colour = (0, 0, 0) def __init__(self, xmin, xmax,", 
"pygame.draw.polygon(background, self.BLACK, [(self.xmax, self.ymax), (self.xmax, self.ymin), (self.xmin, self.ymin), (self.xmin, self.ymax),], 0) renderlevelbutton =", "- circle.size, self.xmax + circle.size) y_range = range(self.ymin - circle.size, self.ymax + circle.size)", "self.ymax = ymin + self.height def draw(self, background): pygame.draw.polygon(background, self.BLACK, [(self.xmax, self.ymax), (self.xmax,", "circle.size, self.ymax + circle.size) return circle.x in x_range and circle.y in y_range class", "def draw(self, background): pygame.draw.polygon(background, self.colour, [(self.thick, self.thick), (self.thick, 0), (self.width - self.thick, 0),", "self.height),], 0) def collide(self, circle): x_range = range(self.thick + circle.size, self.width - self.thick", "fill = BLACK font = pygame.font.Font(None, 50) def __init__(self, xmin, ymin): self.xmin =", "self.ymin = ymin self.ymax = ymax def draw(self, background): pygame.draw.polygon(background, self.colour, [(self.xmax, self.ymax),", "self.colour, self.fill) background.blit(renderlevelbutton, (self.xmin, self.ymin)) class BackButton(Rectangle): WHITE = (255, 255, 255) BLACK", "0), (self.width - self.thick, 0), (self.width - self.thick, self.thick),], 0) pygame.draw.polygon(background, self.colour, [(self.thick,", "= BLACK font = pygame.font.Font(None, 75) def __init__(self, levelnumber, xmin, ymin): self.num =", "self.xmin = xmin self.xmax = xmax self.ymin = ymin self.ymax = ymax def", "[(self.xmax, self.ymax), (self.xmax, self.ymin), (self.xmin, self.ymin), (self.xmin, self.ymax),], 0) renderlevelbutton = self.font.render(str(self.num), 0,", "BLACK = (0, 0, 0) width = 125 height = 50 pygame.init() colour", "self.thick, 0), (self.width - self.thick, self.thick),], 0) pygame.draw.polygon(background, self.colour, [(self.thick, self.height), (self.thick, 0),", "self.ymin)) class BackButton(Rectangle): WHITE = (255, 255, 255) BLACK = (0, 0, 0)", "height self.width = width def draw(self, background): 
pygame.draw.polygon(background, self.colour, [(self.thick, self.thick), (self.thick, 0),", "height = 50 pygame.init() colour = WHITE fill = BLACK font = pygame.font.Font(None,", "width, height): self.thick = thickness self.height = height self.width = width def draw(self,", "class Border: colour = (0, 0, 0) def __init__(self, thickness, width, height): self.thick", "draw(self, background): pygame.draw.polygon(background, self.colour, [(self.xmax, self.ymax), (self.xmax, self.ymin), (self.xmin, self.ymin), (self.xmin, self.ymax),], 0)", "pygame.draw.polygon(background, self.colour, [(self.thick, self.thick), (self.thick, 0), (self.width - self.thick, 0), (self.width - self.thick,", "0) pygame.draw.polygon(background, self.colour, [(self.width - self.thick, self.height), (self.width - self.thick, 0), (self.width, 0),", "self.height),], 0) pygame.draw.polygon(background, self.colour, [(self.width - self.thick, self.height), (self.width - self.thick, 0), (self.width,", "0) renderlevelbutton = self.font.render(str(self.num), 0, self.colour, self.fill) background.blit(renderlevelbutton, (self.xmin, self.ymin)) class BackButton(Rectangle): WHITE", "(0, 0, 0) def __init__(self, thickness, width, height): self.thick = thickness self.height =", "self.xmax = xmin + self.width self.ymax = ymin + self.height def draw(self, background):", "self.height),], 0) pygame.draw.polygon(background, self.colour, [(self.thick, self.height), (self.thick, self.height - self.thick), (self.width - self.thick,", "+ circle.size) return circle.x in x_range and circle.y in y_range class LevelButton(Rectangle): WHITE", "class LevelButton(Rectangle): WHITE = (255, 255, 255) BLACK = (0, 0, 0) width", "(self.xmin, self.ymin)) class BackButton(Rectangle): WHITE = (255, 255, 255) BLACK = (0, 0,", "self.ymax),], 0) renderbackbutton = self.font.render(\"BACK\", 0, self.colour, self.fill) background.blit(renderbackbutton, (self.xmin, self.ymin)) class Wall(Rectangle):", "self.width - self.thick - 
circle.size) y_range = range(self.thick + circle.size, self.height - self.thick", "circle.size, self.height - self.thick - circle.size) return not (circle.x in x_range and circle.y", "= xmax self.ymin = ymin self.ymax = ymax def draw(self, background): pygame.draw.polygon(background, self.colour,", "self.height - self.thick), (self.width - self.thick, self.height - self.thick), (self.width - self.thick, self.height),],", "= 125 height = 50 pygame.init() colour = WHITE fill = BLACK font", "ymin, ymax): self.xmin = xmin self.xmax = xmax self.ymin = ymin self.ymax =", "[(self.thick, self.height), (self.thick, 0), (0, 0), (0, self.height),], 0) pygame.draw.polygon(background, self.colour, [(self.width -", "(0, self.height),], 0) pygame.draw.polygon(background, self.colour, [(self.width - self.thick, self.height), (self.width - self.thick, 0),", "renderbackbutton = self.font.render(\"BACK\", 0, self.colour, self.fill) background.blit(renderbackbutton, (self.xmin, self.ymin)) class Wall(Rectangle): colour =", "= 75 pygame.init() colour = WHITE fill = BLACK font = pygame.font.Font(None, 75)", "pygame.draw.polygon(background, self.BLACK, [(self.xmax, self.ymax), (self.xmax, self.ymin), (self.xmin, self.ymin), (self.xmin, self.ymax),], 0) renderbackbutton =", "xmin self.ymin = ymin self.xmax = xmin + self.width self.ymax = ymin +", "= ymin self.xmax = xmin + self.width self.ymax = ymin + self.height def", "circle.size) return circle.x in x_range and circle.y in y_range class LevelButton(Rectangle): WHITE =", "BLACK font = pygame.font.Font(None, 75) def __init__(self, levelnumber, xmin, ymin): self.num = levelnumber", "0) def __init__(self, xmin, xmax, ymin, ymax): self.xmin = xmin self.xmax = xmax", "pygame.draw.polygon(background, self.colour, [(self.thick, self.height), (self.thick, 0), (0, 0), (0, self.height),], 0) pygame.draw.polygon(background, self.colour,", "draw(self, background): pygame.draw.polygon(background, self.colour, [(self.thick, self.thick), (self.thick, 
0), (self.width - self.thick, 0), (self.width", "[(self.thick, self.height), (self.thick, self.height - self.thick), (self.width - self.thick, self.height - self.thick), (self.width", "circle): x_range = range(self.thick + circle.size, self.width - self.thick - circle.size) y_range =", "0) class Border: colour = (0, 0, 0) def __init__(self, thickness, width, height):", "0) pygame.draw.polygon(background, self.colour, [(self.thick, self.height), (self.thick, self.height - self.thick), (self.width - self.thick, self.height", "= pygame.font.Font(None, 50) def __init__(self, xmin, ymin): self.xmin = xmin self.ymin = ymin", "width def draw(self, background): pygame.draw.polygon(background, self.colour, [(self.thick, self.thick), (self.thick, 0), (self.width - self.thick,", "BackButton(Rectangle): WHITE = (255, 255, 255) BLACK = (0, 0, 0) width =", "= (255, 255, 255) BLACK = (0, 0, 0) width = 125 height", "= width def draw(self, background): pygame.draw.polygon(background, self.colour, [(self.thick, self.thick), (self.thick, 0), (self.width -", "self.width self.ymax = ymin + self.height def draw(self, background): pygame.draw.polygon(background, self.BLACK, [(self.xmax, self.ymax),", "= BLACK font = pygame.font.Font(None, 50) def __init__(self, xmin, ymin): self.xmin = xmin", "self.thick, self.height - self.thick), (self.width - self.thick, self.height),], 0) def collide(self, circle): x_range", "pygame class Rectangle: def collide(self, circle): x_range = range(self.xmin - circle.size, self.xmax +", "(self.width, self.height),], 0) pygame.draw.polygon(background, self.colour, [(self.thick, self.height), (self.thick, self.height - self.thick), (self.width -", "self.height def draw(self, background): pygame.draw.polygon(background, self.BLACK, [(self.xmax, self.ymax), (self.xmax, self.ymin), (self.xmin, self.ymin), (self.xmin,", "circle.size, self.width - self.thick - circle.size) y_range = range(self.thick + circle.size, self.height -", "= (0, 0, 0) width = 125 height = 
50 pygame.init() colour =", "50 pygame.init() colour = WHITE fill = BLACK font = pygame.font.Font(None, 50) def", "thickness self.height = height self.width = width def draw(self, background): pygame.draw.polygon(background, self.colour, [(self.thick,", "levelnumber self.xmin = xmin self.ymin = ymin self.xmax = xmin + self.width self.ymax", "(self.width - self.thick, self.thick),], 0) pygame.draw.polygon(background, self.colour, [(self.thick, self.height), (self.thick, 0), (0, 0),", "self.xmin = xmin self.ymin = ymin self.xmax = xmin + self.width self.ymax =", "WHITE fill = BLACK font = pygame.font.Font(None, 75) def __init__(self, levelnumber, xmin, ymin):", "pygame.draw.polygon(background, self.colour, [(self.width - self.thick, self.height), (self.width - self.thick, 0), (self.width, 0), (self.width,", "0) pygame.draw.polygon(background, self.colour, [(self.thick, self.height), (self.thick, 0), (0, 0), (0, self.height),], 0) pygame.draw.polygon(background,", "= thickness self.height = height self.width = width def draw(self, background): pygame.draw.polygon(background, self.colour,", "(self.width - self.thick, self.height),], 0) def collide(self, circle): x_range = range(self.thick + circle.size,", "background): pygame.draw.polygon(background, self.BLACK, [(self.xmax, self.ymax), (self.xmax, self.ymin), (self.xmin, self.ymin), (self.xmin, self.ymax),], 0) renderlevelbutton", "def draw(self, background): pygame.draw.polygon(background, self.BLACK, [(self.xmax, self.ymax), (self.xmax, self.ymin), (self.xmin, self.ymin), (self.xmin, self.ymax),],", "= ymax def draw(self, background): pygame.draw.polygon(background, self.colour, [(self.xmax, self.ymax), (self.xmax, self.ymin), (self.xmin, self.ymin),", "def __init__(self, thickness, width, height): self.thick = thickness self.height = height self.width =", "__init__(self, thickness, width, height): self.thick = thickness self.height = height self.width = width", "thickness, width, height): self.thick = thickness 
self.height = height self.width = width def", "colour = WHITE fill = BLACK font = pygame.font.Font(None, 75) def __init__(self, levelnumber,", "background): pygame.draw.polygon(background, self.colour, [(self.thick, self.thick), (self.thick, 0), (self.width - self.thick, 0), (self.width -", "0), (self.width, 0), (self.width, self.height),], 0) pygame.draw.polygon(background, self.colour, [(self.thick, self.height), (self.thick, self.height -", "= self.font.render(\"BACK\", 0, self.colour, self.fill) background.blit(renderbackbutton, (self.xmin, self.ymin)) class Wall(Rectangle): colour = (0,", "def collide(self, circle): x_range = range(self.xmin - circle.size, self.xmax + circle.size) y_range =", "= 75 height = 75 pygame.init() colour = WHITE fill = BLACK font", "= range(self.thick + circle.size, self.height - self.thick - circle.size) return not (circle.x in", "self.colour, [(self.thick, self.height), (self.thick, self.height - self.thick), (self.width - self.thick, self.height - self.thick),", "[(self.width - self.thick, self.height), (self.width - self.thick, 0), (self.width, 0), (self.width, self.height),], 0)", "def __init__(self, levelnumber, xmin, ymin): self.num = levelnumber self.xmin = xmin self.ymin =", "xmin, ymin): self.xmin = xmin self.ymin = ymin self.xmax = xmin + self.width", "self.colour, [(self.thick, self.thick), (self.thick, 0), (self.width - self.thick, 0), (self.width - self.thick, self.thick),],", "self.font.render(\"BACK\", 0, self.colour, self.fill) background.blit(renderbackbutton, (self.xmin, self.ymin)) class Wall(Rectangle): colour = (0, 0,", "75) def __init__(self, levelnumber, xmin, ymin): self.num = levelnumber self.xmin = xmin self.ymin", "self.ymax = ymax def draw(self, background): pygame.draw.polygon(background, self.colour, [(self.xmax, self.ymax), (self.xmax, self.ymin), (self.xmin,", "__init__(self, xmin, xmax, ymin, ymax): self.xmin = xmin self.xmax = xmax self.ymin =", "background.blit(renderlevelbutton, (self.xmin, 
self.ymin)) class BackButton(Rectangle): WHITE = (255, 255, 255) BLACK = (0,", "0, 0) width = 125 height = 50 pygame.init() colour = WHITE fill", "0, 0) def __init__(self, xmin, xmax, ymin, ymax): self.xmin = xmin self.xmax =", "xmin self.xmax = xmax self.ymin = ymin self.ymax = ymax def draw(self, background):", "in y_range class LevelButton(Rectangle): WHITE = (255, 255, 255) BLACK = (0, 0,", "class Rectangle: def collide(self, circle): x_range = range(self.xmin - circle.size, self.xmax + circle.size)", "x_range and circle.y in y_range class LevelButton(Rectangle): WHITE = (255, 255, 255) BLACK", "self.ymin)) class Wall(Rectangle): colour = (0, 0, 0) def __init__(self, xmin, xmax, ymin,", "(self.thick, self.height - self.thick), (self.width - self.thick, self.height - self.thick), (self.width - self.thick,", "= (0, 0, 0) def __init__(self, xmin, xmax, ymin, ymax): self.xmin = xmin", "fill = BLACK font = pygame.font.Font(None, 75) def __init__(self, levelnumber, xmin, ymin): self.num", "0, 0) width = 75 height = 75 pygame.init() colour = WHITE fill", "0) def collide(self, circle): x_range = range(self.thick + circle.size, self.width - self.thick -", "= xmin self.ymin = ymin self.xmax = xmin + self.width self.ymax = ymin", "ymin self.ymax = ymax def draw(self, background): pygame.draw.polygon(background, self.colour, [(self.xmax, self.ymax), (self.xmax, self.ymin),", "self.thick, 0), (self.width, 0), (self.width, self.height),], 0) pygame.draw.polygon(background, self.colour, [(self.thick, self.height), (self.thick, self.height", "(self.xmin, self.ymin), (self.xmin, self.ymax),], 0) renderbackbutton = self.font.render(\"BACK\", 0, self.colour, self.fill) background.blit(renderbackbutton, (self.xmin,", "self.colour, [(self.width - self.thick, self.height), (self.width - self.thick, 0), (self.width, 0), (self.width, self.height),],", "pygame.init() colour = WHITE fill = BLACK font = pygame.font.Font(None, 50) def __init__(self,", "colour = (0, 0, 0) def 
__init__(self, thickness, width, height): self.thick = thickness", "0), (0, self.height),], 0) pygame.draw.polygon(background, self.colour, [(self.width - self.thick, self.height), (self.width - self.thick,", "range(self.thick + circle.size, self.height - self.thick - circle.size) return not (circle.x in x_range", "LevelButton(Rectangle): WHITE = (255, 255, 255) BLACK = (0, 0, 0) width =", "WHITE fill = BLACK font = pygame.font.Font(None, 50) def __init__(self, xmin, ymin): self.xmin", "[(self.xmax, self.ymax), (self.xmax, self.ymin), (self.xmin, self.ymin), (self.xmin, self.ymax),], 0) class Border: colour =", "- self.thick, self.height), (self.width - self.thick, 0), (self.width, 0), (self.width, self.height),], 0) pygame.draw.polygon(background,", "self.BLACK, [(self.xmax, self.ymax), (self.xmax, self.ymin), (self.xmin, self.ymin), (self.xmin, self.ymax),], 0) renderbackbutton = self.font.render(\"BACK\",", "range(self.thick + circle.size, self.width - self.thick - circle.size) y_range = range(self.thick + circle.size,", "self.height - self.thick - circle.size) return not (circle.x in x_range and circle.y in", "range(self.xmin - circle.size, self.xmax + circle.size) y_range = range(self.ymin - circle.size, self.ymax +", "def __init__(self, xmin, xmax, ymin, ymax): self.xmin = xmin self.xmax = xmax self.ymin", "0), (self.width - self.thick, self.thick),], 0) pygame.draw.polygon(background, self.colour, [(self.thick, self.height), (self.thick, 0), (0,", "(self.xmax, self.ymin), (self.xmin, self.ymin), (self.xmin, self.ymax),], 0) renderlevelbutton = self.font.render(str(self.num), 0, self.colour, self.fill)", "(self.xmax, self.ymin), (self.xmin, self.ymin), (self.xmin, self.ymax),], 0) renderbackbutton = self.font.render(\"BACK\", 0, self.colour, self.fill)", "= range(self.thick + circle.size, self.width - self.thick - circle.size) y_range = range(self.thick +", "self.BLACK, [(self.xmax, self.ymax), (self.xmax, self.ymin), (self.xmin, self.ymin), (self.xmin, 
self.ymax),], 0) renderlevelbutton = self.font.render(str(self.num),", "<reponame>CoffeeTableEnnui/RedCircleGame import pygame class Rectangle: def collide(self, circle): x_range = range(self.xmin - circle.size,", "self.thick), (self.width - self.thick, self.height - self.thick), (self.width - self.thick, self.height),], 0) def", "circle.size) y_range = range(self.thick + circle.size, self.height - self.thick - circle.size) return not", "self.colour, self.fill) background.blit(renderbackbutton, (self.xmin, self.ymin)) class Wall(Rectangle): colour = (0, 0, 0) def", "and circle.y in y_range class LevelButton(Rectangle): WHITE = (255, 255, 255) BLACK =", "collide(self, circle): x_range = range(self.thick + circle.size, self.width - self.thick - circle.size) y_range", "255, 255) BLACK = (0, 0, 0) width = 75 height = 75", "BLACK = (0, 0, 0) width = 75 height = 75 pygame.init() colour", "125 height = 50 pygame.init() colour = WHITE fill = BLACK font =", "(255, 255, 255) BLACK = (0, 0, 0) width = 75 height =", "- self.thick - circle.size) y_range = range(self.thick + circle.size, self.height - self.thick -", "self.thick), (self.thick, 0), (self.width - self.thick, 0), (self.width - self.thick, self.thick),], 0) pygame.draw.polygon(background,", "ymax def draw(self, background): pygame.draw.polygon(background, self.colour, [(self.xmax, self.ymax), (self.xmax, self.ymin), (self.xmin, self.ymin), (self.xmin,", "0) width = 75 height = 75 pygame.init() colour = WHITE fill =", "(255, 255, 255) BLACK = (0, 0, 0) width = 125 height =", "= WHITE fill = BLACK font = pygame.font.Font(None, 50) def __init__(self, xmin, ymin):", "self.xmax + circle.size) y_range = range(self.ymin - circle.size, self.ymax + circle.size) return circle.x", "xmax, ymin, ymax): self.xmin = xmin self.xmax = xmax self.ymin = ymin self.ymax", "[(self.xmax, self.ymax), (self.xmax, self.ymin), (self.xmin, self.ymin), (self.xmin, self.ymax),], 0) renderbackbutton = self.font.render(\"BACK\", 0,", "self.ymin), 
(self.xmin, self.ymax),], 0) class Border: colour = (0, 0, 0) def __init__(self,", "self.height), (self.thick, self.height - self.thick), (self.width - self.thick, self.height - self.thick), (self.width -", "collide(self, circle): x_range = range(self.xmin - circle.size, self.xmax + circle.size) y_range = range(self.ymin", "(self.thick, 0), (0, 0), (0, self.height),], 0) pygame.draw.polygon(background, self.colour, [(self.width - self.thick, self.height),", "pygame.font.Font(None, 50) def __init__(self, xmin, ymin): self.xmin = xmin self.ymin = ymin self.xmax", "(0, 0, 0) width = 75 height = 75 pygame.init() colour = WHITE", "self.ymin = ymin self.xmax = xmin + self.width self.ymax = ymin + self.height", "self.ymin), (self.xmin, self.ymin), (self.xmin, self.ymax),], 0) renderbackbutton = self.font.render(\"BACK\", 0, self.colour, self.fill) background.blit(renderbackbutton,", "255, 255) BLACK = (0, 0, 0) width = 125 height = 50", "__init__(self, levelnumber, xmin, ymin): self.num = levelnumber self.xmin = xmin self.ymin = ymin", "(0, 0, 0) def __init__(self, xmin, xmax, ymin, ymax): self.xmin = xmin self.xmax", "0, 0) def __init__(self, thickness, width, height): self.thick = thickness self.height = height", "self.ymax),], 0) renderlevelbutton = self.font.render(str(self.num), 0, self.colour, self.fill) background.blit(renderlevelbutton, (self.xmin, self.ymin)) class BackButton(Rectangle):", "self.thick = thickness self.height = height self.width = width def draw(self, background): pygame.draw.polygon(background," ]
[ "max=a[i] else: pass if min>a[i]: min=a[i] else: pass print 'max = %d, min", "_ = raw_input() a = [int(e) for e in raw_input().split(' ')] d =", "pass print 'max = %d, min = %d'%(max,min) self.maximumDifference= max-min _ = raw_input()", "e in raw_input().split(' ')] d = Difference(a) d.computeDifference() # d.maximunDifference is a variable", "'max = %d, min = %d'%(max,min) self.maximumDifference= max-min _ = raw_input() a =", "range(0, len(a)): if max<a[i]: max=a[i] else: pass if min>a[i]: min=a[i] else: pass print", "= self.__elements max = a[0] min = a[0] for i in range(0, len(a)):", "= %d, min = %d'%(max,min) self.maximumDifference= max-min _ = raw_input() a = [int(e)", "self.__elements max = a[0] min = a[0] for i in range(0, len(a)): if", "else: pass print 'max = %d, min = %d'%(max,min) self.maximumDifference= max-min _ =", "def computeDifference(self): a = self.__elements max = a[0] min = a[0] for i", "if min>a[i]: min=a[i] else: pass print 'max = %d, min = %d'%(max,min) self.maximumDifference=", "raw_input() a = [int(e) for e in raw_input().split(' ')] d = Difference(a) d.computeDifference()", "computeDifference(self): a = self.__elements max = a[0] min = a[0] for i in", "self.maximumDifference= max-min _ = raw_input() a = [int(e) for e in raw_input().split(' ')]", "def __init__(self, a): self.__elements = a def computeDifference(self): a = self.__elements max =", "pass if min>a[i]: min=a[i] else: pass print 'max = %d, min = %d'%(max,min)", "= raw_input() a = [int(e) for e in raw_input().split(' ')] d = Difference(a)", "max = a[0] min = a[0] for i in range(0, len(a)): if max<a[i]:", "= a[0] min = a[0] for i in range(0, len(a)): if max<a[i]: max=a[i]", "len(a)): if max<a[i]: max=a[i] else: pass if min>a[i]: min=a[i] else: pass print 'max", "a def computeDifference(self): a = self.__elements max = a[0] min = a[0] for", "if max<a[i]: max=a[i] else: pass if min>a[i]: min=a[i] else: pass print 'max =", "= a def computeDifference(self): a = self.__elements max 
= a[0] min = a[0]", "min>a[i]: min=a[i] else: pass print 'max = %d, min = %d'%(max,min) self.maximumDifference= max-min", "= %d'%(max,min) self.maximumDifference= max-min _ = raw_input() a = [int(e) for e in", "min = a[0] for i in range(0, len(a)): if max<a[i]: max=a[i] else: pass", "for i in range(0, len(a)): if max<a[i]: max=a[i] else: pass if min>a[i]: min=a[i]", "max-min _ = raw_input() a = [int(e) for e in raw_input().split(' ')] d", "%d'%(max,min) self.maximumDifference= max-min _ = raw_input() a = [int(e) for e in raw_input().split('", "a = self.__elements max = a[0] min = a[0] for i in range(0,", "__init__(self, a): self.__elements = a def computeDifference(self): a = self.__elements max = a[0]", "class Difference: def __init__(self, a): self.__elements = a def computeDifference(self): a = self.__elements", "max<a[i]: max=a[i] else: pass if min>a[i]: min=a[i] else: pass print 'max = %d,", "else: pass if min>a[i]: min=a[i] else: pass print 'max = %d, min =", "a[0] min = a[0] for i in range(0, len(a)): if max<a[i]: max=a[i] else:", "a = [int(e) for e in raw_input().split(' ')] d = Difference(a) d.computeDifference() #", "in raw_input().split(' ')] d = Difference(a) d.computeDifference() # d.maximunDifference is a variable print", "#!/usr/bin/python class Difference: def __init__(self, a): self.__elements = a def computeDifference(self): a =", "in range(0, len(a)): if max<a[i]: max=a[i] else: pass if min>a[i]: min=a[i] else: pass", "= [int(e) for e in raw_input().split(' ')] d = Difference(a) d.computeDifference() # d.maximunDifference", "min = %d'%(max,min) self.maximumDifference= max-min _ = raw_input() a = [int(e) for e", "= a[0] for i in range(0, len(a)): if max<a[i]: max=a[i] else: pass if", "print 'max = %d, min = %d'%(max,min) self.maximumDifference= max-min _ = raw_input() a", "for e in raw_input().split(' ')] d = Difference(a) d.computeDifference() # d.maximunDifference is a", "a): self.__elements = a def computeDifference(self): a = self.__elements 
max = a[0] min", "a[0] for i in range(0, len(a)): if max<a[i]: max=a[i] else: pass if min>a[i]:", "[int(e) for e in raw_input().split(' ')] d = Difference(a) d.computeDifference() # d.maximunDifference is", "raw_input().split(' ')] d = Difference(a) d.computeDifference() # d.maximunDifference is a variable print d.maximumDifference", "self.__elements = a def computeDifference(self): a = self.__elements max = a[0] min =", "Difference: def __init__(self, a): self.__elements = a def computeDifference(self): a = self.__elements max", "i in range(0, len(a)): if max<a[i]: max=a[i] else: pass if min>a[i]: min=a[i] else:", "%d, min = %d'%(max,min) self.maximumDifference= max-min _ = raw_input() a = [int(e) for", "min=a[i] else: pass print 'max = %d, min = %d'%(max,min) self.maximumDifference= max-min _" ]
[ "= render_module.render(self.mesh) if view2tex_maps is None: view2tex_map = render_module.get_view2tex_map(self.mesh) view2tex_maps_list.append(view2tex_map) images.append(image.to(self.classifier.device)) images =", "keepdim=True) return view_saliency_maps def convert_view2tex_saliency_maps(self, view_saliency_maps, view2tex_maps): tex_saliency = torch.zeros((view_saliency_maps.shape[0], * self.tex_shape), device=self.device)", "for idx in range(view_saliency_maps.shape[0]): tex_saliency[(idx, view2tex_maps[idx, ..., 0], view2tex_maps[idx, ..., 1])] = view_saliency_maps.squeeze(3)[idx]", "= data_module self.use_cache = use_cache self.device = self.classifier.device self.tex_shape = self.mesh.textures.maps_padded().shape[1:-1] def compute_batch_view_saliency_maps(self,", "def compute_batch_view_saliency_maps(self, images): images.requires_grad_(True) scores = self.classifier.classify(images) max_score, _ = scores.max(1, keepdim=True) max_score.mean().backward()", "if view2tex_maps is None: view2tex_map = render_module.get_view2tex_map(self.mesh) view2tex_maps_list.append(view2tex_map) images.append(image.to(self.classifier.device)) images = torch.cat(images, 0)", "from tqdm import tqdm from saifooler.render.sailenv_module import SailenvModule class SaliencyEstimator: def __init__(self, mesh_descriptor,", "view_saliencies[idx] = torch.cat(view_saliencies[idx], 0) if return_views: views[idx] = torch.cat(views[idx], 0) self.sailenv_module.despawn_obj() ret =", "return_view_saliencies: view_saliencies[idx].append(view_saliency_maps) if return_views: views[idx].append(images) tex_saliencies[idx] = torch.cat(tex_saliencies[idx], 0) if return_view_saliencies: view_saliencies[idx] =", "view2tex_maps = None for idx, render_module in tqdm(enumerate([self.p3d_module, self.sailenv_module]), position=0, desc=\"Module\"): if render_module", "return_view_saliencies: ret = (*ret, view_saliencies) if return_views: ret = (*ret, views) return ret", "torch.cat(views[idx], 0) 
self.sailenv_module.despawn_obj() if return_views: return view_saliencies, views return view_saliencies def estimate_saliency_map(self, return_view_saliencies=False,", "self.compute_batch_view_saliency_maps(images) tex_saliency_maps = self.convert_view2tex_saliency_maps(view_saliency_maps, view2tex_maps) tex_saliencies[idx].append(tex_saliency_maps) if return_view_saliencies: view_saliencies[idx].append(view_saliency_maps) if return_views: views[idx].append(images) tex_saliencies[idx]", "0) view2tex_maps = view2tex_maps if view2tex_maps is not None else torch.cat(view2tex_maps_list, 0) return", "tqdm import tqdm from saifooler.render.sailenv_module import SailenvModule class SaliencyEstimator: def __init__(self, mesh_descriptor, classifier,", "self.sailenv_module: SailenvModule = sailenv_module self.data_module = data_module self.use_cache = use_cache self.device = self.classifier.device", "tex_saliency[(idx, view2tex_maps[idx, ..., 0], view2tex_maps[idx, ..., 1])] = view_saliency_maps.squeeze(3)[idx] return tex_saliency def render_batch(self,", "SailenvModule class SaliencyEstimator: def __init__(self, mesh_descriptor, classifier, p3d_module, sailenv_module, data_module, use_cache=False): self.mesh_descriptor =", "sailenv_module, data_module, use_cache=False): self.mesh_descriptor = mesh_descriptor self.mesh = self.mesh_descriptor.mesh self.classifier = classifier self.p3d_module", "= self.convert_view2tex_saliency_maps(view_saliency_maps, view2tex_maps) tex_saliencies[idx].append(tex_saliency_maps) if return_view_saliencies: view_saliencies[idx].append(view_saliency_maps) if return_views: views[idx].append(images) tex_saliencies[idx] = torch.cat(tex_saliencies[idx],", "classifier self.p3d_module = p3d_module self.sailenv_module: SailenvModule = sailenv_module self.data_module = data_module self.use_cache =", "self.classifier = classifier self.p3d_module = p3d_module self.sailenv_module: SailenvModule = sailenv_module self.data_module = data_module", "= 
torch.cat(images, 0) view2tex_maps = view2tex_maps if view2tex_maps is not None else torch.cat(view2tex_maps_list,", "in tqdm(enumerate([self.p3d_module, self.sailenv_module]), position=0, desc=\"Module\"): if render_module is None: del view_saliencies[idx] # if", "= [] view2tex_maps_list = [] render_inputs, targets = batch for render_input in render_inputs:", "= use_cache self.device = self.classifier.device self.tex_shape = self.mesh.textures.maps_padded().shape[1:-1] def compute_batch_view_saliency_maps(self, images): images.requires_grad_(True) scores", "= self.classifier.classify(images) max_score, _ = scores.max(1, keepdim=True) max_score.mean().backward() view_saliency_maps = torch.mean(images.grad.data.abs(), dim=3, keepdim=True)", "for idx, render_module in tqdm(enumerate([self.p3d_module, self.sailenv_module]), position=0, desc=\"Module\"): if render_module is None: del", "= view_saliency_maps.squeeze(3)[idx] return tex_saliency def render_batch(self, render_module, batch, view2tex_maps=None): images = [] view2tex_maps_list", "batch in tqdm(self.data_module.test_dataloader(), position=1, desc=\"Batch\"): images, view2tex_maps = self.render_batch(render_module, batch, view2tex_maps) view_saliency_maps =", "render_inputs, targets = batch for render_input in render_inputs: distance, camera_azim, camera_elev = render_input[:3]", "views[idx] = torch.cat(views[idx], 0) self.sailenv_module.despawn_obj() if return_views: return view_saliencies, views return view_saliencies def", "idx in range(view_saliency_maps.shape[0]): tex_saliency[(idx, view2tex_maps[idx, ..., 0], view2tex_maps[idx, ..., 1])] = view_saliency_maps.squeeze(3)[idx] return", "lights_elev) image = render_module.render(self.mesh) if view2tex_maps is None: view2tex_map = render_module.get_view2tex_map(self.mesh) view2tex_maps_list.append(view2tex_map) images.append(image.to(self.classifier.device))", "idx, render_module in tqdm(enumerate([self.p3d_module, self.sailenv_module]), position=0, 
desc=\"Module\"): if render_module is None: del view_saliencies[idx]", "return_views: return view_saliencies, views return view_saliencies def estimate_saliency_map(self, return_view_saliencies=False, return_views=False): self.sailenv_module.spawn_obj(self.mesh_descriptor) tex_saliencies =", "image = render_module.render(self.mesh) if view2tex_maps is None: view2tex_map = render_module.get_view2tex_map(self.mesh) view2tex_maps_list.append(view2tex_map) images.append(image.to(self.classifier.device)) images", "self.sailenv_module.spawn_obj(self.mesh_descriptor) tex_saliencies = [[], []] view_saliencies = [[], []] views = [[], []]", "[]] view_saliencies = [[], []] views = [[], []] view2tex_maps = None for", "view_saliency_maps, view2tex_maps): tex_saliency = torch.zeros((view_saliency_maps.shape[0], * self.tex_shape), device=self.device) view2tex_maps = view2tex_maps * torch.tensor(self.tex_shape,", "__init__(self, mesh_descriptor, classifier, p3d_module, sailenv_module, data_module, use_cache=False): self.mesh_descriptor = mesh_descriptor self.mesh = self.mesh_descriptor.mesh", "is None: del view_saliencies[idx] # if unity module is not provided, just skip", "view2tex_maps[idx, ..., 0], view2tex_maps[idx, ..., 1])] = view_saliency_maps.squeeze(3)[idx] return tex_saliency def render_batch(self, render_module,", "images, view2tex_maps = self.render_batch(render_module, batch, view2tex_maps) view_saliency_maps = self.compute_batch_view_saliency_maps(images) tex_saliency_maps = self.convert_view2tex_saliency_maps(view_saliency_maps, view2tex_maps)", "= [[], []] view_saliencies = [[], []] views = [[], []] view2tex_maps =", "view2tex_maps_list.append(view2tex_map) images.append(image.to(self.classifier.device)) images = torch.cat(images, 0) view2tex_maps = view2tex_maps if view2tex_maps is not", "skip it continue for batch in tqdm(self.data_module.test_dataloader(), position=1, desc=\"Batch\"): images, view2tex_maps = self.render_batch(render_module,", "return 
view_saliencies, views return view_saliencies def estimate_saliency_map(self, return_view_saliencies=False, return_views=False): self.sailenv_module.spawn_obj(self.mesh_descriptor) tex_saliencies = [[],", "= self.mesh.textures.maps_padded().shape[1:-1] def compute_batch_view_saliency_maps(self, images): images.requires_grad_(True) scores = self.classifier.classify(images) max_score, _ = scores.max(1,", "= torch.cat(views[idx], 0) self.sailenv_module.despawn_obj() if return_views: return view_saliencies, views return view_saliencies def estimate_saliency_map(self,", "self.data_module = data_module self.use_cache = use_cache self.device = self.classifier.device self.tex_shape = self.mesh.textures.maps_padded().shape[1:-1] def", "lights_azim, lights_elev = render_input[3:] render_module.set_lights_direction(lights_azim, lights_elev) image = render_module.render(self.mesh) if view2tex_maps is None:", "in tqdm(self.data_module.test_dataloader(), position=1, desc=\"Batch\"): images, view2tex_maps = self.render_batch(render_module, batch, view2tex_maps) view_saliency_maps = self.compute_batch_view_saliency_maps(images)", "def estimate_saliency_map(self, return_view_saliencies=False, return_views=False): self.sailenv_module.spawn_obj(self.mesh_descriptor) tex_saliencies = [[], []] view_saliencies = [[], []]", "return_views: views[idx].append(images) view_saliencies[idx].append(view_saliency_maps) view_saliencies[idx] = torch.cat(view_saliencies[idx], 0) if return_views: views[idx] = torch.cat(views[idx], 0)", "None for idx, render_module in tqdm(enumerate([self.p3d_module, self.sailenv_module]), position=0, desc=\"Module\"): if render_module is None:", "batch, view2tex_maps) view_saliency_maps = self.compute_batch_view_saliency_maps(images) if return_views: views[idx].append(images) view_saliencies[idx].append(view_saliency_maps) view_saliencies[idx] = torch.cat(view_saliencies[idx], 0)", "class SaliencyEstimator: def __init__(self, mesh_descriptor, classifier, p3d_module, 
sailenv_module, data_module, use_cache=False): self.mesh_descriptor = mesh_descriptor", "self.sailenv_module]), position=0, desc=\"Module\"): if render_module is None: del tex_saliencies[idx] # if unity module", "<reponame>sailab-code/SAIFooler<filename>saifooler/saliency/saliency_estimator.py import torch from tqdm import tqdm from saifooler.render.sailenv_module import SailenvModule class SaliencyEstimator:", "view2tex_maps * torch.tensor(self.tex_shape, device=self.device) view2tex_maps = view2tex_maps.to(dtype=torch.long) for idx in range(view_saliency_maps.shape[0]): tex_saliency[(idx, view2tex_maps[idx,", "* self.tex_shape), device=self.device) view2tex_maps = view2tex_maps * torch.tensor(self.tex_shape, device=self.device) view2tex_maps = view2tex_maps.to(dtype=torch.long) for", "torch.zeros((view_saliency_maps.shape[0], * self.tex_shape), device=self.device) view2tex_maps = view2tex_maps * torch.tensor(self.tex_shape, device=self.device) view2tex_maps = view2tex_maps.to(dtype=torch.long)", "return_views: views[idx] = torch.cat(views[idx], 0) self.sailenv_module.despawn_obj() if return_views: return view_saliencies, views return view_saliencies", "position=1, desc=\"Batch\"): images, view2tex_maps = self.render_batch(render_module, batch, view2tex_maps) view_saliency_maps = self.compute_batch_view_saliency_maps(images) tex_saliency_maps =", "0) if return_view_saliencies: view_saliencies[idx] = torch.cat(view_saliencies[idx], 0) if return_views: views[idx] = torch.cat(views[idx], 0)", "idx, render_module in tqdm(enumerate([self.p3d_module, self.sailenv_module]), position=0, desc=\"Module\"): if render_module is None: del tex_saliencies[idx]", "return images, view2tex_maps def estimate_view_saliency_map(self, return_views=False): self.sailenv_module.spawn_obj(self.mesh_descriptor) view_saliencies = [[], []] views =", "tqdm(self.data_module.test_dataloader(), position=1, desc=\"Batch\"): images, view2tex_maps = self.render_batch(render_module, batch, 
view2tex_maps) view_saliency_maps = self.compute_batch_view_saliency_maps(images) if", "view2tex_maps) tex_saliencies[idx].append(tex_saliency_maps) if return_view_saliencies: view_saliencies[idx].append(view_saliency_maps) if return_views: views[idx].append(images) tex_saliencies[idx] = torch.cat(tex_saliencies[idx], 0) if", "views[idx] = torch.cat(views[idx], 0) self.sailenv_module.despawn_obj() ret = (tex_saliencies,) if return_view_saliencies: ret = (*ret,", "= scores.max(1, keepdim=True) max_score.mean().backward() view_saliency_maps = torch.mean(images.grad.data.abs(), dim=3, keepdim=True) return view_saliency_maps def convert_view2tex_saliency_maps(self,", "tqdm(self.data_module.test_dataloader(), position=1, desc=\"Batch\"): images, view2tex_maps = self.render_batch(render_module, batch, view2tex_maps) view_saliency_maps = self.compute_batch_view_saliency_maps(images) tex_saliency_maps", "if return_views: views[idx].append(images) view_saliencies[idx].append(view_saliency_maps) view_saliencies[idx] = torch.cat(view_saliencies[idx], 0) if return_views: views[idx] = torch.cat(views[idx],", "self.tex_shape), device=self.device) view2tex_maps = view2tex_maps * torch.tensor(self.tex_shape, device=self.device) view2tex_maps = view2tex_maps.to(dtype=torch.long) for idx", "camera_elev = render_input[:3] render_module.look_at_mesh(distance, camera_azim, camera_elev) lights_azim, lights_elev = render_input[3:] render_module.set_lights_direction(lights_azim, lights_elev) image", "= None for idx, render_module in tqdm(enumerate([self.p3d_module, self.sailenv_module]), position=0, desc=\"Module\"): if render_module is", "position=0, desc=\"Module\"): if render_module is None: del view_saliencies[idx] # if unity module is", "[[], []] view_saliencies = [[], []] views = [[], []] view2tex_maps = None", "view2tex_maps if view2tex_maps is not None else torch.cat(view2tex_maps_list, 0) return images, view2tex_maps def", "max_score, _ = scores.max(1, keepdim=True) 
class SaliencyEstimator:
    """Estimate saliency maps of a textured mesh w.r.t. an image classifier.

    The mesh is rendered from the viewpoints yielded by ``data_module`` with
    two renderers (a PyTorch3D module and an optional SAILenv/Unity module).
    The gradient of the classifier's top score w.r.t. the rendered images
    gives a view-space saliency map, which can be projected back onto the
    texture via the renderer's view-to-texture UV map.
    """

    def __init__(self, mesh_descriptor, classifier, p3d_module, sailenv_module,
                 data_module, use_cache=False):
        self.mesh_descriptor = mesh_descriptor
        self.mesh = self.mesh_descriptor.mesh
        self.classifier = classifier
        self.p3d_module = p3d_module
        self.sailenv_module: SailenvModule = sailenv_module
        self.data_module = data_module
        # NOTE(review): use_cache is stored but never read in this class --
        # confirm whether callers rely on it.
        self.use_cache = use_cache
        # Saliency tensors are kept on the classifier's device.
        self.device = self.classifier.device
        # Spatial (H, W) of the padded texture maps (batch and channel
        # dimensions stripped).
        self.tex_shape = self.mesh.textures.maps_padded().shape[1:-1]

    def compute_batch_view_saliency_maps(self, images):
        """Return per-pixel saliency for a batch of rendered images.

        Saliency is the channel-wise mean absolute gradient of the
        classifier's maximum class score w.r.t. the input pixels.

        :param images: batch of rendered images; assumed channels-last
            (N, H, W, C) since the mean is taken over dim 3 -- TODO confirm.
        :return: tensor of shape (N, H, W, 1).
        """
        images.requires_grad_(True)
        scores = self.classifier.classify(images)
        # One scalar backward pass (mean of per-image top scores) populates
        # images.grad for the whole batch at once.
        max_score, _ = scores.max(1, keepdim=True)
        max_score.mean().backward()
        # detach() instead of the deprecated .data; numerically identical.
        view_saliency_maps = torch.mean(images.grad.detach().abs(), dim=3, keepdim=True)
        return view_saliency_maps

    def convert_view2tex_saliency_maps(self, view_saliency_maps, view2tex_maps):
        """Project view-space saliency onto texture space.

        :param view_saliency_maps: (N, H, W, 1) view-space saliency.
        :param view2tex_maps: (N, H, W, 2) per-pixel texture coordinates,
            presumably normalized to [0, 1] (they are rescaled by the texture
            shape) -- TODO confirm against the renderer.
        :return: (N, tex_H, tex_W) texture-space saliency.  Texels not hit by
            any view pixel stay 0; colliding pixels overwrite each other.
        """
        tex_saliency = torch.zeros((view_saliency_maps.shape[0], *self.tex_shape),
                                   device=self.device)
        # Rescale normalized UVs to integer texel indices.
        view2tex_maps = view2tex_maps * torch.tensor(self.tex_shape, device=self.device)
        view2tex_maps = view2tex_maps.to(dtype=torch.long)
        for idx in range(view_saliency_maps.shape[0]):
            # Advanced integer indexing scatters every view pixel's saliency
            # into its target texel.
            tex_saliency[(idx, view2tex_maps[idx, ..., 0], view2tex_maps[idx, ..., 1])] = \
                view_saliency_maps.squeeze(3)[idx]
        return tex_saliency

    def render_batch(self, render_module, batch, view2tex_maps=None):
        """Render every viewpoint in ``batch`` with ``render_module``.

        :param batch: ``(render_inputs, targets)`` where each render input
            packs (distance, camera_azim, camera_elev, lights_azim,
            lights_elev); targets are unused here.
        :param view2tex_maps: cached view-to-texture maps; when ``None`` they
            are queried from the renderer and concatenated.
        :return: ``(images, view2tex_maps)`` with images moved to the
            classifier's device.
        """
        images = []
        view2tex_maps_list = []
        render_inputs, targets = batch
        for render_input in render_inputs:
            distance, camera_azim, camera_elev = render_input[:3]
            render_module.look_at_mesh(distance, camera_azim, camera_elev)
            lights_azim, lights_elev = render_input[3:]
            render_module.set_lights_direction(lights_azim, lights_elev)
            image = render_module.render(self.mesh)
            if view2tex_maps is None:
                # Only computed when the caller did not supply a cached map.
                view2tex_map = render_module.get_view2tex_map(self.mesh)
                view2tex_maps_list.append(view2tex_map)
            images.append(image.to(self.classifier.device))
        images = torch.cat(images, 0)
        view2tex_maps = view2tex_maps if view2tex_maps is not None else torch.cat(view2tex_maps_list, 0)
        return images, view2tex_maps

    def estimate_view_saliency_map(self, return_views=False):
        """Compute view-space saliency maps for each available renderer.

        :param return_views: also return the rendered images.
        :return: list with one saliency tensor per available renderer
            (entries for missing renderers are dropped); when ``return_views``
            is set, a ``(saliencies, views)`` pair of aligned lists.
        """
        # Guard: spawning is only possible when the Unity module exists; the
        # loop below already tolerates a missing module, so the unconditional
        # call in the original would have crashed before that guard applied.
        if self.sailenv_module is not None:
            self.sailenv_module.spawn_obj(self.mesh_descriptor)
        view_saliencies = [[], []]
        views = [[], []]
        view2tex_maps = None
        skipped = []
        for idx, render_module in tqdm(enumerate([self.p3d_module, self.sailenv_module]),
                                       position=0, desc="Module"):
            if render_module is None:
                # if unity module is not provided, just skip it
                skipped.append(idx)
                continue
            for batch in tqdm(self.data_module.test_dataloader(), position=1, desc="Batch"):
                images, view2tex_maps = self.render_batch(render_module, batch, view2tex_maps)
                view_saliency_maps = self.compute_batch_view_saliency_maps(images)
                if return_views:
                    views[idx].append(images)
                view_saliencies[idx].append(view_saliency_maps)
            view_saliencies[idx] = torch.cat(view_saliencies[idx], 0)
            if return_views:
                views[idx] = torch.cat(views[idx], 0)
        # Prune accumulators of skipped modules after the loop (from the back
        # so earlier indices stay valid).  The original deleted only the
        # saliency list, and did so mid-iteration, leaving ``views``
        # misaligned and later indices shifted.
        for idx in reversed(skipped):
            del view_saliencies[idx]
            del views[idx]
        if self.sailenv_module is not None:
            self.sailenv_module.despawn_obj()
        if return_views:
            return view_saliencies, views
        return view_saliencies

    def estimate_saliency_map(self, return_view_saliencies=False, return_views=False):
        """Compute texture-space saliency maps for each available renderer.

        :param return_view_saliencies: also return view-space saliencies.
        :param return_views: also return the rendered images.
        :return: tuple starting with the texture saliencies, followed by the
            optional extras in the order of the flags above.
        """
        if self.sailenv_module is not None:
            self.sailenv_module.spawn_obj(self.mesh_descriptor)
        tex_saliencies = [[], []]
        view_saliencies = [[], []]
        views = [[], []]
        view2tex_maps = None
        skipped = []
        for idx, render_module in tqdm(enumerate([self.p3d_module, self.sailenv_module]),
                                       position=0, desc="Module"):
            if render_module is None:
                # if unity module is not provided, just skip it
                skipped.append(idx)
                continue
            for batch in tqdm(self.data_module.test_dataloader(), position=1, desc="Batch"):
                # view2tex_maps is computed with the first module and reused
                # for every later batch/module -- assumes all renderers share
                # the same UV mapping; TODO confirm.
                images, view2tex_maps = self.render_batch(render_module, batch, view2tex_maps)
                view_saliency_maps = self.compute_batch_view_saliency_maps(images)
                tex_saliency_maps = self.convert_view2tex_saliency_maps(view_saliency_maps, view2tex_maps)
                tex_saliencies[idx].append(tex_saliency_maps)
                if return_view_saliencies:
                    view_saliencies[idx].append(view_saliency_maps)
                if return_views:
                    views[idx].append(images)
            tex_saliencies[idx] = torch.cat(tex_saliencies[idx], 0)
            if return_view_saliencies:
                view_saliencies[idx] = torch.cat(view_saliencies[idx], 0)
            if return_views:
                views[idx] = torch.cat(views[idx], 0)
        # Prune all parallel accumulators so every returned list is aligned;
        # the original pruned only ``tex_saliencies``.
        for idx in reversed(skipped):
            del tex_saliencies[idx]
            del view_saliencies[idx]
            del views[idx]
        if self.sailenv_module is not None:
            self.sailenv_module.despawn_obj()
        ret = (tex_saliencies,)
        if return_view_saliencies:
            ret = (*ret, view_saliencies)
        if return_views:
            ret = (*ret, views)
        return ret

    def to(self, device):
        """Move the mesh, its textures, the renderer and the classifier to
        ``device`` and record it as the working device."""
        self.mesh = self.mesh.to(device)
        self.mesh.textures = self.mesh.textures.to(device)
        self.p3d_module.to(device)
        self.classifier.to(device)
        self.device = device
[ "datetime import pandas as pd from typing import Any, Dict, List, Tuple class", "p_name for p_name in plugin_meta}) # EDGES df_ew = pd.DataFrame([self.weighted_graph['weightsJSON'][1]['edgeWeights'].keys(), [v['backwards'] for v", "-> Dict[str, Any]: \"\"\" Raw CredResult JSON data \"\"\" return self.cred_data def get_node(self,", "['credTimestampMs', 'amount'] return self.cache['df_grain'] def get_cred_over_time(self) -> pd.DataFrame: \"\"\" Returns distributed cred summary", "cred graph. \"\"\" if self.cache['df_cred_eflow'] is None: def set_plugin(label): for prefix, plugin in", "return [self.get_node(i) for i in range(self.total_nodes)] @property def intervals(self, to_datetime=False) -> List[Any]: \"\"\"", "p_name in plugin_meta}) # EDGES df_ew = pd.DataFrame([self.weighted_graph['weightsJSON'][1]['edgeWeights'].keys(), [v['backwards'] for v in self.weighted_graph['weightsJSON'][1]['edgeWeights'].values()],", "e in edges: cred_edges[e.replace('\\x00', '')] = [ df_ew[df_ew.edge.str.startswith(e)].backward.sum(), df_ew[df_ew.edge.str.startswith(e)].forward.sum() ] self.cache['df_cred_eflow'] = pd.json_normalize(cred_edges).T", "= [{'credTimestampMs': record['credTimestampMs'], 'amount': int(record['grainReceipt']['amount']) / 1e18} \\ for acc in grain_history for", "'amount'] return self.cache['df_grain'] def get_cred_over_time(self) -> pd.DataFrame: \"\"\" Returns distributed cred summary over", "'IDENTITY'] def get_user_ranking(self) -> pd.DataFrame: \"\"\" Returns the user raking by total amount", "\"\"\" Returns specifc node's information \"\"\" node = dict() address = self.weighted_graph['graphJSON'][1]['sortedNodeAddresses'][i] node['address.source']", "is None: if self.cache['df'] is None: self.to_df() self.cache['df_cred_ot'] = pd.DataFrame([self.get_dt_intervals(), pd.DataFrame(self.cache['df'].credOverTime.to_list()).sum() ]).T self.cache['df_cred_ot'].columns", "# self.df['credShare'] = self.df.totalCred / distributedCred return self.cache['df'] def 
get_cred_flow_from_graph(self) -> Tuple[pd.DataFrame, pd.DataFrame]:", "nodes & edges in the cred graph. \"\"\" if self.cache['df_cred_eflow'] is None: def", "self.cache['df_rank']['credShare'] = (self.cache['df_rank'].totalCred / distributed_cred) * 100 df_rank_p = self.get_user_nodes()[['address.id', 'totalCred', 'credOverTime']] distributed_cred", "cred \"\"\" if self.cache['df'] is None: self.to_df() return self.cache['df'].totalCred.sum() @property def distributed_grain(self) ->", "distributed cred summary over all intervals \"\"\" if self.cache['df_cred_ot'] is None: if self.cache['df']", "all nodes data as a DataFrame \"\"\" if self.cache['df'] is None: self.cache['df'] =", "\"\"\" Returns timestamp intervals where cred was computed \"\"\" return self.cred_data['intervals'] def get_dt_intervals(self)", "= pd.DataFrame([self.get_dt_intervals(), pd.DataFrame(self.cache['df'].credOverTime.to_list()).sum() ]).T self.cache['df_cred_ot'].columns = ['credTimestampMs', 'amount'] self.cache['df_cred_ot'].set_index('credTimestampMs', drop=True, inplace=True) return self.cache['df_cred_ot']", "class CredData(): \"\"\" Parses information from Sourcecred - Works with TimelineCred data format", "distributed_cred = self.cache['df_rank'].totalCred.sum() # self.cache['df_rank']['credShare'] = (self.cache['df_rank'].totalCred / distributed_cred) * 100 df_rank_p =", "of nodes (users, posts, etc) in the graph \"\"\" return len(self.cred_data['nodeSummaries']) @property def", "in plugin['nodeTypes']], } edges.extend([et['prefix'] for et in plugin_meta[plugin['name']]['edgeTypes']]) # for et in plugin_meta[plugin['name']]['edgeTypes']:", "-> pd.DataFrame: \"\"\" Returns user accounts info from 'output/accounts.json' file \"\"\" if self.cache['df_accounts']", "\"\"\" if self.cache['df_grain'] is None: self.get_grain_distribution() return self.cache['df_grain'].amount.sum() @property def accounts(self) -> pd.DataFrame:", "self.cache['df_grain'] is None: 
self.get_grain_distribution() return self.cache['df_grain'].amount.sum() @property def accounts(self) -> pd.DataFrame: \"\"\" Returns", "None: self.cache['df_accounts'] = pd.json_normalize(self.accounts_data['accounts']) self.cache['df_accounts']['account.balance'] = self.cache['df_accounts']['account.balance'].map(float) / 1e18 self.cache['df_accounts']['account.paid'] = self.cache['df_accounts']['account.paid'].map(float) /", "in plugin_meta}) # EDGES df_ew = pd.DataFrame([self.weighted_graph['weightsJSON'][1]['edgeWeights'].keys(), [v['backwards'] for v in self.weighted_graph['weightsJSON'][1]['edgeWeights'].values()], [v['forwards']", "node['address.nodeType'] == 'IDENTITY' else None return node @property def total_nodes(self) -> int: \"\"\"", "def get_cred_flow_from_graph(self) -> Tuple[pd.DataFrame, pd.DataFrame]: \"\"\" Gets cred flow through nodes & edges", "self.weighted_graph def get_cred_data(self) -> Dict[str, Any]: \"\"\" Raw CredResult JSON data \"\"\" return", "CredData(): \"\"\" Parses information from Sourcecred - Works with TimelineCred data format (sourcecred", "self.cache['df'] is None: self.to_df() self.cache['df_cred_ot'] = pd.DataFrame([self.get_dt_intervals(), pd.DataFrame(self.cache['df'].credOverTime.to_list()).sum() ]).T self.cache['df_cred_ot'].columns = ['credTimestampMs', 'amount']", "\"\"\" Returns the history of grain distribution \"\"\" if self.cache['df_grain'] is None: grain_history", "self.weighted_graph['weightsJSON'][1]['edgeWeights'].values()] ]).T df_ew.columns = ['edge', 'backward', 'forward'] cred_edges = dict() for e in", "is None: self.cache['df'] = pd.json_normalize(self.nodes) self.cache['df'].timestamp = pd.to_datetime(self.cache['df'].timestamp, unit='ms') # distributedCred = self.df.totalCred.sum()", "self.get_user_nodes().sort_values('totalCred', ascending=False).reset_index(drop=True) # distributed_cred = self.cache['df_rank'].totalCred.sum() # self.cache['df_rank']['credShare'] = (self.cache['df_rank'].totalCred / 
distributed_cred) *", "graph \"\"\" return [self.get_node(i) for i in range(self.total_nodes)] @property def intervals(self, to_datetime=False) ->", "for acc in grain_history for record in acc['account']['allocationHistory']] self.cache['df_grain'] = pd.json_normalize(grain_distribution) self.cache['df_grain']['credTimestampMs'] =", "# distributed_cred = self.cache['df_rank'].totalCred.sum() # self.cache['df_rank']['credShare'] = (self.cache['df_rank'].totalCred / distributed_cred) * 100 df_rank_p", "inplace=True) # NODES df_nw = pd.DataFrame([self.weighted_graph['weightsJSON'][1]['nodeWeights'].keys(), self.weighted_graph['weightsJSON'][1]['nodeWeights'].values() ]).T df_nw.columns = ['node', 'weight'] cred_nodes", "\"\"\" Raw CredResult JSON data \"\"\" return self.cred_data def get_node(self, i: int) ->", "self.cache['df_rank'] = df_acc_p.join(df_rank_p, on='account.identity.id', how='inner' ).sort_values('totalCred', ascending=False).reset_index(drop=True) self.cache['df_rank'].columns = ['id', 'user', 'type', 'active',", "-> pd.DataFrame: \"\"\" Returns the history of grain distribution \"\"\" if self.cache['df_grain'] is", "plugin['nodePrefix'], 'edgePrefix': plugin['edgePrefix'], 'edgeTypes': [{'prefix': et['prefix'], 'weight': et['defaultWeight']} for et in plugin['edgeTypes']], 'nodeTypes':", "dict() for n in nodes: cred_nodes[n.replace('\\x00', '')] = df_nw[df_nw.node.str.startswith(n)].weight.sum() self.cache['df_cred_nflow'] = pd.json_normalize(cred_nodes).T self.cache['df_cred_nflow'].columns", "posts, etc) in the graph \"\"\" return len(self.cred_data['nodeSummaries']) @property def nodes(self) -> List[Any]:", "pd.DataFrame: \"\"\" Returns user nodes in the graph \"\"\" if self.cache['df'] is None:", "@property def total_nodes(self) -> int: \"\"\" Total amount of nodes (users, posts, etc)", "self.cache['df_grain'] is None: grain_history = [acc for acc in self.accounts_data['accounts'] if 'allocationHistory' in", "self.cache['df_cred_eflow'] is None: 
def set_plugin(label): for prefix, plugin in plugin_prefixes.items(): if label.startswith(prefix): return", "['id', 'user', 'type', 'active', 'grainBalance', 'grainPaid', 'totalCred', 'credOverTime', 'credShare'] return self.cache['df_rank'] def get_grain_distribution(self)", "cred_data, accounts_data): self.cred_json_data = cred_data self.weighted_graph = cred_data[1]['weightedGraph'][1] self.cred_data = cred_data[1]['credData'] self.accounts_data =", "else [] node['description'] = self.weighted_graph['graphJSON'][1]['nodes'][i]['description'] node['timestamp'] = self.weighted_graph['graphJSON'][1]['nodes'][i]['timestampMs'] node['user'] = self.weighted_graph['graphJSON'][1]['nodes'][i]['description'] if node['address.nodeType']", "if self.cache['df'] is None: self.to_df() return self.cache['df'].totalCred.sum() @property def distributed_grain(self) -> float: \"\"\"", "the user raking by total amount of cred gained so far \"\"\" if", "'totalCred', 'credOverTime', 'credShare'] return self.cache['df_rank'] def get_grain_distribution(self) -> pd.DataFrame: \"\"\" Returns the history", "as pd from typing import Any, Dict, List, Tuple class CredData(): \"\"\" Parses", "node['credOverTime'] = self.cred_data['nodeOverTime'][i]['cred'] if self.cred_data['nodeOverTime'][i] else [] node['description'] = self.weighted_graph['graphJSON'][1]['nodes'][i]['description'] node['timestamp'] = self.weighted_graph['graphJSON'][1]['nodes'][i]['timestampMs']", "node['description'] = self.weighted_graph['graphJSON'][1]['nodes'][i]['description'] node['timestamp'] = self.weighted_graph['graphJSON'][1]['nodes'][i]['timestampMs'] node['user'] = self.weighted_graph['graphJSON'][1]['nodes'][i]['description'] if node['address.nodeType'] == 'IDENTITY'", "self.cache['df'] def get_cred_flow_from_graph(self) -> Tuple[pd.DataFrame, pd.DataFrame]: \"\"\" Gets cred flow through nodes &", "== 'IDENTITY' else None return node @property def total_nodes(self) -> int: \"\"\" Total", "= 
f'{address[0]}/{address[1]}' node['address.nodeType'] = address[2] node['address.id'] = address[3] node['totalCred'] = self.cred_data['nodeSummaries'][i]['cred'] node['credOverTime'] =", "Any, Dict, List, Tuple class CredData(): \"\"\" Parses information from Sourcecred - Works", "Returns all nodes in the graph \"\"\" return [self.get_node(i) for i in range(self.total_nodes)]", "pd.json_normalize(self.nodes) self.cache['df'].timestamp = pd.to_datetime(self.cache['df'].timestamp, unit='ms') # distributedCred = self.df.totalCred.sum() # self.df['credShare'] = self.df.totalCred", "cred flow through nodes & edges in the cred graph. \"\"\" if self.cache['df_cred_eflow']", "None, 'df_cred_ot': None, 'df_cred_eflow': None, 'df_cred_nflow': None, } def get_weighted_graph(self, data) -> Dict[str,", "get_node(self, i: int) -> Dict[str, Any]: \"\"\" Returns specifc node's information \"\"\" node", "return len(self.cred_data['nodeSummaries']) @property def nodes(self) -> List[Any]: \"\"\" Returns all nodes in the", "'weight': et['defaultWeight']} for et in plugin['edgeTypes']], 'nodeTypes': [{'prefix': nt['prefix'], 'weight': nt['defaultWeight']} for nt", "__repr__(self) -> str: return \"<{} - ({} nodes & {} distributed CRED)>\".format(self.__class__.__name__, self.total_nodes,", "edges = [] nodes = [] # edges_weights = dict() # nodes_weights =", "None: self.cache['df'] = pd.json_normalize(self.nodes) self.cache['df'].timestamp = pd.to_datetime(self.cache['df'].timestamp, unit='ms') # distributedCred = self.df.totalCred.sum() #", "'')] = [ df_ew[df_ew.edge.str.startswith(e)].backward.sum(), df_ew[df_ew.edge.str.startswith(e)].forward.sum() ] self.cache['df_cred_eflow'] = pd.json_normalize(cred_edges).T self.cache['df_cred_eflow']['backward'] = self.cache['df_cred_eflow'].iloc[:,0].apply(lambda x:", "= self.weighted_graph['graphJSON'][1]['nodes'][i]['description'] node['timestamp'] = self.weighted_graph['graphJSON'][1]['nodes'][i]['timestampMs'] node['user'] = 
self.weighted_graph['graphJSON'][1]['nodes'][i]['description'] if node['address.nodeType'] == 'IDENTITY' else", "grain distribution \"\"\" if self.cache['df_grain'] is None: grain_history = [acc for acc in", "plugin_meta}) # EDGES df_ew = pd.DataFrame([self.weighted_graph['weightsJSON'][1]['edgeWeights'].keys(), [v['backwards'] for v in self.weighted_graph['weightsJSON'][1]['edgeWeights'].values()], [v['forwards'] for", "self.cache['df'].totalCred.sum() @property def distributed_grain(self) -> float: \"\"\" Returns total distributed grain \"\"\" if", "accounts info from 'output/accounts.json' file \"\"\" if self.cache['df_accounts'] is None: self.cache['df_accounts'] = pd.json_normalize(self.accounts_data['accounts'])", "'IDENTITY' else None return node @property def total_nodes(self) -> int: \"\"\" Total amount", "et in plugin_meta[plugin['name']]['edgeTypes']: # edges_weights[et['prefix']] = et['weight'] nodes.extend([nt['prefix'] for nt in plugin_meta[plugin['name']]['nodeTypes']]) #", "nodes (users, posts, etc) in the graph \"\"\" return len(self.cred_data['nodeSummaries']) @property def nodes(self)", "= pd.json_normalize(cred_edges).T self.cache['df_cred_eflow']['backward'] = self.cache['df_cred_eflow'].iloc[:,0].apply(lambda x: x[0]) self.cache['df_cred_eflow']['forward'] = self.cache['df_cred_eflow'].iloc[:,0].apply(lambda x: x[1]) self.cache['df_cred_eflow']['plugin']", "if self.cache['df_cred_eflow'] is None: def set_plugin(label): for prefix, plugin in plugin_prefixes.items(): if label.startswith(prefix):", "} edges.extend([et['prefix'] for et in plugin_meta[plugin['name']]['edgeTypes']]) # for et in plugin_meta[plugin['name']]['edgeTypes']: # edges_weights[et['prefix']]", "plugin_meta[plugin['name']]['nodeTypes']: # nodes_weights[nt['prefix']] = nt['weight'] plugin_prefixes = {plugin_meta[p_name]['nodePrefix'].replace('\\x00', ''): p_name for p_name in", "node = dict() address = self.weighted_graph['graphJSON'][1]['sortedNodeAddresses'][i] 
node['address.source'] = f'{address[0]}/{address[1]}' node['address.nodeType'] = address[2] node['address.id']", "return node @property def total_nodes(self) -> int: \"\"\" Total amount of nodes (users,", "the history of grain distribution \"\"\" if self.cache['df_grain'] is None: grain_history = [acc", "= pd.DataFrame([self.weighted_graph['weightsJSON'][1]['edgeWeights'].keys(), [v['backwards'] for v in self.weighted_graph['weightsJSON'][1]['edgeWeights'].values()], [v['forwards'] for v in self.weighted_graph['weightsJSON'][1]['edgeWeights'].values()] ]).T", "graph \"\"\" if self.cache['df'] is None: self.to_df() return self.cache['df'][self.cache['df']['address.nodeType'] == 'IDENTITY'] def get_user_ranking(self)", "self.cache['df_cred_nflow'] = pd.json_normalize(cred_nodes).T self.cache['df_cred_nflow'].columns = ['weight'] self.cache['df_cred_nflow']['plugin'] = self.cache['df_cred_nflow'].index.map(set_plugin) return (self.cache['df_cred_nflow'], self.cache['df_cred_eflow']) def", "def intervals(self, to_datetime=False) -> List[Any]: \"\"\" Returns timestamp intervals where cred was computed", "= address[3] node['totalCred'] = self.cred_data['nodeSummaries'][i]['cred'] node['credOverTime'] = self.cred_data['nodeOverTime'][i]['cred'] if self.cred_data['nodeOverTime'][i] else [] node['description']", "if self.cache['df'] is None: self.to_df() self.cache['df_cred_ot'] = pd.DataFrame([self.get_dt_intervals(), pd.DataFrame(self.cache['df'].credOverTime.to_list()).sum() ]).T self.cache['df_cred_ot'].columns = ['credTimestampMs',", "# EDGES df_ew = pd.DataFrame([self.weighted_graph['weightsJSON'][1]['edgeWeights'].keys(), [v['backwards'] for v in self.weighted_graph['weightsJSON'][1]['edgeWeights'].values()], [v['forwards'] for v", "file \"\"\" if self.cache['df_accounts'] is None: self.cache['df_accounts'] = pd.json_normalize(self.accounts_data['accounts']) self.cache['df_accounts']['account.balance'] = self.cache['df_accounts']['account.balance'].map(float) /", 
"None: self.get_grain_distribution() return self.cache['df_grain'].amount.sum() @property def accounts(self) -> pd.DataFrame: \"\"\" Returns user accounts", "= (self.cache['df_rank'].totalCred / distributed_cred) * 100 df_rank_p = self.get_user_nodes()[['address.id', 'totalCred', 'credOverTime']] distributed_cred =", "plugin_meta[plugin['name']]['edgeTypes']]) # for et in plugin_meta[plugin['name']]['edgeTypes']: # edges_weights[et['prefix']] = et['weight'] nodes.extend([nt['prefix'] for nt", "df_ew[df_ew.edge.str.startswith(e)].backward.sum(), df_ew[df_ew.edge.str.startswith(e)].forward.sum() ] self.cache['df_cred_eflow'] = pd.json_normalize(cred_edges).T self.cache['df_cred_eflow']['backward'] = self.cache['df_cred_eflow'].iloc[:,0].apply(lambda x: x[0]) self.cache['df_cred_eflow']['forward'] =", "is None: self.to_df() return self.cache['df'][self.cache['df']['address.nodeType'] == 'IDENTITY'] def get_user_ranking(self) -> pd.DataFrame: \"\"\" Returns", "= pd.DataFrame([self.get_dt_intervals(), [0.] 
* len(self.intervals)]).T self.cache['df_grain'].columns = ['credTimestampMs', 'amount'] return self.cache['df_grain'] def get_cred_over_time(self)", "# distributedCred = self.df.totalCred.sum() # self.df['credShare'] = self.df.totalCred / distributedCred return self.cache['df'] def", "self.cache['df_cred_eflow'].drop(columns=[0], inplace=True) # NODES df_nw = pd.DataFrame([self.weighted_graph['weightsJSON'][1]['nodeWeights'].keys(), self.weighted_graph['weightsJSON'][1]['nodeWeights'].values() ]).T df_nw.columns = ['node', 'weight']", "= self.weighted_graph['graphJSON'][1]['nodes'][i]['timestampMs'] node['user'] = self.weighted_graph['graphJSON'][1]['nodes'][i]['description'] if node['address.nodeType'] == 'IDENTITY' else None return node", "'nodePrefix': plugin['nodePrefix'], 'edgePrefix': plugin['edgePrefix'], 'edgeTypes': [{'prefix': et['prefix'], 'weight': et['defaultWeight']} for et in plugin['edgeTypes']],", "self.cred_data def get_node(self, i: int) -> Dict[str, Any]: \"\"\" Returns specifc node's information", "self.cred_data['nodeOverTime'][i] else [] node['description'] = self.weighted_graph['graphJSON'][1]['nodes'][i]['description'] node['timestamp'] = self.weighted_graph['graphJSON'][1]['nodes'][i]['timestampMs'] node['user'] = self.weighted_graph['graphJSON'][1]['nodes'][i]['description'] if", "EDGES df_ew = pd.DataFrame([self.weighted_graph['weightsJSON'][1]['edgeWeights'].keys(), [v['backwards'] for v in self.weighted_graph['weightsJSON'][1]['edgeWeights'].values()], [v['forwards'] for v in", "in the graph \"\"\" return len(self.cred_data['nodeSummaries']) @property def nodes(self) -> List[Any]: \"\"\" Returns", "df_nw[df_nw.node.str.startswith(n)].weight.sum() self.cache['df_cred_nflow'] = pd.json_normalize(cred_nodes).T self.cache['df_cred_nflow'].columns = ['weight'] self.cache['df_cred_nflow']['plugin'] = self.cache['df_cred_nflow'].index.map(set_plugin) return (self.cache['df_cred_nflow'], self.cache['df_cred_eflow'])", 
"self.cache['df_cred_nflow'].index.map(set_plugin) return (self.cache['df_cred_nflow'], self.cache['df_cred_eflow']) def __repr__(self) -> str: return \"<{} - ({} nodes", "(self.cache['df_rank'].totalCred / distributed_cred) * 100 df_rank_p = self.get_user_nodes()[['address.id', 'totalCred', 'credOverTime']] distributed_cred = df_rank_p.totalCred.sum()", "= pd.DataFrame([self.weighted_graph['weightsJSON'][1]['nodeWeights'].keys(), self.weighted_graph['weightsJSON'][1]['nodeWeights'].values() ]).T df_nw.columns = ['node', 'weight'] cred_nodes = dict() for n", "\"\"\" Return intervals in datetime format \"\"\" return [datetime.fromtimestamp(interval[('endTimeMs')] / 1000) for interval", "self.weighted_graph['graphJSON'][1]['nodes'][i]['timestampMs'] node['user'] = self.weighted_graph['graphJSON'][1]['nodes'][i]['description'] if node['address.nodeType'] == 'IDENTITY' else None return node @property", "df_ew = pd.DataFrame([self.weighted_graph['weightsJSON'][1]['edgeWeights'].keys(), [v['backwards'] for v in self.weighted_graph['weightsJSON'][1]['edgeWeights'].values()], [v['forwards'] for v in self.weighted_graph['weightsJSON'][1]['edgeWeights'].values()]", "self.cache['df_cred_nflow']['plugin'] = self.cache['df_cred_nflow'].index.map(set_plugin) return (self.cache['df_cred_nflow'], self.cache['df_cred_eflow']) def __repr__(self) -> str: return \"<{} -", "if self.cache['df_grain'] is None: self.get_grain_distribution() return self.cache['df_grain'].amount.sum() @property def accounts(self) -> pd.DataFrame: \"\"\"", "self.cache['df_cred_eflow'] = pd.json_normalize(cred_edges).T self.cache['df_cred_eflow']['backward'] = self.cache['df_cred_eflow'].iloc[:,0].apply(lambda x: x[0]) self.cache['df_cred_eflow']['forward'] = self.cache['df_cred_eflow'].iloc[:,0].apply(lambda x: x[1])", "def distributed_cred(self) -> float: \"\"\" Returns total distributed cred \"\"\" if self.cache['df'] is", "['credTimestampMs', 'amount'] self.cache['df_cred_ot'].set_index('credTimestampMs', 
drop=True, inplace=True) return self.cache['df_cred_ot'] def to_df(self) -> pd.DataFrame: \"\"\" Retuns", "self.get_grain_distribution() return self.cache['df_grain'].amount.sum() @property def accounts(self) -> pd.DataFrame: \"\"\" Returns user accounts info", "1e18 self.cache['df_accounts']['account.paid'] = self.cache['df_accounts']['account.paid'].map(float) / 1e18 return self.cache['df_accounts'] def get_user_nodes(self) -> pd.DataFrame: \"\"\"", "['weight'] self.cache['df_cred_nflow']['plugin'] = self.cache['df_cred_nflow'].index.map(set_plugin) return (self.cache['df_cred_nflow'], self.cache['df_cred_eflow']) def __repr__(self) -> str: return \"<{}", "get_dt_intervals(self) -> List[Any]: \"\"\" Return intervals in datetime format \"\"\" return [datetime.fromtimestamp(interval[('endTimeMs')] /", "return (self.cache['df_cred_nflow'], self.cache['df_cred_eflow']) def __repr__(self) -> str: return \"<{} - ({} nodes &", "edges_weights = dict() # nodes_weights = dict() for plugin in self.cred_json_data[1]['plugins'][1]: plugin_meta[plugin['name']] =", "= self.cred_data['nodeOverTime'][i]['cred'] if self.cred_data['nodeOverTime'][i] else [] node['description'] = self.weighted_graph['graphJSON'][1]['nodes'][i]['description'] node['timestamp'] = self.weighted_graph['graphJSON'][1]['nodes'][i]['timestampMs'] node['user']", "'df_cred_eflow': None, 'df_cred_nflow': None, } def get_weighted_graph(self, data) -> Dict[str, Any]: \"\"\" Weighted", "'active', 'grainBalance', 'grainPaid', 'totalCred', 'credOverTime', 'credShare'] return self.cache['df_rank'] def get_grain_distribution(self) -> pd.DataFrame: \"\"\"", "pd.DataFrame(self.cache['df'].credOverTime.to_list()).sum() ]).T self.cache['df_cred_ot'].columns = ['credTimestampMs', 'amount'] self.cache['df_cred_ot'].set_index('credTimestampMs', drop=True, inplace=True) return self.cache['df_cred_ot'] def to_df(self)", "\"\"\" return self.cred_data def get_node(self, i: int) -> Dict[str, Any]: \"\"\" Returns specifc", 
"node['address.source'] = f'{address[0]}/{address[1]}' node['address.nodeType'] = address[2] node['address.id'] = address[3] node['totalCred'] = self.cred_data['nodeSummaries'][i]['cred'] node['credOverTime']", "nodes in the graph \"\"\" if self.cache['df'] is None: self.to_df() return self.cache['df'][self.cache['df']['address.nodeType'] ==", "= cred_data[1]['weightedGraph'][1] self.cred_data = cred_data[1]['credData'] self.accounts_data = accounts_data self.cache = { 'df': None,", "history of grain distribution \"\"\" if self.cache['df_grain'] is None: grain_history = [acc for", "of grain distribution \"\"\" if self.cache['df_grain'] is None: grain_history = [acc for acc", "self.df['credShare'] = self.df.totalCred / distributedCred return self.cache['df'] def get_cred_flow_from_graph(self) -> Tuple[pd.DataFrame, pd.DataFrame]: \"\"\"", "\"\"\" Weighted graph from CredResult JSON data \"\"\" return self.weighted_graph def get_cred_data(self) ->", "'type', 'active', 'grainBalance', 'grainPaid', 'totalCred', 'credOverTime', 'credShare'] return self.cache['df_rank'] def get_grain_distribution(self) -> pd.DataFrame:", "in the graph \"\"\" return [self.get_node(i) for i in range(self.total_nodes)] @property def intervals(self,", "is None: self.get_grain_distribution() return self.cache['df_grain'].amount.sum() @property def accounts(self) -> pd.DataFrame: \"\"\" Returns user", "if self.cred_data['nodeOverTime'][i] else [] node['description'] = self.weighted_graph['graphJSON'][1]['nodes'][i]['description'] node['timestamp'] = self.weighted_graph['graphJSON'][1]['nodes'][i]['timestampMs'] node['user'] = self.weighted_graph['graphJSON'][1]['nodes'][i]['description']", "nt['weight'] plugin_prefixes = {plugin_meta[p_name]['nodePrefix'].replace('\\x00', ''): p_name for p_name in plugin_meta} plugin_prefixes.update({plugin_meta[p_name]['edgePrefix'].replace('\\x00', ''): p_name", "cred_data[1]['weightedGraph'][1] self.cred_data = cred_data[1]['credData'] self.accounts_data = 
accounts_data self.cache = { 'df': None, 'df_rank':", "self.cache['df_rank'].totalCred.sum() # self.cache['df_rank']['credShare'] = (self.cache['df_rank'].totalCred / distributed_cred) * 100 df_rank_p = self.get_user_nodes()[['address.id', 'totalCred',", "= dict() address = self.weighted_graph['graphJSON'][1]['sortedNodeAddresses'][i] node['address.source'] = f'{address[0]}/{address[1]}' node['address.nodeType'] = address[2] node['address.id'] =", "as a DataFrame \"\"\" if self.cache['df'] is None: self.cache['df'] = pd.json_normalize(self.nodes) self.cache['df'].timestamp =", "for v in self.weighted_graph['weightsJSON'][1]['edgeWeights'].values()], [v['forwards'] for v in self.weighted_graph['weightsJSON'][1]['edgeWeights'].values()] ]).T df_ew.columns = ['edge',", "-> float: \"\"\" Returns total distributed grain \"\"\" if self.cache['df_grain'] is None: self.get_grain_distribution()", "in plugin_meta} plugin_prefixes.update({plugin_meta[p_name]['edgePrefix'].replace('\\x00', ''): p_name for p_name in plugin_meta}) # EDGES df_ew =", "from typing import Any, Dict, List, Tuple class CredData(): \"\"\" Parses information from", "def get_dt_intervals(self) -> List[Any]: \"\"\" Return intervals in datetime format \"\"\" return [datetime.fromtimestamp(interval[('endTimeMs')]", "grain_distribution = [{'credTimestampMs': record['credTimestampMs'], 'amount': int(record['grainReceipt']['amount']) / 1e18} \\ for acc in grain_history", "return self.cache['df_grain'] def get_cred_over_time(self) -> pd.DataFrame: \"\"\" Returns distributed cred summary over all", "\"\"\" return [datetime.fromtimestamp(interval[('endTimeMs')] / 1000) for interval in self.intervals] @property def distributed_cred(self) ->", "= pd.json_normalize(self.accounts_data['accounts']) self.cache['df_accounts']['account.balance'] = self.cache['df_accounts']['account.balance'].map(float) / 1e18 self.cache['df_accounts']['account.paid'] = self.cache['df_accounts']['account.paid'].map(float) / 1e18 return", 
"in self.cred_json_data[1]['plugins'][1]: plugin_meta[plugin['name']] = { 'nodePrefix': plugin['nodePrefix'], 'edgePrefix': plugin['edgePrefix'], 'edgeTypes': [{'prefix': et['prefix'], 'weight':", "return self.cred_data def get_node(self, i: int) -> Dict[str, Any]: \"\"\" Returns specifc node's", "-> float: \"\"\" Returns total distributed cred \"\"\" if self.cache['df'] is None: self.to_df()", "/ 1e18 return self.cache['df_accounts'] def get_user_nodes(self) -> pd.DataFrame: \"\"\" Returns user nodes in", "self.to_df() return self.cache['df'][self.cache['df']['address.nodeType'] == 'IDENTITY'] def get_user_ranking(self) -> pd.DataFrame: \"\"\" Returns the user", "# edges_weights[et['prefix']] = et['weight'] nodes.extend([nt['prefix'] for nt in plugin_meta[plugin['name']]['nodeTypes']]) # for nt in", "(sourcecred <= v0.7x) \"\"\" def __init__(self, cred_data, accounts_data): self.cred_json_data = cred_data self.weighted_graph =", "'account.active', 'account.balance', 'account.paid' ]] self.cache['df_rank'] = df_acc_p.join(df_rank_p, on='account.identity.id', how='inner' ).sort_values('totalCred', ascending=False).reset_index(drop=True) self.cache['df_rank'].columns =", "for p_name in plugin_meta}) # EDGES df_ew = pd.DataFrame([self.weighted_graph['weightsJSON'][1]['edgeWeights'].keys(), [v['backwards'] for v in", "if self.cache['df_rank'] is None: # self.cache['df_rank'] = self.get_user_nodes().sort_values('totalCred', ascending=False).reset_index(drop=True) # distributed_cred = self.cache['df_rank'].totalCred.sum()", "far \"\"\" if self.cache['df_rank'] is None: # self.cache['df_rank'] = self.get_user_nodes().sort_values('totalCred', ascending=False).reset_index(drop=True) # distributed_cred", "accounts_data self.cache = { 'df': None, 'df_rank': None, 'df_grain': None, 'df_accounts': None, 'df_cred_ot':", "[acc for acc in self.accounts_data['accounts'] if 'allocationHistory' in acc['account']] if len(grain_history) > 0:", "\\ for acc in grain_history for record in 
acc['account']['allocationHistory']] self.cache['df_grain'] = pd.json_normalize(grain_distribution) self.cache['df_grain']['credTimestampMs']", "a DataFrame \"\"\" if self.cache['df'] is None: self.cache['df'] = pd.json_normalize(self.nodes) self.cache['df'].timestamp = pd.to_datetime(self.cache['df'].timestamp,", "dict() for plugin in self.cred_json_data[1]['plugins'][1]: plugin_meta[plugin['name']] = { 'nodePrefix': plugin['nodePrefix'], 'edgePrefix': plugin['edgePrefix'], 'edgeTypes':", "df_nw.columns = ['node', 'weight'] cred_nodes = dict() for n in nodes: cred_nodes[n.replace('\\x00', '')]", "Returns specifc node's information \"\"\" node = dict() address = self.weighted_graph['graphJSON'][1]['sortedNodeAddresses'][i] node['address.source'] =", "self.cache['df_rank'] = self.get_user_nodes().sort_values('totalCred', ascending=False).reset_index(drop=True) # distributed_cred = self.cache['df_rank'].totalCred.sum() # self.cache['df_rank']['credShare'] = (self.cache['df_rank'].totalCred /", "intervals in datetime format \"\"\" return [datetime.fromtimestamp(interval[('endTimeMs')] / 1000) for interval in self.intervals]", "from datetime import datetime import pandas as pd from typing import Any, Dict,", "dict() edges = [] nodes = [] # edges_weights = dict() # nodes_weights", "\"\"\" node = dict() address = self.weighted_graph['graphJSON'][1]['sortedNodeAddresses'][i] node['address.source'] = f'{address[0]}/{address[1]}' node['address.nodeType'] = address[2]", "cred_data self.weighted_graph = cred_data[1]['weightedGraph'][1] self.cred_data = cred_data[1]['credData'] self.accounts_data = accounts_data self.cache = {", "in plugin_meta[plugin['name']]['nodeTypes']: # nodes_weights[nt['prefix']] = nt['weight'] plugin_prefixes = {plugin_meta[p_name]['nodePrefix'].replace('\\x00', ''): p_name for p_name", "self.weighted_graph = cred_data[1]['weightedGraph'][1] self.cred_data = cred_data[1]['credData'] self.accounts_data = accounts_data self.cache = { 'df':", "is None: 
self.to_df() return self.cache['df'].totalCred.sum() @property def distributed_grain(self) -> float: \"\"\" Returns total", "f'{address[0]}/{address[1]}' node['address.nodeType'] = address[2] node['address.id'] = address[3] node['totalCred'] = self.cred_data['nodeSummaries'][i]['cred'] node['credOverTime'] = self.cred_data['nodeOverTime'][i]['cred']", "def accounts(self) -> pd.DataFrame: \"\"\" Returns user accounts info from 'output/accounts.json' file \"\"\"", "None: # self.cache['df_rank'] = self.get_user_nodes().sort_values('totalCred', ascending=False).reset_index(drop=True) # distributed_cred = self.cache['df_rank'].totalCred.sum() # self.cache['df_rank']['credShare'] =", "'user', 'type', 'active', 'grainBalance', 'grainPaid', 'totalCred', 'credOverTime', 'credShare'] return self.cache['df_rank'] def get_grain_distribution(self) ->", "'Not Found' # PREPROCESSING plugin_meta = dict() edges = [] nodes = []", "} def get_weighted_graph(self, data) -> Dict[str, Any]: \"\"\" Weighted graph from CredResult JSON", "] self.cache['df_cred_eflow'] = pd.json_normalize(cred_edges).T self.cache['df_cred_eflow']['backward'] = self.cache['df_cred_eflow'].iloc[:,0].apply(lambda x: x[0]) self.cache['df_cred_eflow']['forward'] = self.cache['df_cred_eflow'].iloc[:,0].apply(lambda x:", "PREPROCESSING plugin_meta = dict() edges = [] nodes = [] # edges_weights =", "'edgePrefix': plugin['edgePrefix'], 'edgeTypes': [{'prefix': et['prefix'], 'weight': et['defaultWeight']} for et in plugin['edgeTypes']], 'nodeTypes': [{'prefix':", "]).T df_nw.columns = ['node', 'weight'] cred_nodes = dict() for n in nodes: cred_nodes[n.replace('\\x00',", "to_df(self) -> pd.DataFrame: \"\"\" Retuns all nodes data as a DataFrame \"\"\" if", "Dict[str, Any]: \"\"\" Raw CredResult JSON data \"\"\" return self.cred_data def get_node(self, i:", "return 'Not Found' # PREPROCESSING plugin_meta = dict() edges = [] nodes =", "from 'output/accounts.json' file \"\"\" if self.cache['df_accounts'] is None: 
self.cache['df_accounts'] = pd.json_normalize(self.accounts_data['accounts']) self.cache['df_accounts']['account.balance'] =", "df_rank_p['credShare'] = (df_rank_p.totalCred / distributed_cred) * 100 df_rank_p.set_index('address.id', inplace=True) df_acc_p = self.accounts[['account.identity.id', 'account.identity.name',", "et['defaultWeight']} for et in plugin['edgeTypes']], 'nodeTypes': [{'prefix': nt['prefix'], 'weight': nt['defaultWeight']} for nt in", "p_name in plugin_meta} plugin_prefixes.update({plugin_meta[p_name]['edgePrefix'].replace('\\x00', ''): p_name for p_name in plugin_meta}) # EDGES df_ew", "if label.startswith(prefix): return plugin return 'Not Found' # PREPROCESSING plugin_meta = dict() edges", "int(record['grainReceipt']['amount']) / 1e18} \\ for acc in grain_history for record in acc['account']['allocationHistory']] self.cache['df_grain']", "distributed cred \"\"\" if self.cache['df'] is None: self.to_df() return self.cache['df'].totalCred.sum() @property def distributed_grain(self)", "info from 'output/accounts.json' file \"\"\" if self.cache['df_accounts'] is None: self.cache['df_accounts'] = pd.json_normalize(self.accounts_data['accounts']) self.cache['df_accounts']['account.balance']", "user accounts info from 'output/accounts.json' file \"\"\" if self.cache['df_accounts'] is None: self.cache['df_accounts'] =", "get_user_nodes(self) -> pd.DataFrame: \"\"\" Returns user nodes in the graph \"\"\" if self.cache['df']", "{plugin_meta[p_name]['nodePrefix'].replace('\\x00', ''): p_name for p_name in plugin_meta} plugin_prefixes.update({plugin_meta[p_name]['edgePrefix'].replace('\\x00', ''): p_name for p_name in", "def set_plugin(label): for prefix, plugin in plugin_prefixes.items(): if label.startswith(prefix): return plugin return 'Not", "distributed_cred = df_rank_p.totalCred.sum() df_rank_p['credShare'] = (df_rank_p.totalCred / distributed_cred) * 100 df_rank_p.set_index('address.id', inplace=True) df_acc_p", "return self.cache['df'] def 
get_cred_flow_from_graph(self) -> Tuple[pd.DataFrame, pd.DataFrame]: \"\"\" Gets cred flow through nodes", "nodes_weights[nt['prefix']] = nt['weight'] plugin_prefixes = {plugin_meta[p_name]['nodePrefix'].replace('\\x00', ''): p_name for p_name in plugin_meta} plugin_prefixes.update({plugin_meta[p_name]['edgePrefix'].replace('\\x00',", "* len(self.intervals)]).T self.cache['df_grain'].columns = ['credTimestampMs', 'amount'] return self.cache['df_grain'] def get_cred_over_time(self) -> pd.DataFrame: \"\"\"", "timestamp intervals where cred was computed \"\"\" return self.cred_data['intervals'] def get_dt_intervals(self) -> List[Any]:", "is None: self.cache['df_accounts'] = pd.json_normalize(self.accounts_data['accounts']) self.cache['df_accounts']['account.balance'] = self.cache['df_accounts']['account.balance'].map(float) / 1e18 self.cache['df_accounts']['account.paid'] = self.cache['df_accounts']['account.paid'].map(float)", "len(self.intervals)]).T self.cache['df_grain'].columns = ['credTimestampMs', 'amount'] return self.cache['df_grain'] def get_cred_over_time(self) -> pd.DataFrame: \"\"\" Returns", "p_name for p_name in plugin_meta} plugin_prefixes.update({plugin_meta[p_name]['edgePrefix'].replace('\\x00', ''): p_name for p_name in plugin_meta}) #", "typing import Any, Dict, List, Tuple class CredData(): \"\"\" Parses information from Sourcecred", "distributed grain \"\"\" if self.cache['df_grain'] is None: self.get_grain_distribution() return self.cache['df_grain'].amount.sum() @property def accounts(self)", "self.cache['df_accounts'] = pd.json_normalize(self.accounts_data['accounts']) self.cache['df_accounts']['account.balance'] = self.cache['df_accounts']['account.balance'].map(float) / 1e18 self.cache['df_accounts']['account.paid'] = self.cache['df_accounts']['account.paid'].map(float) / 1e18", "in edges: cred_edges[e.replace('\\x00', '')] = [ df_ew[df_ew.edge.str.startswith(e)].backward.sum(), df_ew[df_ew.edge.str.startswith(e)].forward.sum() ] 
self.cache['df_cred_eflow'] = pd.json_normalize(cred_edges).T self.cache['df_cred_eflow']['backward']", "float: \"\"\" Returns total distributed grain \"\"\" if self.cache['df_grain'] is None: self.get_grain_distribution() return", "(users, posts, etc) in the graph \"\"\" return len(self.cred_data['nodeSummaries']) @property def nodes(self) ->", "format (sourcecred <= v0.7x) \"\"\" def __init__(self, cred_data, accounts_data): self.cred_json_data = cred_data self.weighted_graph", "CredResult JSON data \"\"\" return self.weighted_graph def get_cred_data(self) -> Dict[str, Any]: \"\"\" Raw", "address = self.weighted_graph['graphJSON'][1]['sortedNodeAddresses'][i] node['address.source'] = f'{address[0]}/{address[1]}' node['address.nodeType'] = address[2] node['address.id'] = address[3] node['totalCred']", "= pd.to_datetime(self.cache['df_grain']['credTimestampMs'], unit='ms') else: # zeros self.cache['df_grain'] = pd.DataFrame([self.get_dt_intervals(), [0.] * len(self.intervals)]).T self.cache['df_grain'].columns", "datetime import datetime import pandas as pd from typing import Any, Dict, List,", "Any]: \"\"\" Weighted graph from CredResult JSON data \"\"\" return self.weighted_graph def get_cred_data(self)", "= cred_data self.weighted_graph = cred_data[1]['weightedGraph'][1] self.cred_data = cred_data[1]['credData'] self.accounts_data = accounts_data self.cache =", "Returns total distributed cred \"\"\" if self.cache['df'] is None: self.to_df() return self.cache['df'].totalCred.sum() @property", "zeros self.cache['df_grain'] = pd.DataFrame([self.get_dt_intervals(), [0.] 
* len(self.intervals)]).T self.cache['df_grain'].columns = ['credTimestampMs', 'amount'] return self.cache['df_grain']", "'df': None, 'df_rank': None, 'df_grain': None, 'df_accounts': None, 'df_cred_ot': None, 'df_cred_eflow': None, 'df_cred_nflow':", "ascending=False).reset_index(drop=True) self.cache['df_rank'].columns = ['id', 'user', 'type', 'active', 'grainBalance', 'grainPaid', 'totalCred', 'credOverTime', 'credShare'] return", "self.to_df() return self.cache['df'].totalCred.sum() @property def distributed_grain(self) -> float: \"\"\" Returns total distributed grain", "List, Tuple class CredData(): \"\"\" Parses information from Sourcecred - Works with TimelineCred", "distributed_cred(self) -> float: \"\"\" Returns total distributed cred \"\"\" if self.cache['df'] is None:", "self.intervals] @property def distributed_cred(self) -> float: \"\"\" Returns total distributed cred \"\"\" if", "over all intervals \"\"\" if self.cache['df_cred_ot'] is None: if self.cache['df'] is None: self.to_df()", "\"\"\" def __init__(self, cred_data, accounts_data): self.cred_json_data = cred_data self.weighted_graph = cred_data[1]['weightedGraph'][1] self.cred_data =", "-> List[Any]: \"\"\" Returns timestamp intervals where cred was computed \"\"\" return self.cred_data['intervals']", "total distributed grain \"\"\" if self.cache['df_grain'] is None: self.get_grain_distribution() return self.cache['df_grain'].amount.sum() @property def", "\"\"\" if self.cache['df_grain'] is None: grain_history = [acc for acc in self.accounts_data['accounts'] if", "'backward', 'forward'] cred_edges = dict() for e in edges: cred_edges[e.replace('\\x00', '')] = [", "self.weighted_graph['graphJSON'][1]['nodes'][i]['description'] node['timestamp'] = self.weighted_graph['graphJSON'][1]['nodes'][i]['timestampMs'] node['user'] = self.weighted_graph['graphJSON'][1]['nodes'][i]['description'] if node['address.nodeType'] == 'IDENTITY' else None", "= [] # edges_weights = dict() # nodes_weights = dict() for 
plugin in", "/ 1e18 self.cache['df_accounts']['account.paid'] = self.cache['df_accounts']['account.paid'].map(float) / 1e18 return self.cache['df_accounts'] def get_user_nodes(self) -> pd.DataFrame:", "in acc['account']] if len(grain_history) > 0: grain_distribution = [{'credTimestampMs': record['credTimestampMs'], 'amount': int(record['grainReceipt']['amount']) /", "= [ df_ew[df_ew.edge.str.startswith(e)].backward.sum(), df_ew[df_ew.edge.str.startswith(e)].forward.sum() ] self.cache['df_cred_eflow'] = pd.json_normalize(cred_edges).T self.cache['df_cred_eflow']['backward'] = self.cache['df_cred_eflow'].iloc[:,0].apply(lambda x: x[0])", "user raking by total amount of cred gained so far \"\"\" if self.cache['df_rank']", "self.cache['df_rank'] is None: # self.cache['df_rank'] = self.get_user_nodes().sort_values('totalCred', ascending=False).reset_index(drop=True) # distributed_cred = self.cache['df_rank'].totalCred.sum() #", "distributed_cred) * 100 df_rank_p.set_index('address.id', inplace=True) df_acc_p = self.accounts[['account.identity.id', 'account.identity.name', 'account.identity.subtype', 'account.active', 'account.balance', 'account.paid'", ").sort_values('totalCred', ascending=False).reset_index(drop=True) self.cache['df_rank'].columns = ['id', 'user', 'type', 'active', 'grainBalance', 'grainPaid', 'totalCred', 'credOverTime', 'credShare']", "return self.cache['df_accounts'] def get_user_nodes(self) -> pd.DataFrame: \"\"\" Returns user nodes in the graph", "self.cache['df_cred_eflow']) def __repr__(self) -> str: return \"<{} - ({} nodes & {} distributed", "= self.cache['df_rank'].totalCred.sum() # self.cache['df_rank']['credShare'] = (self.cache['df_rank'].totalCred / distributed_cred) * 100 df_rank_p = self.get_user_nodes()[['address.id',", "def get_weighted_graph(self, data) -> Dict[str, Any]: \"\"\" Weighted graph from CredResult JSON data", "for v in self.weighted_graph['weightsJSON'][1]['edgeWeights'].values()] ]).T df_ew.columns = ['edge', 'backward', 
'forward'] cred_edges = dict()", "n in nodes: cred_nodes[n.replace('\\x00', '')] = df_nw[df_nw.node.str.startswith(n)].weight.sum() self.cache['df_cred_nflow'] = pd.json_normalize(cred_nodes).T self.cache['df_cred_nflow'].columns = ['weight']", "@property def nodes(self) -> List[Any]: \"\"\" Returns all nodes in the graph \"\"\"", "[{'prefix': nt['prefix'], 'weight': nt['defaultWeight']} for nt in plugin['nodeTypes']], } edges.extend([et['prefix'] for et in", "# nodes_weights[nt['prefix']] = nt['weight'] plugin_prefixes = {plugin_meta[p_name]['nodePrefix'].replace('\\x00', ''): p_name for p_name in plugin_meta}", "cred_nodes[n.replace('\\x00', '')] = df_nw[df_nw.node.str.startswith(n)].weight.sum() self.cache['df_cred_nflow'] = pd.json_normalize(cred_nodes).T self.cache['df_cred_nflow'].columns = ['weight'] self.cache['df_cred_nflow']['plugin'] = self.cache['df_cred_nflow'].index.map(set_plugin)", "self.cache['df_accounts'] is None: self.cache['df_accounts'] = pd.json_normalize(self.accounts_data['accounts']) self.cache['df_accounts']['account.balance'] = self.cache['df_accounts']['account.balance'].map(float) / 1e18 self.cache['df_accounts']['account.paid'] =", "= self.cred_data['nodeSummaries'][i]['cred'] node['credOverTime'] = self.cred_data['nodeOverTime'][i]['cred'] if self.cred_data['nodeOverTime'][i] else [] node['description'] = self.weighted_graph['graphJSON'][1]['nodes'][i]['description'] node['timestamp']", "/ 1e18} \\ for acc in grain_history for record in acc['account']['allocationHistory']] self.cache['df_grain'] =", "cred_data[1]['credData'] self.accounts_data = accounts_data self.cache = { 'df': None, 'df_rank': None, 'df_grain': None,", "self.cache['df_cred_ot'].set_index('credTimestampMs', drop=True, inplace=True) return self.cache['df_cred_ot'] def to_df(self) -> pd.DataFrame: \"\"\" Retuns all nodes", "intervals where cred was computed \"\"\" return self.cred_data['intervals'] def get_dt_intervals(self) -> List[Any]: \"\"\"", "data format 
(sourcecred <= v0.7x) \"\"\" def __init__(self, cred_data, accounts_data): self.cred_json_data = cred_data", "= pd.json_normalize(cred_nodes).T self.cache['df_cred_nflow'].columns = ['weight'] self.cache['df_cred_nflow']['plugin'] = self.cache['df_cred_nflow'].index.map(set_plugin) return (self.cache['df_cred_nflow'], self.cache['df_cred_eflow']) def __repr__(self)", "et['prefix'], 'weight': et['defaultWeight']} for et in plugin['edgeTypes']], 'nodeTypes': [{'prefix': nt['prefix'], 'weight': nt['defaultWeight']} for", "def nodes(self) -> List[Any]: \"\"\" Returns all nodes in the graph \"\"\" return", "information from Sourcecred - Works with TimelineCred data format (sourcecred <= v0.7x) \"\"\"", "self.cache['df_cred_nflow'].columns = ['weight'] self.cache['df_cred_nflow']['plugin'] = self.cache['df_cred_nflow'].index.map(set_plugin) return (self.cache['df_cred_nflow'], self.cache['df_cred_eflow']) def __repr__(self) -> str:", "nt in plugin['nodeTypes']], } edges.extend([et['prefix'] for et in plugin_meta[plugin['name']]['edgeTypes']]) # for et in", "pd.DataFrame: \"\"\" Retuns all nodes data as a DataFrame \"\"\" if self.cache['df'] is", "None, 'df_rank': None, 'df_grain': None, 'df_accounts': None, 'df_cred_ot': None, 'df_cred_eflow': None, 'df_cred_nflow': None,", "def total_nodes(self) -> int: \"\"\" Total amount of nodes (users, posts, etc) in", "= self.weighted_graph['graphJSON'][1]['nodes'][i]['description'] if node['address.nodeType'] == 'IDENTITY' else None return node @property def total_nodes(self)", "> 0: grain_distribution = [{'credTimestampMs': record['credTimestampMs'], 'amount': int(record['grainReceipt']['amount']) / 1e18} \\ for acc", "node['totalCred'] = self.cred_data['nodeSummaries'][i]['cred'] node['credOverTime'] = self.cred_data['nodeOverTime'][i]['cred'] if self.cred_data['nodeOverTime'][i] else [] node['description'] = self.weighted_graph['graphJSON'][1]['nodes'][i]['description']", "if node['address.nodeType'] == 'IDENTITY' else None 
return node @property def total_nodes(self) -> int:", "'df_cred_ot': None, 'df_cred_eflow': None, 'df_cred_nflow': None, } def get_weighted_graph(self, data) -> Dict[str, Any]:", "\"\"\" if self.cache['df'] is None: self.to_df() return self.cache['df'].totalCred.sum() @property def distributed_grain(self) -> float:", "address[3] node['totalCred'] = self.cred_data['nodeSummaries'][i]['cred'] node['credOverTime'] = self.cred_data['nodeOverTime'][i]['cred'] if self.cred_data['nodeOverTime'][i] else [] node['description'] =", "'credOverTime', 'credShare'] return self.cache['df_rank'] def get_grain_distribution(self) -> pd.DataFrame: \"\"\" Returns the history of", "i: int) -> Dict[str, Any]: \"\"\" Returns specifc node's information \"\"\" node =", "by total amount of cred gained so far \"\"\" if self.cache['df_rank'] is None:", "nt['defaultWeight']} for nt in plugin['nodeTypes']], } edges.extend([et['prefix'] for et in plugin_meta[plugin['name']]['edgeTypes']]) # for", "[v['backwards'] for v in self.weighted_graph['weightsJSON'][1]['edgeWeights'].values()], [v['forwards'] for v in self.weighted_graph['weightsJSON'][1]['edgeWeights'].values()] ]).T df_ew.columns =", "'weight'] cred_nodes = dict() for n in nodes: cred_nodes[n.replace('\\x00', '')] = df_nw[df_nw.node.str.startswith(n)].weight.sum() self.cache['df_cred_nflow']", "len(grain_history) > 0: grain_distribution = [{'credTimestampMs': record['credTimestampMs'], 'amount': int(record['grainReceipt']['amount']) / 1e18} \\ for", "record in acc['account']['allocationHistory']] self.cache['df_grain'] = pd.json_normalize(grain_distribution) self.cache['df_grain']['credTimestampMs'] = pd.to_datetime(self.cache['df_grain']['credTimestampMs'], unit='ms') else: # zeros", "if 'allocationHistory' in acc['account']] if len(grain_history) > 0: grain_distribution = [{'credTimestampMs': record['credTimestampMs'], 'amount':", "total amount of cred gained so far \"\"\" if self.cache['df_rank'] is None: #", 
"self.cache['df_grain'].amount.sum() @property def accounts(self) -> pd.DataFrame: \"\"\" Returns user accounts info from 'output/accounts.json'", "Any]: \"\"\" Returns specifc node's information \"\"\" node = dict() address = self.weighted_graph['graphJSON'][1]['sortedNodeAddresses'][i]", "pd.DataFrame([self.weighted_graph['weightsJSON'][1]['edgeWeights'].keys(), [v['backwards'] for v in self.weighted_graph['weightsJSON'][1]['edgeWeights'].values()], [v['forwards'] for v in self.weighted_graph['weightsJSON'][1]['edgeWeights'].values()] ]).T df_ew.columns", "NODES df_nw = pd.DataFrame([self.weighted_graph['weightsJSON'][1]['nodeWeights'].keys(), self.weighted_graph['weightsJSON'][1]['nodeWeights'].values() ]).T df_nw.columns = ['node', 'weight'] cred_nodes = dict()", "get_grain_distribution(self) -> pd.DataFrame: \"\"\" Returns the history of grain distribution \"\"\" if self.cache['df_grain']", "= ['credTimestampMs', 'amount'] return self.cache['df_grain'] def get_cred_over_time(self) -> pd.DataFrame: \"\"\" Returns distributed cred", "= self.cache['df_accounts']['account.balance'].map(float) / 1e18 self.cache['df_accounts']['account.paid'] = self.cache['df_accounts']['account.paid'].map(float) / 1e18 return self.cache['df_accounts'] def get_user_nodes(self)", "return self.cache['df'][self.cache['df']['address.nodeType'] == 'IDENTITY'] def get_user_ranking(self) -> pd.DataFrame: \"\"\" Returns the user raking", "if len(grain_history) > 0: grain_distribution = [{'credTimestampMs': record['credTimestampMs'], 'amount': int(record['grainReceipt']['amount']) / 1e18} \\", "]).T self.cache['df_cred_ot'].columns = ['credTimestampMs', 'amount'] self.cache['df_cred_ot'].set_index('credTimestampMs', drop=True, inplace=True) return self.cache['df_cred_ot'] def to_df(self) ->", "= ['credTimestampMs', 'amount'] self.cache['df_cred_ot'].set_index('credTimestampMs', drop=True, inplace=True) return self.cache['df_cred_ot'] def to_df(self) -> pd.DataFrame: \"\"\"", "for et in 
plugin['edgeTypes']], 'nodeTypes': [{'prefix': nt['prefix'], 'weight': nt['defaultWeight']} for nt in plugin['nodeTypes']],", "self.cred_data['intervals'] def get_dt_intervals(self) -> List[Any]: \"\"\" Return intervals in datetime format \"\"\" return", "def distributed_grain(self) -> float: \"\"\" Returns total distributed grain \"\"\" if self.cache['df_grain'] is", "self.weighted_graph['graphJSON'][1]['sortedNodeAddresses'][i] node['address.source'] = f'{address[0]}/{address[1]}' node['address.nodeType'] = address[2] node['address.id'] = address[3] node['totalCred'] = self.cred_data['nodeSummaries'][i]['cred']", "-> int: \"\"\" Total amount of nodes (users, posts, etc) in the graph", "def __init__(self, cred_data, accounts_data): self.cred_json_data = cred_data self.weighted_graph = cred_data[1]['weightedGraph'][1] self.cred_data = cred_data[1]['credData']", "for et in plugin_meta[plugin['name']]['edgeTypes']: # edges_weights[et['prefix']] = et['weight'] nodes.extend([nt['prefix'] for nt in plugin_meta[plugin['name']]['nodeTypes']])", "float: \"\"\" Returns total distributed cred \"\"\" if self.cache['df'] is None: self.to_df() return", "= df_rank_p.totalCred.sum() df_rank_p['credShare'] = (df_rank_p.totalCred / distributed_cred) * 100 df_rank_p.set_index('address.id', inplace=True) df_acc_p =", "'df_cred_nflow': None, } def get_weighted_graph(self, data) -> Dict[str, Any]: \"\"\" Weighted graph from", "{ 'df': None, 'df_rank': None, 'df_grain': None, 'df_accounts': None, 'df_cred_ot': None, 'df_cred_eflow': None,", "Any]: \"\"\" Raw CredResult JSON data \"\"\" return self.cred_data def get_node(self, i: int)", "self.cache['df_cred_ot'] def to_df(self) -> pd.DataFrame: \"\"\" Retuns all nodes data as a DataFrame", "all nodes in the graph \"\"\" return [self.get_node(i) for i in range(self.total_nodes)] @property", "Returns the user raking by total amount of cred gained so far \"\"\"", "self.cred_json_data[1]['plugins'][1]: plugin_meta[plugin['name']] = { 
'nodePrefix': plugin['nodePrefix'], 'edgePrefix': plugin['edgePrefix'], 'edgeTypes': [{'prefix': et['prefix'], 'weight': et['defaultWeight']}", "pd.to_datetime(self.cache['df_grain']['credTimestampMs'], unit='ms') else: # zeros self.cache['df_grain'] = pd.DataFrame([self.get_dt_intervals(), [0.] * len(self.intervals)]).T self.cache['df_grain'].columns =", "in plugin_prefixes.items(): if label.startswith(prefix): return plugin return 'Not Found' # PREPROCESSING plugin_meta =", "self.cache['df_cred_ot'] is None: if self.cache['df'] is None: self.to_df() self.cache['df_cred_ot'] = pd.DataFrame([self.get_dt_intervals(), pd.DataFrame(self.cache['df'].credOverTime.to_list()).sum() ]).T", "for et in plugin_meta[plugin['name']]['edgeTypes']]) # for et in plugin_meta[plugin['name']]['edgeTypes']: # edges_weights[et['prefix']] = et['weight']", "self.get_user_nodes()[['address.id', 'totalCred', 'credOverTime']] distributed_cred = df_rank_p.totalCred.sum() df_rank_p['credShare'] = (df_rank_p.totalCred / distributed_cred) * 100", "data) -> Dict[str, Any]: \"\"\" Weighted graph from CredResult JSON data \"\"\" return", "plugin_prefixes.update({plugin_meta[p_name]['edgePrefix'].replace('\\x00', ''): p_name for p_name in plugin_meta}) # EDGES df_ew = pd.DataFrame([self.weighted_graph['weightsJSON'][1]['edgeWeights'].keys(), [v['backwards']", "'amount': int(record['grainReceipt']['amount']) / 1e18} \\ for acc in grain_history for record in acc['account']['allocationHistory']]", "summary over all intervals \"\"\" if self.cache['df_cred_ot'] is None: if self.cache['df'] is None:", "Raw CredResult JSON data \"\"\" return self.cred_data def get_node(self, i: int) -> Dict[str,", "ascending=False).reset_index(drop=True) # distributed_cred = self.cache['df_rank'].totalCred.sum() # self.cache['df_rank']['credShare'] = (self.cache['df_rank'].totalCred / distributed_cred) * 100", "[datetime.fromtimestamp(interval[('endTimeMs')] / 1000) for interval in self.intervals] @property def 
distributed_cred(self) -> float: \"\"\"", "cred gained so far \"\"\" if self.cache['df_rank'] is None: # self.cache['df_rank'] = self.get_user_nodes().sort_values('totalCred',", "plugin_prefixes = {plugin_meta[p_name]['nodePrefix'].replace('\\x00', ''): p_name for p_name in plugin_meta} plugin_prefixes.update({plugin_meta[p_name]['edgePrefix'].replace('\\x00', ''): p_name for", "'amount'] self.cache['df_cred_ot'].set_index('credTimestampMs', drop=True, inplace=True) return self.cache['df_cred_ot'] def to_df(self) -> pd.DataFrame: \"\"\" Retuns all", "for n in nodes: cred_nodes[n.replace('\\x00', '')] = df_nw[df_nw.node.str.startswith(n)].weight.sum() self.cache['df_cred_nflow'] = pd.json_normalize(cred_nodes).T self.cache['df_cred_nflow'].columns =", "data \"\"\" return self.cred_data def get_node(self, i: int) -> Dict[str, Any]: \"\"\" Returns", "100 df_rank_p = self.get_user_nodes()[['address.id', 'totalCred', 'credOverTime']] distributed_cred = df_rank_p.totalCred.sum() df_rank_p['credShare'] = (df_rank_p.totalCred /", "None: grain_history = [acc for acc in self.accounts_data['accounts'] if 'allocationHistory' in acc['account']] if", "None, 'df_cred_nflow': None, } def get_weighted_graph(self, data) -> Dict[str, Any]: \"\"\" Weighted graph", "graph. 
\"\"\" if self.cache['df_cred_eflow'] is None: def set_plugin(label): for prefix, plugin in plugin_prefixes.items():", "the graph \"\"\" return len(self.cred_data['nodeSummaries']) @property def nodes(self) -> List[Any]: \"\"\" Returns all", "-> pd.DataFrame: \"\"\" Returns the user raking by total amount of cred gained", "Returns timestamp intervals where cred was computed \"\"\" return self.cred_data['intervals'] def get_dt_intervals(self) ->", "'credOverTime']] distributed_cred = df_rank_p.totalCred.sum() df_rank_p['credShare'] = (df_rank_p.totalCred / distributed_cred) * 100 df_rank_p.set_index('address.id', inplace=True)", "plugin_meta} plugin_prefixes.update({plugin_meta[p_name]['edgePrefix'].replace('\\x00', ''): p_name for p_name in plugin_meta}) # EDGES df_ew = pd.DataFrame([self.weighted_graph['weightsJSON'][1]['edgeWeights'].keys(),", "edges: cred_edges[e.replace('\\x00', '')] = [ df_ew[df_ew.edge.str.startswith(e)].backward.sum(), df_ew[df_ew.edge.str.startswith(e)].forward.sum() ] self.cache['df_cred_eflow'] = pd.json_normalize(cred_edges).T self.cache['df_cred_eflow']['backward'] =", "# NODES df_nw = pd.DataFrame([self.weighted_graph['weightsJSON'][1]['nodeWeights'].keys(), self.weighted_graph['weightsJSON'][1]['nodeWeights'].values() ]).T df_nw.columns = ['node', 'weight'] cred_nodes =", "self.cache['df_cred_eflow']['plugin'] = self.cache['df_cred_eflow'].index.map(set_plugin) self.cache['df_cred_eflow'].drop(columns=[0], inplace=True) # NODES df_nw = pd.DataFrame([self.weighted_graph['weightsJSON'][1]['nodeWeights'].keys(), self.weighted_graph['weightsJSON'][1]['nodeWeights'].values() ]).T df_nw.columns", "\"\"\" if self.cache['df_rank'] is None: # self.cache['df_rank'] = self.get_user_nodes().sort_values('totalCred', ascending=False).reset_index(drop=True) # distributed_cred =", "nodes in the graph \"\"\" return [self.get_node(i) for i in range(self.total_nodes)] @property def", "total distributed cred \"\"\" if self.cache['df'] is None: 
self.to_df() return self.cache['df'].totalCred.sum() @property def", "[ df_ew[df_ew.edge.str.startswith(e)].backward.sum(), df_ew[df_ew.edge.str.startswith(e)].forward.sum() ] self.cache['df_cred_eflow'] = pd.json_normalize(cred_edges).T self.cache['df_cred_eflow']['backward'] = self.cache['df_cred_eflow'].iloc[:,0].apply(lambda x: x[0]) self.cache['df_cred_eflow']['forward']", "self.df.totalCred / distributedCred return self.cache['df'] def get_cred_flow_from_graph(self) -> Tuple[pd.DataFrame, pd.DataFrame]: \"\"\" Gets cred", "= dict() edges = [] nodes = [] # edges_weights = dict() #", "address[2] node['address.id'] = address[3] node['totalCred'] = self.cred_data['nodeSummaries'][i]['cred'] node['credOverTime'] = self.cred_data['nodeOverTime'][i]['cred'] if self.cred_data['nodeOverTime'][i] else", "cred_nodes = dict() for n in nodes: cred_nodes[n.replace('\\x00', '')] = df_nw[df_nw.node.str.startswith(n)].weight.sum() self.cache['df_cred_nflow'] =", "# PREPROCESSING plugin_meta = dict() edges = [] nodes = [] # edges_weights", "return self.cache['df_cred_ot'] def to_df(self) -> pd.DataFrame: \"\"\" Retuns all nodes data as a", "Found' # PREPROCESSING plugin_meta = dict() edges = [] nodes = [] #", "'df_accounts': None, 'df_cred_ot': None, 'df_cred_eflow': None, 'df_cred_nflow': None, } def get_weighted_graph(self, data) ->", "def get_user_nodes(self) -> pd.DataFrame: \"\"\" Returns user nodes in the graph \"\"\" if", "is None: grain_history = [acc for acc in self.accounts_data['accounts'] if 'allocationHistory' in acc['account']]", "-> pd.DataFrame: \"\"\" Returns user nodes in the graph \"\"\" if self.cache['df'] is", "= address[2] node['address.id'] = address[3] node['totalCred'] = self.cred_data['nodeSummaries'][i]['cred'] node['credOverTime'] = self.cred_data['nodeOverTime'][i]['cred'] if self.cred_data['nodeOverTime'][i]", "[{'prefix': et['prefix'], 'weight': et['defaultWeight']} for et in plugin['edgeTypes']], 'nodeTypes': [{'prefix': nt['prefix'], 'weight': 
nt['defaultWeight']}", "self.cache['df_cred_eflow']['forward'] = self.cache['df_cred_eflow'].iloc[:,0].apply(lambda x: x[1]) self.cache['df_cred_eflow']['plugin'] = self.cache['df_cred_eflow'].index.map(set_plugin) self.cache['df_cred_eflow'].drop(columns=[0], inplace=True) # NODES df_nw", "\"\"\" Returns the user raking by total amount of cred gained so far", "user nodes in the graph \"\"\" if self.cache['df'] is None: self.to_df() return self.cache['df'][self.cache['df']['address.nodeType']", "self.cache['df_cred_eflow'].iloc[:,0].apply(lambda x: x[0]) self.cache['df_cred_eflow']['forward'] = self.cache['df_cred_eflow'].iloc[:,0].apply(lambda x: x[1]) self.cache['df_cred_eflow']['plugin'] = self.cache['df_cred_eflow'].index.map(set_plugin) self.cache['df_cred_eflow'].drop(columns=[0], inplace=True)", "= pd.to_datetime(self.cache['df'].timestamp, unit='ms') # distributedCred = self.df.totalCred.sum() # self.df['credShare'] = self.df.totalCred / distributedCred", "\"\"\" Returns all nodes in the graph \"\"\" return [self.get_node(i) for i in", "1e18} \\ for acc in grain_history for record in acc['account']['allocationHistory']] self.cache['df_grain'] = pd.json_normalize(grain_distribution)", "= { 'df': None, 'df_rank': None, 'df_grain': None, 'df_accounts': None, 'df_cred_ot': None, 'df_cred_eflow':", "nodes: cred_nodes[n.replace('\\x00', '')] = df_nw[df_nw.node.str.startswith(n)].weight.sum() self.cache['df_cred_nflow'] = pd.json_normalize(cred_nodes).T self.cache['df_cred_nflow'].columns = ['weight'] self.cache['df_cred_nflow']['plugin'] =", "\"\"\" if self.cache['df_accounts'] is None: self.cache['df_accounts'] = pd.json_normalize(self.accounts_data['accounts']) self.cache['df_accounts']['account.balance'] = self.cache['df_accounts']['account.balance'].map(float) / 1e18", "for e in edges: cred_edges[e.replace('\\x00', '')] = [ df_ew[df_ew.edge.str.startswith(e)].backward.sum(), df_ew[df_ew.edge.str.startswith(e)].forward.sum() ] self.cache['df_cred_eflow'] =", 
"distributedCred return self.cache['df'] def get_cred_flow_from_graph(self) -> Tuple[pd.DataFrame, pd.DataFrame]: \"\"\" Gets cred flow through", "for record in acc['account']['allocationHistory']] self.cache['df_grain'] = pd.json_normalize(grain_distribution) self.cache['df_grain']['credTimestampMs'] = pd.to_datetime(self.cache['df_grain']['credTimestampMs'], unit='ms') else: #", "/ 1000) for interval in self.intervals] @property def distributed_cred(self) -> float: \"\"\" Returns", "self.cache['df_grain'] = pd.json_normalize(grain_distribution) self.cache['df_grain']['credTimestampMs'] = pd.to_datetime(self.cache['df_grain']['credTimestampMs'], unit='ms') else: # zeros self.cache['df_grain'] = pd.DataFrame([self.get_dt_intervals(),", "data as a DataFrame \"\"\" if self.cache['df'] is None: self.cache['df'] = pd.json_normalize(self.nodes) self.cache['df'].timestamp", "prefix, plugin in plugin_prefixes.items(): if label.startswith(prefix): return plugin return 'Not Found' # PREPROCESSING", "nt['prefix'], 'weight': nt['defaultWeight']} for nt in plugin['nodeTypes']], } edges.extend([et['prefix'] for et in plugin_meta[plugin['name']]['edgeTypes']])", "plugin['nodeTypes']], } edges.extend([et['prefix'] for et in plugin_meta[plugin['name']]['edgeTypes']]) # for et in plugin_meta[plugin['name']]['edgeTypes']: #", "'weight': nt['defaultWeight']} for nt in plugin['nodeTypes']], } edges.extend([et['prefix'] for et in plugin_meta[plugin['name']]['edgeTypes']]) #", "= self.weighted_graph['graphJSON'][1]['sortedNodeAddresses'][i] node['address.source'] = f'{address[0]}/{address[1]}' node['address.nodeType'] = address[2] node['address.id'] = address[3] node['totalCred'] =", "return self.cache['df'].totalCred.sum() @property def distributed_grain(self) -> float: \"\"\" Returns total distributed grain \"\"\"", "graph from CredResult JSON data \"\"\" return self.weighted_graph def get_cred_data(self) -> Dict[str, Any]:", "distributed_grain(self) -> float: \"\"\" Returns total 
distributed grain \"\"\" if self.cache['df_grain'] is None:", "df_rank_p = self.get_user_nodes()[['address.id', 'totalCred', 'credOverTime']] distributed_cred = df_rank_p.totalCred.sum() df_rank_p['credShare'] = (df_rank_p.totalCred / distributed_cred)", "pd.DataFrame: \"\"\" Returns distributed cred summary over all intervals \"\"\" if self.cache['df_cred_ot'] is", "['node', 'weight'] cred_nodes = dict() for n in nodes: cred_nodes[n.replace('\\x00', '')] = df_nw[df_nw.node.str.startswith(n)].weight.sum()", "self.cache['df'] = pd.json_normalize(self.nodes) self.cache['df'].timestamp = pd.to_datetime(self.cache['df'].timestamp, unit='ms') # distributedCred = self.df.totalCred.sum() # self.df['credShare']", "\"\"\" Returns total distributed cred \"\"\" if self.cache['df'] is None: self.to_df() return self.cache['df'].totalCred.sum()", "intervals \"\"\" if self.cache['df_cred_ot'] is None: if self.cache['df'] is None: self.to_df() self.cache['df_cred_ot'] =", "in the cred graph. \"\"\" if self.cache['df_cred_eflow'] is None: def set_plugin(label): for prefix,", "None: if self.cache['df'] is None: self.to_df() self.cache['df_cred_ot'] = pd.DataFrame([self.get_dt_intervals(), pd.DataFrame(self.cache['df'].credOverTime.to_list()).sum() ]).T self.cache['df_cred_ot'].columns =", "et['weight'] nodes.extend([nt['prefix'] for nt in plugin_meta[plugin['name']]['nodeTypes']]) # for nt in plugin_meta[plugin['name']]['nodeTypes']: # nodes_weights[nt['prefix']]", "0: grain_distribution = [{'credTimestampMs': record['credTimestampMs'], 'amount': int(record['grainReceipt']['amount']) / 1e18} \\ for acc in", "self.cache['df_cred_eflow'].index.map(set_plugin) self.cache['df_cred_eflow'].drop(columns=[0], inplace=True) # NODES df_nw = pd.DataFrame([self.weighted_graph['weightsJSON'][1]['nodeWeights'].keys(), self.weighted_graph['weightsJSON'][1]['nodeWeights'].values() ]).T df_nw.columns = ['node',", "[{'credTimestampMs': record['credTimestampMs'], 'amount': 
int(record['grainReceipt']['amount']) / 1e18} \\ for acc in grain_history for record", "len(self.cred_data['nodeSummaries']) @property def nodes(self) -> List[Any]: \"\"\" Returns all nodes in the graph", "else: # zeros self.cache['df_grain'] = pd.DataFrame([self.get_dt_intervals(), [0.] * len(self.intervals)]).T self.cache['df_grain'].columns = ['credTimestampMs', 'amount']", "= ['node', 'weight'] cred_nodes = dict() for n in nodes: cred_nodes[n.replace('\\x00', '')] =", "distribution \"\"\" if self.cache['df_grain'] is None: grain_history = [acc for acc in self.accounts_data['accounts']", "plugin_meta[plugin['name']]['edgeTypes']: # edges_weights[et['prefix']] = et['weight'] nodes.extend([nt['prefix'] for nt in plugin_meta[plugin['name']]['nodeTypes']]) # for nt", "v in self.weighted_graph['weightsJSON'][1]['edgeWeights'].values()], [v['forwards'] for v in self.weighted_graph['weightsJSON'][1]['edgeWeights'].values()] ]).T df_ew.columns = ['edge', 'backward',", "in plugin_meta[plugin['name']]['nodeTypes']]) # for nt in plugin_meta[plugin['name']]['nodeTypes']: # nodes_weights[nt['prefix']] = nt['weight'] plugin_prefixes =", "x[1]) self.cache['df_cred_eflow']['plugin'] = self.cache['df_cred_eflow'].index.map(set_plugin) self.cache['df_cred_eflow'].drop(columns=[0], inplace=True) # NODES df_nw = pd.DataFrame([self.weighted_graph['weightsJSON'][1]['nodeWeights'].keys(), self.weighted_graph['weightsJSON'][1]['nodeWeights'].values() ]).T", "'allocationHistory' in acc['account']] if len(grain_history) > 0: grain_distribution = [{'credTimestampMs': record['credTimestampMs'], 'amount': int(record['grainReceipt']['amount'])", "acc['account']['allocationHistory']] self.cache['df_grain'] = pd.json_normalize(grain_distribution) self.cache['df_grain']['credTimestampMs'] = pd.to_datetime(self.cache['df_grain']['credTimestampMs'], unit='ms') else: # zeros self.cache['df_grain'] =", "if self.cache['df_grain'] is None: grain_history = [acc for acc in 
self.accounts_data['accounts'] if 'allocationHistory'", "set_plugin(label): for prefix, plugin in plugin_prefixes.items(): if label.startswith(prefix): return plugin return 'Not Found'", "-> List[Any]: \"\"\" Returns all nodes in the graph \"\"\" return [self.get_node(i) for", "@property def distributed_cred(self) -> float: \"\"\" Returns total distributed cred \"\"\" if self.cache['df']", "return plugin return 'Not Found' # PREPROCESSING plugin_meta = dict() edges = []", "1000) for interval in self.intervals] @property def distributed_cred(self) -> float: \"\"\" Returns total", "pd.DataFrame: \"\"\" Returns the user raking by total amount of cred gained so", "/ distributed_cred) * 100 df_rank_p.set_index('address.id', inplace=True) df_acc_p = self.accounts[['account.identity.id', 'account.identity.name', 'account.identity.subtype', 'account.active', 'account.balance',", "def get_cred_over_time(self) -> pd.DataFrame: \"\"\" Returns distributed cred summary over all intervals \"\"\"", "pd.json_normalize(grain_distribution) self.cache['df_grain']['credTimestampMs'] = pd.to_datetime(self.cache['df_grain']['credTimestampMs'], unit='ms') else: # zeros self.cache['df_grain'] = pd.DataFrame([self.get_dt_intervals(), [0.] *", "'forward'] cred_edges = dict() for e in edges: cred_edges[e.replace('\\x00', '')] = [ df_ew[df_ew.edge.str.startswith(e)].backward.sum(),", "Tuple class CredData(): \"\"\" Parses information from Sourcecred - Works with TimelineCred data", "<= v0.7x) \"\"\" def __init__(self, cred_data, accounts_data): self.cred_json_data = cred_data self.weighted_graph = cred_data[1]['weightedGraph'][1]", "in datetime format \"\"\" return [datetime.fromtimestamp(interval[('endTimeMs')] / 1000) for interval in self.intervals] @property", "grain_history = [acc for acc in self.accounts_data['accounts'] if 'allocationHistory' in acc['account']] if len(grain_history)", "unit='ms') else: # zeros self.cache['df_grain'] = pd.DataFrame([self.get_dt_intervals(), [0.] 
* len(self.intervals)]).T self.cache['df_grain'].columns = ['credTimestampMs',", "'account.balance', 'account.paid' ]] self.cache['df_rank'] = df_acc_p.join(df_rank_p, on='account.identity.id', how='inner' ).sort_values('totalCred', ascending=False).reset_index(drop=True) self.cache['df_rank'].columns = ['id',", "pd.DataFrame: \"\"\" Returns the history of grain distribution \"\"\" if self.cache['df_grain'] is None:", "where cred was computed \"\"\" return self.cred_data['intervals'] def get_dt_intervals(self) -> List[Any]: \"\"\" Return", "self.cache['df'] is None: self.cache['df'] = pd.json_normalize(self.nodes) self.cache['df'].timestamp = pd.to_datetime(self.cache['df'].timestamp, unit='ms') # distributedCred =", "self.cache['df'] is None: self.to_df() return self.cache['df'][self.cache['df']['address.nodeType'] == 'IDENTITY'] def get_user_ranking(self) -> pd.DataFrame: \"\"\"", "v in self.weighted_graph['weightsJSON'][1]['edgeWeights'].values()] ]).T df_ew.columns = ['edge', 'backward', 'forward'] cred_edges = dict() for", "if self.cache['df_cred_ot'] is None: if self.cache['df'] is None: self.to_df() self.cache['df_cred_ot'] = pd.DataFrame([self.get_dt_intervals(), pd.DataFrame(self.cache['df'].credOverTime.to_list()).sum()", "through nodes & edges in the cred graph. 
\"\"\" if self.cache['df_cred_eflow'] is None:", "= dict() for n in nodes: cred_nodes[n.replace('\\x00', '')] = df_nw[df_nw.node.str.startswith(n)].weight.sum() self.cache['df_cred_nflow'] = pd.json_normalize(cred_nodes).T", "(df_rank_p.totalCred / distributed_cred) * 100 df_rank_p.set_index('address.id', inplace=True) df_acc_p = self.accounts[['account.identity.id', 'account.identity.name', 'account.identity.subtype', 'account.active',", "\"\"\" return len(self.cred_data['nodeSummaries']) @property def nodes(self) -> List[Any]: \"\"\" Returns all nodes in", "nodes.extend([nt['prefix'] for nt in plugin_meta[plugin['name']]['nodeTypes']]) # for nt in plugin_meta[plugin['name']]['nodeTypes']: # nodes_weights[nt['prefix']] =", "nt in plugin_meta[plugin['name']]['nodeTypes']: # nodes_weights[nt['prefix']] = nt['weight'] plugin_prefixes = {plugin_meta[p_name]['nodePrefix'].replace('\\x00', ''): p_name for", "pd.to_datetime(self.cache['df'].timestamp, unit='ms') # distributedCred = self.df.totalCred.sum() # self.df['credShare'] = self.df.totalCred / distributedCred return", "1e18 return self.cache['df_accounts'] def get_user_nodes(self) -> pd.DataFrame: \"\"\" Returns user nodes in the", "= {plugin_meta[p_name]['nodePrefix'].replace('\\x00', ''): p_name for p_name in plugin_meta} plugin_prefixes.update({plugin_meta[p_name]['edgePrefix'].replace('\\x00', ''): p_name for p_name", "@property def intervals(self, to_datetime=False) -> List[Any]: \"\"\" Returns timestamp intervals where cred was", "\"\"\" Returns distributed cred summary over all intervals \"\"\" if self.cache['df_cred_ot'] is None:", "cred summary over all intervals \"\"\" if self.cache['df_cred_ot'] is None: if self.cache['df'] is", "TimelineCred data format (sourcecred <= v0.7x) \"\"\" def __init__(self, cred_data, accounts_data): self.cred_json_data =", "self.cred_json_data = cred_data self.weighted_graph = cred_data[1]['weightedGraph'][1] self.cred_data = cred_data[1]['credData'] self.accounts_data = 
accounts_data self.cache", "edges.extend([et['prefix'] for et in plugin_meta[plugin['name']]['edgeTypes']]) # for et in plugin_meta[plugin['name']]['edgeTypes']: # edges_weights[et['prefix']] =", "Dict, List, Tuple class CredData(): \"\"\" Parses information from Sourcecred - Works with", "None, 'df_grain': None, 'df_accounts': None, 'df_cred_ot': None, 'df_cred_eflow': None, 'df_cred_nflow': None, } def", "List[Any]: \"\"\" Returns all nodes in the graph \"\"\" return [self.get_node(i) for i", "int) -> Dict[str, Any]: \"\"\" Returns specifc node's information \"\"\" node = dict()", "grain_history for record in acc['account']['allocationHistory']] self.cache['df_grain'] = pd.json_normalize(grain_distribution) self.cache['df_grain']['credTimestampMs'] = pd.to_datetime(self.cache['df_grain']['credTimestampMs'], unit='ms') else:", "-> Tuple[pd.DataFrame, pd.DataFrame]: \"\"\" Gets cred flow through nodes & edges in the", "the cred graph. \"\"\" if self.cache['df_cred_eflow'] is None: def set_plugin(label): for prefix, plugin", "= pd.json_normalize(self.nodes) self.cache['df'].timestamp = pd.to_datetime(self.cache['df'].timestamp, unit='ms') # distributedCred = self.df.totalCred.sum() # self.df['credShare'] =", "dict() for e in edges: cred_edges[e.replace('\\x00', '')] = [ df_ew[df_ew.edge.str.startswith(e)].backward.sum(), df_ew[df_ew.edge.str.startswith(e)].forward.sum() ] self.cache['df_cred_eflow']", "acc in grain_history for record in acc['account']['allocationHistory']] self.cache['df_grain'] = pd.json_normalize(grain_distribution) self.cache['df_grain']['credTimestampMs'] = pd.to_datetime(self.cache['df_grain']['credTimestampMs'],", "= dict() for e in edges: cred_edges[e.replace('\\x00', '')] = [ df_ew[df_ew.edge.str.startswith(e)].backward.sum(), df_ew[df_ew.edge.str.startswith(e)].forward.sum() ]", "self.accounts_data['accounts'] if 'allocationHistory' in acc['account']] if len(grain_history) > 0: grain_distribution = [{'credTimestampMs': 
record['credTimestampMs'],", "Returns user nodes in the graph \"\"\" if self.cache['df'] is None: self.to_df() return", "format \"\"\" return [datetime.fromtimestamp(interval[('endTimeMs')] / 1000) for interval in self.intervals] @property def distributed_cred(self)", "pd.DataFrame([self.get_dt_intervals(), [0.] * len(self.intervals)]).T self.cache['df_grain'].columns = ['credTimestampMs', 'amount'] return self.cache['df_grain'] def get_cred_over_time(self) ->", "amount of cred gained so far \"\"\" if self.cache['df_rank'] is None: # self.cache['df_rank']", "acc['account']] if len(grain_history) > 0: grain_distribution = [{'credTimestampMs': record['credTimestampMs'], 'amount': int(record['grainReceipt']['amount']) / 1e18}", "# for nt in plugin_meta[plugin['name']]['nodeTypes']: # nodes_weights[nt['prefix']] = nt['weight'] plugin_prefixes = {plugin_meta[p_name]['nodePrefix'].replace('\\x00', ''):", "for nt in plugin['nodeTypes']], } edges.extend([et['prefix'] for et in plugin_meta[plugin['name']]['edgeTypes']]) # for et", "= self.cache['df_accounts']['account.paid'].map(float) / 1e18 return self.cache['df_accounts'] def get_user_nodes(self) -> pd.DataFrame: \"\"\" Returns user", "'credShare'] return self.cache['df_rank'] def get_grain_distribution(self) -> pd.DataFrame: \"\"\" Returns the history of grain", "# zeros self.cache['df_grain'] = pd.DataFrame([self.get_dt_intervals(), [0.] 
* len(self.intervals)]).T self.cache['df_grain'].columns = ['credTimestampMs', 'amount'] return", "self.cache['df_grain'] def get_cred_over_time(self) -> pd.DataFrame: \"\"\" Returns distributed cred summary over all intervals", "self.cache['df_cred_ot'].columns = ['credTimestampMs', 'amount'] self.cache['df_cred_ot'].set_index('credTimestampMs', drop=True, inplace=True) return self.cache['df_cred_ot'] def to_df(self) -> pd.DataFrame:", "CredResult JSON data \"\"\" return self.cred_data def get_node(self, i: int) -> Dict[str, Any]:", "\"\"\" return [self.get_node(i) for i in range(self.total_nodes)] @property def intervals(self, to_datetime=False) -> List[Any]:", "[self.get_node(i) for i in range(self.total_nodes)] @property def intervals(self, to_datetime=False) -> List[Any]: \"\"\" Returns", "self.to_df() self.cache['df_cred_ot'] = pd.DataFrame([self.get_dt_intervals(), pd.DataFrame(self.cache['df'].credOverTime.to_list()).sum() ]).T self.cache['df_cred_ot'].columns = ['credTimestampMs', 'amount'] self.cache['df_cred_ot'].set_index('credTimestampMs', drop=True, inplace=True)", "for prefix, plugin in plugin_prefixes.items(): if label.startswith(prefix): return plugin return 'Not Found' #", "Retuns all nodes data as a DataFrame \"\"\" if self.cache['df'] is None: self.cache['df']", "'nodeTypes': [{'prefix': nt['prefix'], 'weight': nt['defaultWeight']} for nt in plugin['nodeTypes']], } edges.extend([et['prefix'] for et", "for acc in self.accounts_data['accounts'] if 'allocationHistory' in acc['account']] if len(grain_history) > 0: grain_distribution", "computed \"\"\" return self.cred_data['intervals'] def get_dt_intervals(self) -> List[Any]: \"\"\" Return intervals in datetime", "[0.] 
* len(self.intervals)]).T self.cache['df_grain'].columns = ['credTimestampMs', 'amount'] return self.cache['df_grain'] def get_cred_over_time(self) -> pd.DataFrame:", "= nt['weight'] plugin_prefixes = {plugin_meta[p_name]['nodePrefix'].replace('\\x00', ''): p_name for p_name in plugin_meta} plugin_prefixes.update({plugin_meta[p_name]['edgePrefix'].replace('\\x00', ''):", "x: x[0]) self.cache['df_cred_eflow']['forward'] = self.cache['df_cred_eflow'].iloc[:,0].apply(lambda x: x[1]) self.cache['df_cred_eflow']['plugin'] = self.cache['df_cred_eflow'].index.map(set_plugin) self.cache['df_cred_eflow'].drop(columns=[0], inplace=True) #", "distributedCred = self.df.totalCred.sum() # self.df['credShare'] = self.df.totalCred / distributedCred return self.cache['df'] def get_cred_flow_from_graph(self)", "= self.cache['df_cred_eflow'].index.map(set_plugin) self.cache['df_cred_eflow'].drop(columns=[0], inplace=True) # NODES df_nw = pd.DataFrame([self.weighted_graph['weightsJSON'][1]['nodeWeights'].keys(), self.weighted_graph['weightsJSON'][1]['nodeWeights'].values() ]).T df_nw.columns =", "plugin in self.cred_json_data[1]['plugins'][1]: plugin_meta[plugin['name']] = { 'nodePrefix': plugin['nodePrefix'], 'edgePrefix': plugin['edgePrefix'], 'edgeTypes': [{'prefix': et['prefix'],", "from Sourcecred - Works with TimelineCred data format (sourcecred <= v0.7x) \"\"\" def", "'output/accounts.json' file \"\"\" if self.cache['df_accounts'] is None: self.cache['df_accounts'] = pd.json_normalize(self.accounts_data['accounts']) self.cache['df_accounts']['account.balance'] = self.cache['df_accounts']['account.balance'].map(float)", "= [] nodes = [] # edges_weights = dict() # nodes_weights = dict()", "raking by total amount of cred gained so far \"\"\" if self.cache['df_rank'] is", "Dict[str, Any]: \"\"\" Weighted graph from CredResult JSON data \"\"\" return self.weighted_graph def", "self.cache['df'][self.cache['df']['address.nodeType'] == 'IDENTITY'] def get_user_ranking(self) -> 
pd.DataFrame: \"\"\" Returns the user raking by", "= self.accounts[['account.identity.id', 'account.identity.name', 'account.identity.subtype', 'account.active', 'account.balance', 'account.paid' ]] self.cache['df_rank'] = df_acc_p.join(df_rank_p, on='account.identity.id', how='inner'", "Gets cred flow through nodes & edges in the cred graph. \"\"\" if", "for p_name in plugin_meta} plugin_prefixes.update({plugin_meta[p_name]['edgePrefix'].replace('\\x00', ''): p_name for p_name in plugin_meta}) # EDGES", "\"\"\" Total amount of nodes (users, posts, etc) in the graph \"\"\" return", "= dict() for plugin in self.cred_json_data[1]['plugins'][1]: plugin_meta[plugin['name']] = { 'nodePrefix': plugin['nodePrefix'], 'edgePrefix': plugin['edgePrefix'],", "self.cache['df_accounts']['account.balance'].map(float) / 1e18 self.cache['df_accounts']['account.paid'] = self.cache['df_accounts']['account.paid'].map(float) / 1e18 return self.cache['df_accounts'] def get_user_nodes(self) ->", "* 100 df_rank_p.set_index('address.id', inplace=True) df_acc_p = self.accounts[['account.identity.id', 'account.identity.name', 'account.identity.subtype', 'account.active', 'account.balance', 'account.paid' ]]", "\"\"\" Gets cred flow through nodes & edges in the cred graph. 
\"\"\"", "information \"\"\" node = dict() address = self.weighted_graph['graphJSON'][1]['sortedNodeAddresses'][i] node['address.source'] = f'{address[0]}/{address[1]}' node['address.nodeType'] =", "was computed \"\"\" return self.cred_data['intervals'] def get_dt_intervals(self) -> List[Any]: \"\"\" Return intervals in", "= ['id', 'user', 'type', 'active', 'grainBalance', 'grainPaid', 'totalCred', 'credOverTime', 'credShare'] return self.cache['df_rank'] def", "-> pd.DataFrame: \"\"\" Retuns all nodes data as a DataFrame \"\"\" if self.cache['df']", "nodes_weights = dict() for plugin in self.cred_json_data[1]['plugins'][1]: plugin_meta[plugin['name']] = { 'nodePrefix': plugin['nodePrefix'], 'edgePrefix':", "flow through nodes & edges in the cred graph. \"\"\" if self.cache['df_cred_eflow'] is", "Dict[str, Any]: \"\"\" Returns specifc node's information \"\"\" node = dict() address =", "pd.DataFrame]: \"\"\" Gets cred flow through nodes & edges in the cred graph.", "None: self.to_df() return self.cache['df'].totalCred.sum() @property def distributed_grain(self) -> float: \"\"\" Returns total distributed", "for plugin in self.cred_json_data[1]['plugins'][1]: plugin_meta[plugin['name']] = { 'nodePrefix': plugin['nodePrefix'], 'edgePrefix': plugin['edgePrefix'], 'edgeTypes': [{'prefix':", "'')] = df_nw[df_nw.node.str.startswith(n)].weight.sum() self.cache['df_cred_nflow'] = pd.json_normalize(cred_nodes).T self.cache['df_cred_nflow'].columns = ['weight'] self.cache['df_cred_nflow']['plugin'] = self.cache['df_cred_nflow'].index.map(set_plugin) return", "amount of nodes (users, posts, etc) in the graph \"\"\" return len(self.cred_data['nodeSummaries']) @property", "\"\"\" Retuns all nodes data as a DataFrame \"\"\" if self.cache['df'] is None:", "self.cache['df_cred_eflow']['backward'] = self.cache['df_cred_eflow'].iloc[:,0].apply(lambda x: x[0]) self.cache['df_cred_eflow']['forward'] = self.cache['df_cred_eflow'].iloc[:,0].apply(lambda x: x[1]) 
self.cache['df_cred_eflow']['plugin'] = self.cache['df_cred_eflow'].index.map(set_plugin)", "# nodes_weights = dict() for plugin in self.cred_json_data[1]['plugins'][1]: plugin_meta[plugin['name']] = { 'nodePrefix': plugin['nodePrefix'],", "{ 'nodePrefix': plugin['nodePrefix'], 'edgePrefix': plugin['edgePrefix'], 'edgeTypes': [{'prefix': et['prefix'], 'weight': et['defaultWeight']} for et in", "Weighted graph from CredResult JSON data \"\"\" return self.weighted_graph def get_cred_data(self) -> Dict[str,", "self.accounts[['account.identity.id', 'account.identity.name', 'account.identity.subtype', 'account.active', 'account.balance', 'account.paid' ]] self.cache['df_rank'] = df_acc_p.join(df_rank_p, on='account.identity.id', how='inner' ).sort_values('totalCred',", "is None: self.to_df() self.cache['df_cred_ot'] = pd.DataFrame([self.get_dt_intervals(), pd.DataFrame(self.cache['df'].credOverTime.to_list()).sum() ]).T self.cache['df_cred_ot'].columns = ['credTimestampMs', 'amount'] self.cache['df_cred_ot'].set_index('credTimestampMs',", "cred_edges = dict() for e in edges: cred_edges[e.replace('\\x00', '')] = [ df_ew[df_ew.edge.str.startswith(e)].backward.sum(), df_ew[df_ew.edge.str.startswith(e)].forward.sum()", "dict() # nodes_weights = dict() for plugin in self.cred_json_data[1]['plugins'][1]: plugin_meta[plugin['name']] = { 'nodePrefix':", "None, 'df_accounts': None, 'df_cred_ot': None, 'df_cred_eflow': None, 'df_cred_nflow': None, } def get_weighted_graph(self, data)", "self.accounts_data = accounts_data self.cache = { 'df': None, 'df_rank': None, 'df_grain': None, 'df_accounts':", "with TimelineCred data format (sourcecred <= v0.7x) \"\"\" def __init__(self, cred_data, accounts_data): self.cred_json_data", "[] # edges_weights = dict() # nodes_weights = dict() for plugin in self.cred_json_data[1]['plugins'][1]:", "et in plugin_meta[plugin['name']]['edgeTypes']]) # for et in plugin_meta[plugin['name']]['edgeTypes']: # edges_weights[et['prefix']] = et['weight'] 
nodes.extend([nt['prefix']", "None return node @property def total_nodes(self) -> int: \"\"\" Total amount of nodes", "for nt in plugin_meta[plugin['name']]['nodeTypes']: # nodes_weights[nt['prefix']] = nt['weight'] plugin_prefixes = {plugin_meta[p_name]['nodePrefix'].replace('\\x00', ''): p_name", "in self.accounts_data['accounts'] if 'allocationHistory' in acc['account']] if len(grain_history) > 0: grain_distribution = [{'credTimestampMs':", "plugin['edgeTypes']], 'nodeTypes': [{'prefix': nt['prefix'], 'weight': nt['defaultWeight']} for nt in plugin['nodeTypes']], } edges.extend([et['prefix'] for", "if self.cache['df_accounts'] is None: self.cache['df_accounts'] = pd.json_normalize(self.accounts_data['accounts']) self.cache['df_accounts']['account.balance'] = self.cache['df_accounts']['account.balance'].map(float) / 1e18 self.cache['df_accounts']['account.paid']", "\"\"\" if self.cache['df'] is None: self.to_df() return self.cache['df'][self.cache['df']['address.nodeType'] == 'IDENTITY'] def get_user_ranking(self) ->", "in plugin_meta[plugin['name']]['edgeTypes']]) # for et in plugin_meta[plugin['name']]['edgeTypes']: # edges_weights[et['prefix']] = et['weight'] nodes.extend([nt['prefix'] for", "- Works with TimelineCred data format (sourcecred <= v0.7x) \"\"\" def __init__(self, cred_data,", "inplace=True) return self.cache['df_cred_ot'] def to_df(self) -> pd.DataFrame: \"\"\" Retuns all nodes data as", "in self.weighted_graph['weightsJSON'][1]['edgeWeights'].values()], [v['forwards'] for v in self.weighted_graph['weightsJSON'][1]['edgeWeights'].values()] ]).T df_ew.columns = ['edge', 'backward', 'forward']", "'account.identity.subtype', 'account.active', 'account.balance', 'account.paid' ]] self.cache['df_rank'] = df_acc_p.join(df_rank_p, on='account.identity.id', how='inner' ).sort_values('totalCred', ascending=False).reset_index(drop=True) self.cache['df_rank'].columns", "self.cache['df_accounts'] def get_user_nodes(self) -> pd.DataFrame: \"\"\" Returns user 
nodes in the graph \"\"\"", "for nt in plugin_meta[plugin['name']]['nodeTypes']]) # for nt in plugin_meta[plugin['name']]['nodeTypes']: # nodes_weights[nt['prefix']] = nt['weight']", "# self.cache['df_rank'] = self.get_user_nodes().sort_values('totalCred', ascending=False).reset_index(drop=True) # distributed_cred = self.cache['df_rank'].totalCred.sum() # self.cache['df_rank']['credShare'] = (self.cache['df_rank'].totalCred", "= self.df.totalCred / distributedCred return self.cache['df'] def get_cred_flow_from_graph(self) -> Tuple[pd.DataFrame, pd.DataFrame]: \"\"\" Gets", "* 100 df_rank_p = self.get_user_nodes()[['address.id', 'totalCred', 'credOverTime']] distributed_cred = df_rank_p.totalCred.sum() df_rank_p['credShare'] = (df_rank_p.totalCred", "in plugin_meta[plugin['name']]['edgeTypes']: # edges_weights[et['prefix']] = et['weight'] nodes.extend([nt['prefix'] for nt in plugin_meta[plugin['name']]['nodeTypes']]) # for", "'account.identity.name', 'account.identity.subtype', 'account.active', 'account.balance', 'account.paid' ]] self.cache['df_rank'] = df_acc_p.join(df_rank_p, on='account.identity.id', how='inner' ).sort_values('totalCred', ascending=False).reset_index(drop=True)", "data \"\"\" return self.weighted_graph def get_cred_data(self) -> Dict[str, Any]: \"\"\" Raw CredResult JSON", "cred was computed \"\"\" return self.cred_data['intervals'] def get_dt_intervals(self) -> List[Any]: \"\"\" Return intervals", "label.startswith(prefix): return plugin return 'Not Found' # PREPROCESSING plugin_meta = dict() edges =", "= [acc for acc in self.accounts_data['accounts'] if 'allocationHistory' in acc['account']] if len(grain_history) >", "= cred_data[1]['credData'] self.accounts_data = accounts_data self.cache = { 'df': None, 'df_rank': None, 'df_grain':", "pd.DataFrame([self.weighted_graph['weightsJSON'][1]['nodeWeights'].keys(), self.weighted_graph['weightsJSON'][1]['nodeWeights'].values() ]).T df_nw.columns = ['node', 'weight'] cred_nodes = dict() for n in", 
"in self.intervals] @property def distributed_cred(self) -> float: \"\"\" Returns total distributed cred \"\"\"", "cred_edges[e.replace('\\x00', '')] = [ df_ew[df_ew.edge.str.startswith(e)].backward.sum(), df_ew[df_ew.edge.str.startswith(e)].forward.sum() ] self.cache['df_cred_eflow'] = pd.json_normalize(cred_edges).T self.cache['df_cred_eflow']['backward'] = self.cache['df_cred_eflow'].iloc[:,0].apply(lambda", "unit='ms') # distributedCred = self.df.totalCred.sum() # self.df['credShare'] = self.df.totalCred / distributedCred return self.cache['df']", "et in plugin['edgeTypes']], 'nodeTypes': [{'prefix': nt['prefix'], 'weight': nt['defaultWeight']} for nt in plugin['nodeTypes']], }", "on='account.identity.id', how='inner' ).sort_values('totalCred', ascending=False).reset_index(drop=True) self.cache['df_rank'].columns = ['id', 'user', 'type', 'active', 'grainBalance', 'grainPaid', 'totalCred',", "pd.DataFrame: \"\"\" Returns user accounts info from 'output/accounts.json' file \"\"\" if self.cache['df_accounts'] is", "accounts_data): self.cred_json_data = cred_data self.weighted_graph = cred_data[1]['weightedGraph'][1] self.cred_data = cred_data[1]['credData'] self.accounts_data = accounts_data", "@property def distributed_grain(self) -> float: \"\"\" Returns total distributed grain \"\"\" if self.cache['df_grain']", "= ['edge', 'backward', 'forward'] cred_edges = dict() for e in edges: cred_edges[e.replace('\\x00', '')]", "self.cache['df_grain'] = pd.DataFrame([self.get_dt_intervals(), [0.] 
* len(self.intervals)]).T self.cache['df_grain'].columns = ['credTimestampMs', 'amount'] return self.cache['df_grain'] def", "]] self.cache['df_rank'] = df_acc_p.join(df_rank_p, on='account.identity.id', how='inner' ).sort_values('totalCred', ascending=False).reset_index(drop=True) self.cache['df_rank'].columns = ['id', 'user', 'type',", "Returns user accounts info from 'output/accounts.json' file \"\"\" if self.cache['df_accounts'] is None: self.cache['df_accounts']", "self.cred_data['nodeOverTime'][i]['cred'] if self.cred_data['nodeOverTime'][i] else [] node['description'] = self.weighted_graph['graphJSON'][1]['nodes'][i]['description'] node['timestamp'] = self.weighted_graph['graphJSON'][1]['nodes'][i]['timestampMs'] node['user'] =", "node['user'] = self.weighted_graph['graphJSON'][1]['nodes'][i]['description'] if node['address.nodeType'] == 'IDENTITY' else None return node @property def", "def __repr__(self) -> str: return \"<{} - ({} nodes & {} distributed CRED)>\".format(self.__class__.__name__,", "etc) in the graph \"\"\" return len(self.cred_data['nodeSummaries']) @property def nodes(self) -> List[Any]: \"\"\"", "-> List[Any]: \"\"\" Return intervals in datetime format \"\"\" return [datetime.fromtimestamp(interval[('endTimeMs')] / 1000)", "\"\"\" Returns user nodes in the graph \"\"\" if self.cache['df'] is None: self.to_df()", "List[Any]: \"\"\" Returns timestamp intervals where cred was computed \"\"\" return self.cred_data['intervals'] def", "v0.7x) \"\"\" def __init__(self, cred_data, accounts_data): self.cred_json_data = cred_data self.weighted_graph = cred_data[1]['weightedGraph'][1] self.cred_data", "self.cred_data = cred_data[1]['credData'] self.accounts_data = accounts_data self.cache = { 'df': None, 'df_rank': None,", "edges in the cred graph. 
\"\"\" if self.cache['df_cred_eflow'] is None: def set_plugin(label): for", "(self.cache['df_cred_nflow'], self.cache['df_cred_eflow']) def __repr__(self) -> str: return \"<{} - ({} nodes & {}", "i in range(self.total_nodes)] @property def intervals(self, to_datetime=False) -> List[Any]: \"\"\" Returns timestamp intervals", "pd from typing import Any, Dict, List, Tuple class CredData(): \"\"\" Parses information", "Returns total distributed grain \"\"\" if self.cache['df_grain'] is None: self.get_grain_distribution() return self.cache['df_grain'].amount.sum() @property", "int: \"\"\" Total amount of nodes (users, posts, etc) in the graph \"\"\"", "node['address.nodeType'] = address[2] node['address.id'] = address[3] node['totalCred'] = self.cred_data['nodeSummaries'][i]['cred'] node['credOverTime'] = self.cred_data['nodeOverTime'][i]['cred'] if", "so far \"\"\" if self.cache['df_rank'] is None: # self.cache['df_rank'] = self.get_user_nodes().sort_values('totalCred', ascending=False).reset_index(drop=True) #", "= df_nw[df_nw.node.str.startswith(n)].weight.sum() self.cache['df_cred_nflow'] = pd.json_normalize(cred_nodes).T self.cache['df_cred_nflow'].columns = ['weight'] self.cache['df_cred_nflow']['plugin'] = self.cache['df_cred_nflow'].index.map(set_plugin) return (self.cache['df_cred_nflow'],", "def to_df(self) -> pd.DataFrame: \"\"\" Retuns all nodes data as a DataFrame \"\"\"", "[v['forwards'] for v in self.weighted_graph['weightsJSON'][1]['edgeWeights'].values()] ]).T df_ew.columns = ['edge', 'backward', 'forward'] cred_edges =", "'df_grain': None, 'df_accounts': None, 'df_cred_ot': None, 'df_cred_eflow': None, 'df_cred_nflow': None, } def get_weighted_graph(self,", "node's information \"\"\" node = dict() address = self.weighted_graph['graphJSON'][1]['sortedNodeAddresses'][i] node['address.source'] = f'{address[0]}/{address[1]}' node['address.nodeType']", "nodes(self) -> List[Any]: \"\"\" Returns all nodes in the graph \"\"\" return [self.get_node(i)", "grain 
\"\"\" if self.cache['df_grain'] is None: self.get_grain_distribution() return self.cache['df_grain'].amount.sum() @property def accounts(self) ->", "\"\"\" if self.cache['df_cred_ot'] is None: if self.cache['df'] is None: self.to_df() self.cache['df_cred_ot'] = pd.DataFrame([self.get_dt_intervals(),", "self.cache['df_cred_eflow'].iloc[:,0].apply(lambda x: x[1]) self.cache['df_cred_eflow']['plugin'] = self.cache['df_cred_eflow'].index.map(set_plugin) self.cache['df_cred_eflow'].drop(columns=[0], inplace=True) # NODES df_nw = pd.DataFrame([self.weighted_graph['weightsJSON'][1]['nodeWeights'].keys(),", "\"\"\" Returns total distributed grain \"\"\" if self.cache['df_grain'] is None: self.get_grain_distribution() return self.cache['df_grain'].amount.sum()", "in nodes: cred_nodes[n.replace('\\x00', '')] = df_nw[df_nw.node.str.startswith(n)].weight.sum() self.cache['df_cred_nflow'] = pd.json_normalize(cred_nodes).T self.cache['df_cred_nflow'].columns = ['weight'] self.cache['df_cred_nflow']['plugin']", "df_acc_p.join(df_rank_p, on='account.identity.id', how='inner' ).sort_values('totalCred', ascending=False).reset_index(drop=True) self.cache['df_rank'].columns = ['id', 'user', 'type', 'active', 'grainBalance', 'grainPaid',", "None: def set_plugin(label): for prefix, plugin in plugin_prefixes.items(): if label.startswith(prefix): return plugin return", "self.cache['df_rank'].columns = ['id', 'user', 'type', 'active', 'grainBalance', 'grainPaid', 'totalCred', 'credOverTime', 'credShare'] return self.cache['df_rank']", "]).T df_ew.columns = ['edge', 'backward', 'forward'] cred_edges = dict() for e in edges:", "to_datetime=False) -> List[Any]: \"\"\" Returns timestamp intervals where cred was computed \"\"\" return", "is None: # self.cache['df_rank'] = self.get_user_nodes().sort_values('totalCred', ascending=False).reset_index(drop=True) # distributed_cred = self.cache['df_rank'].totalCred.sum() # self.cache['df_rank']['credShare']", "acc in self.accounts_data['accounts'] 
if 'allocationHistory' in acc['account']] if len(grain_history) > 0: grain_distribution =", "''): p_name for p_name in plugin_meta}) # EDGES df_ew = pd.DataFrame([self.weighted_graph['weightsJSON'][1]['edgeWeights'].keys(), [v['backwards'] for", "['edge', 'backward', 'forward'] cred_edges = dict() for e in edges: cred_edges[e.replace('\\x00', '')] =", "def get_node(self, i: int) -> Dict[str, Any]: \"\"\" Returns specifc node's information \"\"\"", "def get_cred_data(self) -> Dict[str, Any]: \"\"\" Raw CredResult JSON data \"\"\" return self.cred_data", "= et['weight'] nodes.extend([nt['prefix'] for nt in plugin_meta[plugin['name']]['nodeTypes']]) # for nt in plugin_meta[plugin['name']]['nodeTypes']: #", "if self.cache['df'] is None: self.to_df() return self.cache['df'][self.cache['df']['address.nodeType'] == 'IDENTITY'] def get_user_ranking(self) -> pd.DataFrame:", "= df_acc_p.join(df_rank_p, on='account.identity.id', how='inner' ).sort_values('totalCred', ascending=False).reset_index(drop=True) self.cache['df_rank'].columns = ['id', 'user', 'type', 'active', 'grainBalance',", "the graph \"\"\" return [self.get_node(i) for i in range(self.total_nodes)] @property def intervals(self, to_datetime=False)", "@property def accounts(self) -> pd.DataFrame: \"\"\" Returns user accounts info from 'output/accounts.json' file", "self.weighted_graph['graphJSON'][1]['nodes'][i]['description'] if node['address.nodeType'] == 'IDENTITY' else None return node @property def total_nodes(self) ->", "-> Dict[str, Any]: \"\"\" Returns specifc node's information \"\"\" node = dict() address", "return [datetime.fromtimestamp(interval[('endTimeMs')] / 1000) for interval in self.intervals] @property def distributed_cred(self) -> float:", "if self.cache['df'] is None: self.cache['df'] = pd.json_normalize(self.nodes) self.cache['df'].timestamp = pd.to_datetime(self.cache['df'].timestamp, unit='ms') # distributedCred", "[] nodes = [] # edges_weights = dict() # nodes_weights = dict() for", 
"df_acc_p = self.accounts[['account.identity.id', 'account.identity.name', 'account.identity.subtype', 'account.active', 'account.balance', 'account.paid' ]] self.cache['df_rank'] = df_acc_p.join(df_rank_p, on='account.identity.id',", "edges_weights[et['prefix']] = et['weight'] nodes.extend([nt['prefix'] for nt in plugin_meta[plugin['name']]['nodeTypes']]) # for nt in plugin_meta[plugin['name']]['nodeTypes']:", "# for et in plugin_meta[plugin['name']]['edgeTypes']: # edges_weights[et['prefix']] = et['weight'] nodes.extend([nt['prefix'] for nt in", "how='inner' ).sort_values('totalCred', ascending=False).reset_index(drop=True) self.cache['df_rank'].columns = ['id', 'user', 'type', 'active', 'grainBalance', 'grainPaid', 'totalCred', 'credOverTime',", "JSON data \"\"\" return self.weighted_graph def get_cred_data(self) -> Dict[str, Any]: \"\"\" Raw CredResult", "record['credTimestampMs'], 'amount': int(record['grainReceipt']['amount']) / 1e18} \\ for acc in grain_history for record in", "None, 'df_cred_eflow': None, 'df_cred_nflow': None, } def get_weighted_graph(self, data) -> Dict[str, Any]: \"\"\"", "Sourcecred - Works with TimelineCred data format (sourcecred <= v0.7x) \"\"\" def __init__(self,", "-> pd.DataFrame: \"\"\" Returns distributed cred summary over all intervals \"\"\" if self.cache['df_cred_ot']", "Tuple[pd.DataFrame, pd.DataFrame]: \"\"\" Gets cred flow through nodes & edges in the cred", "x: x[1]) self.cache['df_cred_eflow']['plugin'] = self.cache['df_cred_eflow'].index.map(set_plugin) self.cache['df_cred_eflow'].drop(columns=[0], inplace=True) # NODES df_nw = pd.DataFrame([self.weighted_graph['weightsJSON'][1]['nodeWeights'].keys(), self.weighted_graph['weightsJSON'][1]['nodeWeights'].values()", "dict() address = self.weighted_graph['graphJSON'][1]['sortedNodeAddresses'][i] node['address.source'] = f'{address[0]}/{address[1]}' node['address.nodeType'] = address[2] node['address.id'] = address[3]", "self.cache['df_rank'] def 
get_grain_distribution(self) -> pd.DataFrame: \"\"\" Returns the history of grain distribution \"\"\"", "df_rank_p.totalCred.sum() df_rank_p['credShare'] = (df_rank_p.totalCred / distributed_cred) * 100 df_rank_p.set_index('address.id', inplace=True) df_acc_p = self.accounts[['account.identity.id',", "pd.json_normalize(cred_edges).T self.cache['df_cred_eflow']['backward'] = self.cache['df_cred_eflow'].iloc[:,0].apply(lambda x: x[0]) self.cache['df_cred_eflow']['forward'] = self.cache['df_cred_eflow'].iloc[:,0].apply(lambda x: x[1]) self.cache['df_cred_eflow']['plugin'] =", "= (df_rank_p.totalCred / distributed_cred) * 100 df_rank_p.set_index('address.id', inplace=True) df_acc_p = self.accounts[['account.identity.id', 'account.identity.name', 'account.identity.subtype',", "self.weighted_graph['weightsJSON'][1]['nodeWeights'].values() ]).T df_nw.columns = ['node', 'weight'] cred_nodes = dict() for n in nodes:", "self.cache['df_grain'].columns = ['credTimestampMs', 'amount'] return self.cache['df_grain'] def get_cred_over_time(self) -> pd.DataFrame: \"\"\" Returns distributed", "-> str: return \"<{} - ({} nodes & {} distributed CRED)>\".format(self.__class__.__name__, self.total_nodes, self.distributed_cred)", "Parses information from Sourcecred - Works with TimelineCred data format (sourcecred <= v0.7x)", "in acc['account']['allocationHistory']] self.cache['df_grain'] = pd.json_normalize(grain_distribution) self.cache['df_grain']['credTimestampMs'] = pd.to_datetime(self.cache['df_grain']['credTimestampMs'], unit='ms') else: # zeros self.cache['df_grain']", "plugin_meta = dict() edges = [] nodes = [] # edges_weights = dict()", "import datetime import pandas as pd from typing import Any, Dict, List, Tuple", "= accounts_data self.cache = { 'df': None, 'df_rank': None, 'df_grain': None, 'df_accounts': None,", "distributed_cred) * 100 df_rank_p = self.get_user_nodes()[['address.id', 'totalCred', 'credOverTime']] distributed_cred = df_rank_p.totalCred.sum() 
df_rank_p['credShare'] =", "self.cache['df_grain']['credTimestampMs'] = pd.to_datetime(self.cache['df_grain']['credTimestampMs'], unit='ms') else: # zeros self.cache['df_grain'] = pd.DataFrame([self.get_dt_intervals(), [0.] * len(self.intervals)]).T", "[] node['description'] = self.weighted_graph['graphJSON'][1]['nodes'][i]['description'] node['timestamp'] = self.weighted_graph['graphJSON'][1]['nodes'][i]['timestampMs'] node['user'] = self.weighted_graph['graphJSON'][1]['nodes'][i]['description'] if node['address.nodeType'] ==", "df_rank_p.set_index('address.id', inplace=True) df_acc_p = self.accounts[['account.identity.id', 'account.identity.name', 'account.identity.subtype', 'account.active', 'account.balance', 'account.paid' ]] self.cache['df_rank'] =", "Return intervals in datetime format \"\"\" return [datetime.fromtimestamp(interval[('endTimeMs')] / 1000) for interval in", "is None: def set_plugin(label): for prefix, plugin in plugin_prefixes.items(): if label.startswith(prefix): return plugin", "self.cache['df'].timestamp = pd.to_datetime(self.cache['df'].timestamp, unit='ms') # distributedCred = self.df.totalCred.sum() # self.df['credShare'] = self.df.totalCred /", "return self.cred_data['intervals'] def get_dt_intervals(self) -> List[Any]: \"\"\" Return intervals in datetime format \"\"\"", "'account.paid' ]] self.cache['df_rank'] = df_acc_p.join(df_rank_p, on='account.identity.id', how='inner' ).sort_values('totalCred', ascending=False).reset_index(drop=True) self.cache['df_rank'].columns = ['id', 'user',", "self.cache['df_cred_ot'] = pd.DataFrame([self.get_dt_intervals(), pd.DataFrame(self.cache['df'].credOverTime.to_list()).sum() ]).T self.cache['df_cred_ot'].columns = ['credTimestampMs', 'amount'] self.cache['df_cred_ot'].set_index('credTimestampMs', drop=True, inplace=True) return", "the graph \"\"\" if self.cache['df'] is None: self.to_df() return self.cache['df'][self.cache['df']['address.nodeType'] == 'IDENTITY'] def", "= ['weight'] 
self.cache['df_cred_nflow']['plugin'] = self.cache['df_cred_nflow'].index.map(set_plugin) return (self.cache['df_cred_nflow'], self.cache['df_cred_eflow']) def __repr__(self) -> str: return", "plugin['edgePrefix'], 'edgeTypes': [{'prefix': et['prefix'], 'weight': et['defaultWeight']} for et in plugin['edgeTypes']], 'nodeTypes': [{'prefix': nt['prefix'],", "pd.DataFrame([self.get_dt_intervals(), pd.DataFrame(self.cache['df'].credOverTime.to_list()).sum() ]).T self.cache['df_cred_ot'].columns = ['credTimestampMs', 'amount'] self.cache['df_cred_ot'].set_index('credTimestampMs', drop=True, inplace=True) return self.cache['df_cred_ot'] def", "node['timestamp'] = self.weighted_graph['graphJSON'][1]['nodes'][i]['timestampMs'] node['user'] = self.weighted_graph['graphJSON'][1]['nodes'][i]['description'] if node['address.nodeType'] == 'IDENTITY' else None return", "import Any, Dict, List, Tuple class CredData(): \"\"\" Parses information from Sourcecred -", "= self.df.totalCred.sum() # self.df['credShare'] = self.df.totalCred / distributedCred return self.cache['df'] def get_cred_flow_from_graph(self) ->", "inplace=True) df_acc_p = self.accounts[['account.identity.id', 'account.identity.name', 'account.identity.subtype', 'account.active', 'account.balance', 'account.paid' ]] self.cache['df_rank'] = df_acc_p.join(df_rank_p,", "get_weighted_graph(self, data) -> Dict[str, Any]: \"\"\" Weighted graph from CredResult JSON data \"\"\"", "Total amount of nodes (users, posts, etc) in the graph \"\"\" return len(self.cred_data['nodeSummaries'])", "self.weighted_graph['weightsJSON'][1]['edgeWeights'].values()], [v['forwards'] for v in self.weighted_graph['weightsJSON'][1]['edgeWeights'].values()] ]).T df_ew.columns = ['edge', 'backward', 'forward'] cred_edges", "df_nw = pd.DataFrame([self.weighted_graph['weightsJSON'][1]['nodeWeights'].keys(), self.weighted_graph['weightsJSON'][1]['nodeWeights'].values() ]).T df_nw.columns = ['node', 'weight'] cred_nodes = dict() for", "from 
CredResult JSON data \"\"\" return self.weighted_graph def get_cred_data(self) -> Dict[str, Any]: \"\"\"", "x[0]) self.cache['df_cred_eflow']['forward'] = self.cache['df_cred_eflow'].iloc[:,0].apply(lambda x: x[1]) self.cache['df_cred_eflow']['plugin'] = self.cache['df_cred_eflow'].index.map(set_plugin) self.cache['df_cred_eflow'].drop(columns=[0], inplace=True) # NODES", "self.cred_data['nodeSummaries'][i]['cred'] node['credOverTime'] = self.cred_data['nodeOverTime'][i]['cred'] if self.cred_data['nodeOverTime'][i] else [] node['description'] = self.weighted_graph['graphJSON'][1]['nodes'][i]['description'] node['timestamp'] =", "in range(self.total_nodes)] @property def intervals(self, to_datetime=False) -> List[Any]: \"\"\" Returns timestamp intervals where", "= { 'nodePrefix': plugin['nodePrefix'], 'edgePrefix': plugin['edgePrefix'], 'edgeTypes': [{'prefix': et['prefix'], 'weight': et['defaultWeight']} for et", "plugin in plugin_prefixes.items(): if label.startswith(prefix): return plugin return 'Not Found' # PREPROCESSING plugin_meta", "intervals(self, to_datetime=False) -> List[Any]: \"\"\" Returns timestamp intervals where cred was computed \"\"\"", "self.df.totalCred.sum() # self.df['credShare'] = self.df.totalCred / distributedCred return self.cache['df'] def get_cred_flow_from_graph(self) -> Tuple[pd.DataFrame,", "nodes data as a DataFrame \"\"\" if self.cache['df'] is None: self.cache['df'] = pd.json_normalize(self.nodes)", "\"\"\" return self.weighted_graph def get_cred_data(self) -> Dict[str, Any]: \"\"\" Raw CredResult JSON data", "= self.get_user_nodes()[['address.id', 'totalCred', 'credOverTime']] distributed_cred = df_rank_p.totalCred.sum() df_rank_p['credShare'] = (df_rank_p.totalCred / distributed_cred) *", "specifc node's information \"\"\" node = dict() address = self.weighted_graph['graphJSON'][1]['sortedNodeAddresses'][i] node['address.source'] = f'{address[0]}/{address[1]}'", "else None return node @property def total_nodes(self) -> int: 
\"\"\" Total amount of", "for interval in self.intervals] @property def distributed_cred(self) -> float: \"\"\" Returns total distributed", "plugin return 'Not Found' # PREPROCESSING plugin_meta = dict() edges = [] nodes", "-> Dict[str, Any]: \"\"\" Weighted graph from CredResult JSON data \"\"\" return self.weighted_graph", "nt in plugin_meta[plugin['name']]['nodeTypes']]) # for nt in plugin_meta[plugin['name']]['nodeTypes']: # nodes_weights[nt['prefix']] = nt['weight'] plugin_prefixes", "total_nodes(self) -> int: \"\"\" Total amount of nodes (users, posts, etc) in the", "= self.get_user_nodes().sort_values('totalCred', ascending=False).reset_index(drop=True) # distributed_cred = self.cache['df_rank'].totalCred.sum() # self.cache['df_rank']['credShare'] = (self.cache['df_rank'].totalCred / distributed_cred)", "pandas as pd from typing import Any, Dict, List, Tuple class CredData(): \"\"\"", "Works with TimelineCred data format (sourcecred <= v0.7x) \"\"\" def __init__(self, cred_data, accounts_data):", "accounts(self) -> pd.DataFrame: \"\"\" Returns user accounts info from 'output/accounts.json' file \"\"\" if", "= self.cache['df_cred_nflow'].index.map(set_plugin) return (self.cache['df_cred_nflow'], self.cache['df_cred_eflow']) def __repr__(self) -> str: return \"<{} - ({}", "'totalCred', 'credOverTime']] distributed_cred = df_rank_p.totalCred.sum() df_rank_p['credShare'] = (df_rank_p.totalCred / distributed_cred) * 100 df_rank_p.set_index('address.id',", "# edges_weights = dict() # nodes_weights = dict() for plugin in self.cred_json_data[1]['plugins'][1]: plugin_meta[plugin['name']]", "'df_rank': None, 'df_grain': None, 'df_accounts': None, 'df_cred_ot': None, 'df_cred_eflow': None, 'df_cred_nflow': None, }", "interval in self.intervals] @property def distributed_cred(self) -> float: \"\"\" Returns total distributed cred", "df_ew[df_ew.edge.str.startswith(e)].forward.sum() ] self.cache['df_cred_eflow'] = pd.json_normalize(cred_edges).T 
self.cache['df_cred_eflow']['backward'] = self.cache['df_cred_eflow'].iloc[:,0].apply(lambda x: x[0]) self.cache['df_cred_eflow']['forward'] = self.cache['df_cred_eflow'].iloc[:,0].apply(lambda", "get_cred_flow_from_graph(self) -> Tuple[pd.DataFrame, pd.DataFrame]: \"\"\" Gets cred flow through nodes & edges in", "in self.weighted_graph['weightsJSON'][1]['edgeWeights'].values()] ]).T df_ew.columns = ['edge', 'backward', 'forward'] cred_edges = dict() for e", "nodes = [] # edges_weights = dict() # nodes_weights = dict() for plugin", "df_ew.columns = ['edge', 'backward', 'forward'] cred_edges = dict() for e in edges: cred_edges[e.replace('\\x00',", "pd.json_normalize(cred_nodes).T self.cache['df_cred_nflow'].columns = ['weight'] self.cache['df_cred_nflow']['plugin'] = self.cache['df_cred_nflow'].index.map(set_plugin) return (self.cache['df_cred_nflow'], self.cache['df_cred_eflow']) def __repr__(self) ->", "self.cache['df'] is None: self.to_df() return self.cache['df'].totalCred.sum() @property def distributed_grain(self) -> float: \"\"\" Returns", "\"\"\" Parses information from Sourcecred - Works with TimelineCred data format (sourcecred <=", "\"\"\" Returns user accounts info from 'output/accounts.json' file \"\"\" if self.cache['df_accounts'] is None:", "self.cache['df_accounts']['account.paid'].map(float) / 1e18 return self.cache['df_accounts'] def get_user_nodes(self) -> pd.DataFrame: \"\"\" Returns user nodes", "__init__(self, cred_data, accounts_data): self.cred_json_data = cred_data self.weighted_graph = cred_data[1]['weightedGraph'][1] self.cred_data = cred_data[1]['credData'] self.accounts_data", "of cred gained so far \"\"\" if self.cache['df_rank'] is None: # self.cache['df_rank'] =", "'grainBalance', 'grainPaid', 'totalCred', 'credOverTime', 'credShare'] return self.cache['df_rank'] def get_grain_distribution(self) -> pd.DataFrame: \"\"\" Returns", "= self.cache['df_cred_eflow'].iloc[:,0].apply(lambda x: x[1]) self.cache['df_cred_eflow']['plugin'] = 
self.cache['df_cred_eflow'].index.map(set_plugin) self.cache['df_cred_eflow'].drop(columns=[0], inplace=True) # NODES df_nw =", "def get_user_ranking(self) -> pd.DataFrame: \"\"\" Returns the user raking by total amount of", "node['address.id'] = address[3] node['totalCred'] = self.cred_data['nodeSummaries'][i]['cred'] node['credOverTime'] = self.cred_data['nodeOverTime'][i]['cred'] if self.cred_data['nodeOverTime'][i] else []", "DataFrame \"\"\" if self.cache['df'] is None: self.cache['df'] = pd.json_normalize(self.nodes) self.cache['df'].timestamp = pd.to_datetime(self.cache['df'].timestamp, unit='ms')", "get_cred_data(self) -> Dict[str, Any]: \"\"\" Raw CredResult JSON data \"\"\" return self.cred_data def", "/ distributedCred return self.cache['df'] def get_cred_flow_from_graph(self) -> Tuple[pd.DataFrame, pd.DataFrame]: \"\"\" Gets cred flow", "'edgeTypes': [{'prefix': et['prefix'], 'weight': et['defaultWeight']} for et in plugin['edgeTypes']], 'nodeTypes': [{'prefix': nt['prefix'], 'weight':", "get_cred_over_time(self) -> pd.DataFrame: \"\"\" Returns distributed cred summary over all intervals \"\"\" if", "datetime format \"\"\" return [datetime.fromtimestamp(interval[('endTimeMs')] / 1000) for interval in self.intervals] @property def", "List[Any]: \"\"\" Return intervals in datetime format \"\"\" return [datetime.fromtimestamp(interval[('endTimeMs')] / 1000) for", "= dict() # nodes_weights = dict() for plugin in self.cred_json_data[1]['plugins'][1]: plugin_meta[plugin['name']] = {", "drop=True, inplace=True) return self.cache['df_cred_ot'] def to_df(self) -> pd.DataFrame: \"\"\" Retuns all nodes data", "JSON data \"\"\" return self.cred_data def get_node(self, i: int) -> Dict[str, Any]: \"\"\"", "'grainPaid', 'totalCred', 'credOverTime', 'credShare'] return self.cache['df_rank'] def get_grain_distribution(self) -> pd.DataFrame: \"\"\" Returns the", "\"\"\" return self.cred_data['intervals'] def get_dt_intervals(self) -> List[Any]: \"\"\" Return intervals in 
datetime format", "None: self.to_df() return self.cache['df'][self.cache['df']['address.nodeType'] == 'IDENTITY'] def get_user_ranking(self) -> pd.DataFrame: \"\"\" Returns the", "return self.cache['df_grain'].amount.sum() @property def accounts(self) -> pd.DataFrame: \"\"\" Returns user accounts info from", "in grain_history for record in acc['account']['allocationHistory']] self.cache['df_grain'] = pd.json_normalize(grain_distribution) self.cache['df_grain']['credTimestampMs'] = pd.to_datetime(self.cache['df_grain']['credTimestampMs'], unit='ms')", "graph \"\"\" return len(self.cred_data['nodeSummaries']) @property def nodes(self) -> List[Any]: \"\"\" Returns all nodes", "def get_grain_distribution(self) -> pd.DataFrame: \"\"\" Returns the history of grain distribution \"\"\" if", "plugin_meta[plugin['name']]['nodeTypes']]) # for nt in plugin_meta[plugin['name']]['nodeTypes']: # nodes_weights[nt['prefix']] = nt['weight'] plugin_prefixes = {plugin_meta[p_name]['nodePrefix'].replace('\\x00',", "self.cache = { 'df': None, 'df_rank': None, 'df_grain': None, 'df_accounts': None, 'df_cred_ot': None,", "node @property def total_nodes(self) -> int: \"\"\" Total amount of nodes (users, posts,", "= self.cache['df_cred_eflow'].iloc[:,0].apply(lambda x: x[0]) self.cache['df_cred_eflow']['forward'] = self.cache['df_cred_eflow'].iloc[:,0].apply(lambda x: x[1]) self.cache['df_cred_eflow']['plugin'] = self.cache['df_cred_eflow'].index.map(set_plugin) self.cache['df_cred_eflow'].drop(columns=[0],", "return self.cache['df_rank'] def get_grain_distribution(self) -> pd.DataFrame: \"\"\" Returns the history of grain distribution", "return self.weighted_graph def get_cred_data(self) -> Dict[str, Any]: \"\"\" Raw CredResult JSON data \"\"\"", "self.cache['df_accounts']['account.balance'] = self.cache['df_accounts']['account.balance'].map(float) / 1e18 self.cache['df_accounts']['account.paid'] = self.cache['df_accounts']['account.paid'].map(float) / 1e18 return self.cache['df_accounts'] 
def", "== 'IDENTITY'] def get_user_ranking(self) -> pd.DataFrame: \"\"\" Returns the user raking by total", "get_user_ranking(self) -> pd.DataFrame: \"\"\" Returns the user raking by total amount of cred", "& edges in the cred graph. \"\"\" if self.cache['df_cred_eflow'] is None: def set_plugin(label):", "Returns distributed cred summary over all intervals \"\"\" if self.cache['df_cred_ot'] is None: if", "for i in range(self.total_nodes)] @property def intervals(self, to_datetime=False) -> List[Any]: \"\"\" Returns timestamp", "in the graph \"\"\" if self.cache['df'] is None: self.to_df() return self.cache['df'][self.cache['df']['address.nodeType'] == 'IDENTITY']", "pd.json_normalize(self.accounts_data['accounts']) self.cache['df_accounts']['account.balance'] = self.cache['df_accounts']['account.balance'].map(float) / 1e18 self.cache['df_accounts']['account.paid'] = self.cache['df_accounts']['account.paid'].map(float) / 1e18 return self.cache['df_accounts']", "''): p_name for p_name in plugin_meta} plugin_prefixes.update({plugin_meta[p_name]['edgePrefix'].replace('\\x00', ''): p_name for p_name in plugin_meta})", "\"\"\" if self.cache['df_cred_eflow'] is None: def set_plugin(label): for prefix, plugin in plugin_prefixes.items(): if", "100 df_rank_p.set_index('address.id', inplace=True) df_acc_p = self.accounts[['account.identity.id', 'account.identity.name', 'account.identity.subtype', 'account.active', 'account.balance', 'account.paid' ]] self.cache['df_rank']", "None, } def get_weighted_graph(self, data) -> Dict[str, Any]: \"\"\" Weighted graph from CredResult", "= pd.json_normalize(grain_distribution) self.cache['df_grain']['credTimestampMs'] = pd.to_datetime(self.cache['df_grain']['credTimestampMs'], unit='ms') else: # zeros self.cache['df_grain'] = pd.DataFrame([self.get_dt_intervals(), [0.]", "all intervals \"\"\" if self.cache['df_cred_ot'] is None: if self.cache['df'] is None: self.to_df() self.cache['df_cred_ot']", "\"\"\" if self.cache['df'] is None: 
self.cache['df'] = pd.json_normalize(self.nodes) self.cache['df'].timestamp = pd.to_datetime(self.cache['df'].timestamp, unit='ms') #", "self.cache['df_accounts']['account.paid'] = self.cache['df_accounts']['account.paid'].map(float) / 1e18 return self.cache['df_accounts'] def get_user_nodes(self) -> pd.DataFrame: \"\"\" Returns", "# self.cache['df_rank']['credShare'] = (self.cache['df_rank'].totalCred / distributed_cred) * 100 df_rank_p = self.get_user_nodes()[['address.id', 'totalCred', 'credOverTime']]", "Returns the history of grain distribution \"\"\" if self.cache['df_grain'] is None: grain_history =", "plugin_meta[plugin['name']] = { 'nodePrefix': plugin['nodePrefix'], 'edgePrefix': plugin['edgePrefix'], 'edgeTypes': [{'prefix': et['prefix'], 'weight': et['defaultWeight']} for", "import pandas as pd from typing import Any, Dict, List, Tuple class CredData():", "gained so far \"\"\" if self.cache['df_rank'] is None: # self.cache['df_rank'] = self.get_user_nodes().sort_values('totalCred', ascending=False).reset_index(drop=True)", "in plugin['edgeTypes']], 'nodeTypes': [{'prefix': nt['prefix'], 'weight': nt['defaultWeight']} for nt in plugin['nodeTypes']], } edges.extend([et['prefix']", "/ distributed_cred) * 100 df_rank_p = self.get_user_nodes()[['address.id', 'totalCred', 'credOverTime']] distributed_cred = df_rank_p.totalCred.sum() df_rank_p['credShare']", "plugin_prefixes.items(): if label.startswith(prefix): return plugin return 'Not Found' # PREPROCESSING plugin_meta = dict()", "range(self.total_nodes)] @property def intervals(self, to_datetime=False) -> List[Any]: \"\"\" Returns timestamp intervals where cred", "None: self.to_df() self.cache['df_cred_ot'] = pd.DataFrame([self.get_dt_intervals(), pd.DataFrame(self.cache['df'].credOverTime.to_list()).sum() ]).T self.cache['df_cred_ot'].columns = ['credTimestampMs', 'amount'] self.cache['df_cred_ot'].set_index('credTimestampMs', drop=True," ]
[ "MulticoreTSNE import MulticoreTSNE as TSNE import numpy as np data = np.loadtxt('pca.csv', delimiter=',')", "data = np.loadtxt('pca.csv', delimiter=',') tsne = TSNE(n_jobs=4) Y = tsne.fit_transform(data) np.savetxt('tsne_matrix.csv', Y, delimiter=\",\")", "as np data = np.loadtxt('pca.csv', delimiter=',') tsne = TSNE(n_jobs=4) Y = tsne.fit_transform(data) np.savetxt('tsne_matrix.csv',", "import MulticoreTSNE as TSNE import numpy as np data = np.loadtxt('pca.csv', delimiter=',') tsne", "np data = np.loadtxt('pca.csv', delimiter=',') tsne = TSNE(n_jobs=4) Y = tsne.fit_transform(data) np.savetxt('tsne_matrix.csv', Y,", "import numpy as np data = np.loadtxt('pca.csv', delimiter=',') tsne = TSNE(n_jobs=4) Y =", "from MulticoreTSNE import MulticoreTSNE as TSNE import numpy as np data = np.loadtxt('pca.csv',", "as TSNE import numpy as np data = np.loadtxt('pca.csv', delimiter=',') tsne = TSNE(n_jobs=4)", "MulticoreTSNE as TSNE import numpy as np data = np.loadtxt('pca.csv', delimiter=',') tsne =", "<filename>utils/generate_tsne_data.py from MulticoreTSNE import MulticoreTSNE as TSNE import numpy as np data =", "TSNE import numpy as np data = np.loadtxt('pca.csv', delimiter=',') tsne = TSNE(n_jobs=4) Y", "numpy as np data = np.loadtxt('pca.csv', delimiter=',') tsne = TSNE(n_jobs=4) Y = tsne.fit_transform(data)" ]
[ "is python code # evaluated to a tuple. try: window = literal_eval(window) except", "np import cupy as cp from cusignal.filtering.resample import resample_poly as curesamp from scipy.signal", "E131,E501 'runs on GPU via cusignal.', 'default': False }, 'padtype': { 'type': 'string',", "window ' 'is connected it takes precedence. Default ' '(\"kaiser\", 5.0)', 'default': '(\"kaiser\",", "filter, or the FIR filter coefficients to ' # noqa: E131,E501 'employ. Window", "= siresamp( signal_in, up, down, axis=axis, window=window, padtype=padtype, cval=cval) else: signal_out = curesamp(", "= inputs.get('samplerate', None) new_samplerate = self.conf.get('new_samplerate', None) if new_samplerate and samplerate: ud =", "Node): def init(self): TemplateNodeMixin.init(self) inports = { 'signal': {PortsSpecSchema.port_type: [cp.ndarray, np.ndarray]}, 'samplerate': {", "'new_samplerate': { 'type': 'number', 'description': 'Desired sample rate. Specify this or the '", "PortsSpecSchema.port_type: [int, float, np.float32, np.float64], PortsSpecSchema.optional: True }, 'window': { PortsSpecSchema.port_type: [cp.ndarray, np.ndarray],", "'Default is 0.', # noqa: E131,E501 'default': 0, 'minimum': 0, }, 'window': {", "None) if new_samplerate and samplerate: ud = Fraction(new_samplerate / samplerate).limit_denominator() up = ud.numerator", "False }, 'padtype': { 'type': 'string', 'description': 'Only used when `use_cpu` is set.", "a string choose one of ' 'available windows. If a tuple refer to", "import (Node, PortsSpecSchema, ConfSchema) from greenflow.dataframe_flow.template_node_mixin import TemplateNodeMixin from ..windows import _WINS_CONFIG __all__", "Filter Resample Node', 'type': 'object', 'description': _RESAMPLEPOLY_DESC, 'properties': { 'new_samplerate': { 'type': 'number',", "given axis using polyphase filtering. 
The signal is upsampled by the factor `up`,", "'samplerate_out': { PortsSpecSchema.port_type: [int, float, np.float32, np.float64], PortsSpecSchema.optional: True } } self.template_ports_setup(in_ports=inports, out_ports=outports)", "Node', 'type': 'object', 'description': _RESAMPLEPOLY_DESC, 'properties': { 'new_samplerate': { 'type': 'number', 'description': 'Desired", "of `resample_poly`. This is not ' # noqa: E131,E501 'currently exposed in cusignal.',", "(\"kaiser\", 5.0)) if isinstance(window, str): windows_enum = list(_WINS_CONFIG.keys()) # window could be a", "' 'be the FIR filter coefficients. Note that the FIR ' 'filter is", "'down': { 'type': 'integer', 'description': 'The downsampling factor.' }, 'axis': { 'type': 'integer',", "via ports, otherwise up/down is used. ' 'If both are set then this", "be specified as a string, a ' 'tuple, or a list. If a", "rate. ''' class CusignalResamplePolyNode(TemplateNodeMixin, Node): def init(self): TemplateNodeMixin.init(self) inports = { 'signal': {PortsSpecSchema.port_type:", "Optional path for filter design.' ' gpupath == False may be desirable if", "self.conf['down'] if samplerate: samplerate = inputs['samplerate'] new_samplerate = samplerate * up / down", "window should be a string that is python code # evaluated to a", "is assumed to ' 'be the FIR filter coefficients. Note that the FIR", "from fractions import Fraction import numpy as np import cupy as cp from", "set then this takes precedence over ' 'up/down.' }, 'up': { 'type': 'integer',", "to use to design the ' 'low-pass filter, or the FIR filter coefficients", "RuntimeError('Uknown window: {}'.format(window)) gpupath = self.conf.get('gpupath', True) use_cpu = self.conf.get('use_cpu', False) if use_cpu:", "assumed to be zero during the filtering step. 
    def conf_schema(self):
        """Return the ``ConfSchema`` describing this node's configuration.

        The schema is rendered by the greenflow UI; the property descriptions
        below are user-facing help text. Note: the local name ``json`` is the
        schema dict, not the stdlib ``json`` module (kept as-is for
        consistency with sibling nodes).
        """
        # Scipy resample_poly padtype choices; not exposed by cusignal.
        padtype_enum = ['constant', 'line', 'mean', 'median', 'maximum',
                        'minimum']
        json = {
            'title': 'Polyphase Filter Resample Node',
            'type': 'object',
            'description': _RESAMPLEPOLY_DESC,
            'properties': {
                'new_samplerate': {
                    'type': 'number',
                    'description': 'Desired sample rate. Specify this or the '
                        'up/down parameters. This is used when `samplerate` '  # noqa: E131,E501
                        'is passed in via ports, otherwise up/down is used. '
                        'If both are set then this takes precedence over '
                        'up/down.'
                },
                'up': {
                    'type': 'integer',
                    'description': 'The upsampling factor.'
                },
                'down': {
                    'type': 'integer',
                    'description': 'The downsampling factor.'
                },
                'axis': {
                    'type': 'integer',
                    'description': 'The axis of `x` that is resampled. '
                        'Default is 0.',  # noqa: E131,E501
                    'default': 0,
                    'minimum': 0,
                },
                'window': {
                    'type': 'string',
                    'description': 'Desired window to use to design the '
                        'low-pass filter, or the FIR filter coefficients to '  # noqa: E131,E501
                        'employ. Window can be specified as a string, a '
                        'tuple, or a list. If a string choose one of '
                        'available windows. If a tuple refer to '
                        '`cusignal.windows.get_window`. The tuple format '
                        'specifies the first argument as the string name of '
                        'the window, and the next arguments the needed '
                        'parameters. If `window` is a list it is assumed to '
                        'be the FIR filter coefficients. Note that the FIR '
                        'filter is applied after the upsampling step, so it '
                        'should be designed to operate on a signal at a '
                        'sampling frequency higher than the original by a '
                        'factor of `up//gcd(up, down)`. If the port window '
                        'is connected it takes precedence. Default '
                        '("kaiser", 5.0)',
                    'default': '("kaiser", 5.0)'
                },
                'gpupath': {
                    'type': 'boolean',
                    'description': 'gpupath - Optional path for filter design.'
                        ' gpupath == False may be desirable if filter sizes '  # noqa: E131,E501
                        'are small.',
                    'default': True
                },
                'use_cpu': {
                    'type': 'boolean',
                    'description': 'use_cpu - Use CPU for computation via '
                        'scipy::signal.resample_poly. Default is False and '  # noqa: E131,E501
                        'runs on GPU via cusignal.',
                    'default': False
                },
                'padtype': {
                    'type': 'string',
                    'description': 'Only used when `use_cpu` is set. Scipy '
                        'padtype parameter of `resample_poly`. This is not '  # noqa: E131,E501
                        'currently exposed in cusignal.',
                    'enum': padtype_enum,
                    'default': 'constant'
                },
                'cval': {
                    'type': 'number',
                    'description': 'Only used when `use_cpu` is set. Value '
                        'to use if `padtype="constant"`. Default is zero.'  # noqa: E131,E501
                }
            }
        }
        return ConfSchema(json=json)
''' class CusignalResamplePolyNode(TemplateNodeMixin, Node): def init(self): TemplateNodeMixin.init(self)", "windows_enum = list(_WINS_CONFIG.keys()) # window could be a simple string or python code", "up / down axis = self.conf.get('axis', 0) if 'window' in inputs: window =", "use to design the ' 'low-pass filter, or the FIR filter coefficients to", "siresamp from greenflow.dataframe_flow import (Node, PortsSpecSchema, ConfSchema) from greenflow.dataframe_flow.template_node_mixin import TemplateNodeMixin from ..windows", "array and new sample rate. ''' class CusignalResamplePolyNode(TemplateNodeMixin, Node): def init(self): TemplateNodeMixin.init(self) inports", "`resample_poly`. This is not ' # noqa: E131,E501 'currently exposed in cusignal.', 'enum':", "str): windows_enum = list(_WINS_CONFIG.keys()) # window could be a simple string or python", "zero.' # noqa: E131,E501 } } } return ConfSchema(json=json) def process(self, inputs): signal_in", "= self.conf.get('axis', 0) if 'window' in inputs: window = input['window'] else: window =", "' # noqa: E131,E501 'employ. Window can be specified as a string, a", "design the ' 'low-pass filter, or the FIR filter coefficients to ' #", "resampled. ' 'Default is 0.', # noqa: E131,E501 'default': 0, 'minimum': 0, },", "}, 'padtype': { 'type': 'string', 'description': 'Only used when `use_cpu` is set. Scipy", "after the upsampling step, so it ' 'should be designed to operate on", "sample rate. Values beyond the boundary of the signal are assumed to be", "= Fraction(new_samplerate / samplerate).limit_denominator() up = ud.numerator down = ud.denominator else: up =", "string name of ' 'the window, and the next arguments the needed '", "'sampling frequency higher than the original by a ' 'factor of `up//gcd(up, down)`.", "greenflow.dataframe_flow.template_node_mixin import TemplateNodeMixin from ..windows import _WINS_CONFIG __all__ = ['CusignalResamplePolyNode'] _RESAMPLEPOLY_DESC = '''Resample", "filter coefficients. 
Note that the FIR ' 'filter is applied after the upsampling", "the filtering step. Returns resampled array and new sample rate. ''' class CusignalResamplePolyNode(TemplateNodeMixin,", "otherwise up/down is used. ' 'If both are set then this takes precedence", "}, } outports = { 'signal_out': {PortsSpecSchema.port_type: '${port:signal}'}, 'samplerate_out': { PortsSpecSchema.port_type: [int, float,", "name of ' 'the window, and the next arguments the needed ' 'parameters.", "{}, 'samplerate_out': {}} self.template_meta_setup(out_ports=meta_outports) def conf_schema(self): padtype_enum = ['constant', 'line', 'mean', 'median', 'maximum',", "'Polyphase Filter Resample Node', 'type': 'object', 'description': _RESAMPLEPOLY_DESC, 'properties': { 'new_samplerate': { 'type':", "} return ConfSchema(json=json) def process(self, inputs): signal_in = inputs['signal'] samplerate = inputs.get('samplerate', None)", "'default': '(\"kaiser\", 5.0)' }, 'gpupath': { 'type': 'boolean', 'description': 'gpupath - Optional path", "'window': { PortsSpecSchema.port_type: [cp.ndarray, np.ndarray], PortsSpecSchema.optional: True }, } outports = { 'signal_out':", "the factor `up`, a zero-phase low-pass FIR filter is applied, and then it", "step. Returns resampled array and new sample rate. ''' class CusignalResamplePolyNode(TemplateNodeMixin, Node): def", "Returns resampled array and new sample rate. ''' class CusignalResamplePolyNode(TemplateNodeMixin, Node): def init(self):", "if filter sizes ' # noqa: E131,E501 'are small.', 'default': True }, 'use_cpu':", "takes precedence over ' 'up/down.' 
}, 'up': { 'type': 'integer', 'description': 'The upsampling", "' # noqa: E131,E501 'is passed in via ports, otherwise up/down is used.", "first argument as the string name of ' 'the window, and the next", "' 'low-pass filter, or the FIR filter coefficients to ' # noqa: E131,E501", "= self.conf.get('gpupath', True) use_cpu = self.conf.get('use_cpu', False) if use_cpu: padtype = self.conf.get('padtype', 'constant')", "False may be desirable if filter sizes ' # noqa: E131,E501 'are small.',", "filtering step. Returns resampled array and new sample rate. ''' class CusignalResamplePolyNode(TemplateNodeMixin, Node):", "the FIR filter coefficients. Note that the FIR ' 'filter is applied after", "E131,E501 'is passed in via ports, otherwise up/down is used. ' 'If both", "None) new_samplerate = self.conf.get('new_samplerate', None) if new_samplerate and samplerate: ud = Fraction(new_samplerate /", "`up//gcd(up, down)`. If the port window ' 'is connected it takes precedence. Default", "'padtype parameter of `resample_poly`. This is not ' # noqa: E131,E501 'currently exposed", "TemplateNodeMixin.init(self) inports = { 'signal': {PortsSpecSchema.port_type: [cp.ndarray, np.ndarray]}, 'samplerate': { PortsSpecSchema.port_type: [int, float,", "list it is assumed to ' 'be the FIR filter coefficients. Note that", "is set. Value ' 'to use if `padtype=\"constant\"`. Default is zero.' # noqa:", "input['window'] else: window = self.conf.get('window', (\"kaiser\", 5.0)) if isinstance(window, str): windows_enum = list(_WINS_CONFIG.keys())", "a ' 'tuple, or a list. If a string choose one of '", "filtering. 
The signal is upsampled by the factor `up`, a zero-phase low-pass FIR", "'type': 'object', 'description': _RESAMPLEPOLY_DESC, 'properties': { 'new_samplerate': { 'type': 'number', 'description': 'Desired sample", "float, np.float32, np.float64], PortsSpecSchema.optional: True } } self.template_ports_setup(in_ports=inports, out_ports=outports) meta_outports = {'signal_out': {},", "self.conf.get('window', (\"kaiser\", 5.0)) if isinstance(window, str): windows_enum = list(_WINS_CONFIG.keys()) # window could be", "self.conf.get('padtype', 'constant') cval = self.conf.get('cval') signal_out = siresamp( signal_in, up, down, axis=axis, window=window,", "it ' 'should be designed to operate on a signal at a '", "outports = { 'signal_out': {PortsSpecSchema.port_type: '${port:signal}'}, 'samplerate_out': { PortsSpecSchema.port_type: [int, float, np.float32, np.float64],", "'low-pass filter, or the FIR filter coefficients to ' # noqa: E131,E501 'employ.", "or a list. If a string choose one of ' 'available windows. If", "The tuple format ' 'specifies the first argument as the string name of", "= self.conf.get('new_samplerate', None) if new_samplerate and samplerate: ud = Fraction(new_samplerate / samplerate).limit_denominator() up", "simple string or python code for tuple if window not in windows_enum: #", "except Exception: raise RuntimeError('Uknown window: {}'.format(window)) gpupath = self.conf.get('gpupath', True) use_cpu = self.conf.get('use_cpu',", "literal_eval(window) except Exception: raise RuntimeError('Uknown window: {}'.format(window)) gpupath = self.conf.get('gpupath', True) use_cpu =", "the ' 'low-pass filter, or the FIR filter coefficients to ' # noqa:", "= self.conf.get('padtype', 'constant') cval = self.conf.get('cval') signal_out = siresamp( signal_in, up, down, axis=axis,", "coefficients to ' # noqa: E131,E501 'employ. 
Window can be specified as a", "= { 'title': 'Polyphase Filter Resample Node', 'type': 'object', 'description': _RESAMPLEPOLY_DESC, 'properties': {", "down axis = self.conf.get('axis', 0) if 'window' in inputs: window = input['window'] else:", "parameter of `resample_poly`. This is not ' # noqa: E131,E501 'currently exposed in", "'type': 'string', 'description': 'Desired window to use to design the ' 'low-pass filter,", "{ PortsSpecSchema.port_type: [int, float, np.float32, np.float64], PortsSpecSchema.optional: True }, 'window': { PortsSpecSchema.port_type: [cp.ndarray,", "np.float64], PortsSpecSchema.optional: True }, 'window': { PortsSpecSchema.port_type: [cp.ndarray, np.ndarray], PortsSpecSchema.optional: True }, }", "curesamp from scipy.signal import resample_poly as siresamp from greenflow.dataframe_flow import (Node, PortsSpecSchema, ConfSchema)", "is upsampled by the factor `up`, a zero-phase low-pass FIR filter is applied,", "python code for tuple if window not in windows_enum: # window should be", "{ 'type': 'integer', 'description': 'The axis of `x` that is resampled. ' 'Default", "noqa: E131,E501 'are small.', 'default': True }, 'use_cpu': { 'type': 'boolean', 'description': 'use_cpu", "next arguments the needed ' 'parameters. If `window` is a list it is", "' 'up/down.' }, 'up': { 'type': 'integer', 'description': 'The upsampling factor.' }, 'down':", "down = self.conf['down'] if samplerate: samplerate = inputs['samplerate'] new_samplerate = samplerate * up", "a list. If a string choose one of ' 'available windows. If a", "'up/down.' }, 'up': { 'type': 'integer', 'description': 'The upsampling factor.' 
}, 'down': {", "from scipy.signal import resample_poly as siresamp from greenflow.dataframe_flow import (Node, PortsSpecSchema, ConfSchema) from", "literal_eval from fractions import Fraction import numpy as np import cupy as cp", "inputs): signal_in = inputs['signal'] samplerate = inputs.get('samplerate', None) new_samplerate = self.conf.get('new_samplerate', None) if", "this takes precedence over ' 'up/down.' }, 'up': { 'type': 'integer', 'description': 'The", "on GPU via cusignal.', 'default': False }, 'padtype': { 'type': 'string', 'description': 'Only", "up / down else: new_samplerate = up / down axis = self.conf.get('axis', 0)", "' 'scipy::signal.resample_poly. Default is False and ' # noqa: E131,E501 'runs on GPU", "when `use_cpu` is set. Value ' 'to use if `padtype=\"constant\"`. Default is zero.'", "or the ' 'up/down parameters. This is used when `samplerate` ' # noqa:", "'maximum', 'minimum'] json = { 'title': 'Polyphase Filter Resample Node', 'type': 'object', 'description':", "} self.template_ports_setup(in_ports=inports, out_ports=outports) meta_outports = {'signal_out': {}, 'samplerate_out': {}} self.template_meta_setup(out_ports=meta_outports) def conf_schema(self): padtype_enum", "inputs['samplerate'] new_samplerate = samplerate * up / down else: new_samplerate = up /", "if samplerate: samplerate = inputs['samplerate'] new_samplerate = samplerate * up / down else:", "True }, } outports = { 'signal_out': {PortsSpecSchema.port_type: '${port:signal}'}, 'samplerate_out': { PortsSpecSchema.port_type: [int,", "noqa: E131,E501 'employ. Window can be specified as a string, a ' 'tuple,", "tuple. 
try: window = literal_eval(window) except Exception: raise RuntimeError('Uknown window: {}'.format(window)) gpupath =", "window: {}'.format(window)) gpupath = self.conf.get('gpupath', True) use_cpu = self.conf.get('use_cpu', False) if use_cpu: padtype", "/ down axis = self.conf.get('axis', 0) if 'window' in inputs: window = input['window']", "PortsSpecSchema.optional: True }, 'window': { PortsSpecSchema.port_type: [cp.ndarray, np.ndarray], PortsSpecSchema.optional: True }, } outports", "down)`. If the port window ' 'is connected it takes precedence. Default '", "`down`. The resulting sample rate is ``up / down`` times the original sample", "axis of `x` that is resampled. ' 'Default is 0.', # noqa: E131,E501", "' 'padtype parameter of `resample_poly`. This is not ' # noqa: E131,E501 'currently", "'mean', 'median', 'maximum', 'minimum'] json = { 'title': 'Polyphase Filter Resample Node', 'type':", "'`cusignal.windows.get_window`. The tuple format ' 'specifies the first argument as the string name", "factor.' }, 'down': { 'type': 'integer', 'description': 'The downsampling factor.' 
}, 'axis': {", "applied after the upsampling step, so it ' 'should be designed to operate", "exposed in cusignal.', 'enum': padtype_enum, 'default': 'constant' }, 'cval': { 'type': 'number', 'description':", "{'signal_out': {}, 'samplerate_out': {}} self.template_meta_setup(out_ports=meta_outports) def conf_schema(self): padtype_enum = ['constant', 'line', 'mean', 'median',", "upsampling step, so it ' 'should be designed to operate on a signal", "The signal is upsampled by the factor `up`, a zero-phase low-pass FIR filter", "'(\"kaiser\", 5.0)', 'default': '(\"kaiser\", 5.0)' }, 'gpupath': { 'type': 'boolean', 'description': 'gpupath -", "' 'specifies the first argument as the string name of ' 'the window,", "{PortsSpecSchema.port_type: [cp.ndarray, np.ndarray]}, 'samplerate': { PortsSpecSchema.port_type: [int, float, np.float32, np.float64], PortsSpecSchema.optional: True },", "by the factor `up`, a zero-phase low-pass FIR filter is applied, and then", "import literal_eval from fractions import Fraction import numpy as np import cupy as", "np.float32, np.float64], PortsSpecSchema.optional: True }, 'window': { PortsSpecSchema.port_type: [cp.ndarray, np.ndarray], PortsSpecSchema.optional: True },", "window = input['window'] else: window = self.conf.get('window', (\"kaiser\", 5.0)) if isinstance(window, str): windows_enum", "tuple if window not in windows_enum: # window should be a string that", "the upsampling step, so it ' 'should be designed to operate on a", "up, down, axis=axis, window=window, padtype=padtype, cval=cval) else: signal_out = curesamp( signal_in, up, down,", "for computation via ' 'scipy::signal.resample_poly. Default is False and ' # noqa: E131,E501", "as the string name of ' 'the window, and the next arguments the", "'window' in inputs: window = input['window'] else: window = self.conf.get('window', (\"kaiser\", 5.0)) if", "_RESAMPLEPOLY_DESC, 'properties': { 'new_samplerate': { 'type': 'number', 'description': 'Desired sample rate. 
Specify this", "arguments the needed ' 'parameters. If `window` is a list it is assumed", "'description': 'gpupath - Optional path for filter design.' ' gpupath == False may", "over ' 'up/down.' }, 'up': { 'type': 'integer', 'description': 'The upsampling factor.' },", "'line', 'mean', 'median', 'maximum', 'minimum'] json = { 'title': 'Polyphase Filter Resample Node',", "'gpupath - Optional path for filter design.' ' gpupath == False may be", "/ down else: new_samplerate = up / down axis = self.conf.get('axis', 0) if", "PortsSpecSchema.port_type: [int, float, np.float32, np.float64], PortsSpecSchema.optional: True } } self.template_ports_setup(in_ports=inports, out_ports=outports) meta_outports =", "samplerate: ud = Fraction(new_samplerate / samplerate).limit_denominator() up = ud.numerator down = ud.denominator else:", "down = ud.denominator else: up = self.conf['up'] down = self.conf['down'] if samplerate: samplerate", "'signal_out': {PortsSpecSchema.port_type: '${port:signal}'}, 'samplerate_out': { PortsSpecSchema.port_type: [int, float, np.float32, np.float64], PortsSpecSchema.optional: True }", "and samplerate: ud = Fraction(new_samplerate / samplerate).limit_denominator() up = ud.numerator down = ud.denominator", "that is resampled. ' 'Default is 0.', # noqa: E131,E501 'default': 0, 'minimum':", "}, 'cval': { 'type': 'number', 'description': 'Only used when `use_cpu` is set. Value", "be zero during the filtering step. Returns resampled array and new sample rate.", "scipy.signal import resample_poly as siresamp from greenflow.dataframe_flow import (Node, PortsSpecSchema, ConfSchema) from greenflow.dataframe_flow.template_node_mixin", "'factor of `up//gcd(up, down)`. 
If the port window ' 'is connected it takes", "'title': 'Polyphase Filter Resample Node', 'type': 'object', 'description': _RESAMPLEPOLY_DESC, 'properties': { 'new_samplerate': {", "[cp.ndarray, np.ndarray], PortsSpecSchema.optional: True }, } outports = { 'signal_out': {PortsSpecSchema.port_type: '${port:signal}'}, 'samplerate_out':", "'Desired sample rate. Specify this or the ' 'up/down parameters. This is used", "class CusignalResamplePolyNode(TemplateNodeMixin, Node): def init(self): TemplateNodeMixin.init(self) inports = { 'signal': {PortsSpecSchema.port_type: [cp.ndarray, np.ndarray]},", "float, np.float32, np.float64], PortsSpecSchema.optional: True }, 'window': { PortsSpecSchema.port_type: [cp.ndarray, np.ndarray], PortsSpecSchema.optional: True", "than the original by a ' 'factor of `up//gcd(up, down)`. If the port", "E131,E501 'employ. Window can be specified as a string, a ' 'tuple, or", "to be zero during the filtering step. Returns resampled array and new sample", "sizes ' # noqa: E131,E501 'are small.', 'default': True }, 'use_cpu': { 'type':", "{ 'type': 'boolean', 'description': 'use_cpu - Use CPU for computation via ' 'scipy::signal.resample_poly.", "Resample Node', 'type': 'object', 'description': _RESAMPLEPOLY_DESC, 'properties': { 'new_samplerate': { 'type': 'number', 'description':", "[int, float, np.float32, np.float64], PortsSpecSchema.optional: True }, 'window': { PortsSpecSchema.port_type: [cp.ndarray, np.ndarray], PortsSpecSchema.optional:", "'specifies the first argument as the string name of ' 'the window, and", "a string, a ' 'tuple, or a list. If a string choose one", "'description': 'Only used when `use_cpu` is set. Value ' 'to use if `padtype=\"constant\"`.", "string choose one of ' 'available windows. If a tuple refer to '", "'should be designed to operate on a signal at a ' 'sampling frequency", "'enum': padtype_enum, 'default': 'constant' }, 'cval': { 'type': 'number', 'description': 'Only used when", "of `x` that is resampled. 
' 'Default is 0.', # noqa: E131,E501 'default':", "axis using polyphase filtering. The signal is upsampled by the factor `up`, a", "then this takes precedence over ' 'up/down.' }, 'up': { 'type': 'integer', 'description':", "one of ' 'available windows. If a tuple refer to ' '`cusignal.windows.get_window`. The", "If a tuple refer to ' '`cusignal.windows.get_window`. The tuple format ' 'specifies the", "' '`cusignal.windows.get_window`. The tuple format ' 'specifies the first argument as the string", "is set. Scipy ' 'padtype parameter of `resample_poly`. This is not ' #", "= self.conf.get('window', (\"kaiser\", 5.0)) if isinstance(window, str): windows_enum = list(_WINS_CONFIG.keys()) # window could", "down, axis=axis, window=window, padtype=padtype, cval=cval) else: signal_out = curesamp( signal_in, up, down, axis=axis,", "then it is downsampled by the factor `down`. The resulting sample rate is", "'integer', 'description': 'The axis of `x` that is resampled. ' 'Default is 0.',", "'boolean', 'description': 'gpupath - Optional path for filter design.' ' gpupath == False", "'cval': { 'type': 'number', 'description': 'Only used when `use_cpu` is set. Value '", "applied, and then it is downsampled by the factor `down`. The resulting sample", "{PortsSpecSchema.port_type: '${port:signal}'}, 'samplerate_out': { PortsSpecSchema.port_type: [int, float, np.float32, np.float64], PortsSpecSchema.optional: True } }", "Use CPU for computation via ' 'scipy::signal.resample_poly. Default is False and ' #", "downsampled by the factor `down`. The resulting sample rate is ``up / down``", "'default': False }, 'padtype': { 'type': 'string', 'description': 'Only used when `use_cpu` is", "the next arguments the needed ' 'parameters. If `window` is a list it", "- Optional path for filter design.' ' gpupath == False may be desirable", "a string that is python code # evaluated to a tuple. try: window", "'description': 'Desired sample rate. Specify this or the ' 'up/down parameters. 
This is", "CPU for computation via ' 'scipy::signal.resample_poly. Default is False and ' # noqa:", "cp from cusignal.filtering.resample import resample_poly as curesamp from scipy.signal import resample_poly as siresamp", "window = self.conf.get('window', (\"kaiser\", 5.0)) if isinstance(window, str): windows_enum = list(_WINS_CONFIG.keys()) # window", "new sample rate. ''' class CusignalResamplePolyNode(TemplateNodeMixin, Node): def init(self): TemplateNodeMixin.init(self) inports = {", "'type': 'integer', 'description': 'The downsampling factor.' }, 'axis': { 'type': 'integer', 'description': 'The", "E131,E501 'currently exposed in cusignal.', 'enum': padtype_enum, 'default': 'constant' }, 'cval': { 'type':", "= input['window'] else: window = self.conf.get('window', (\"kaiser\", 5.0)) if isinstance(window, str): windows_enum =", "is used. ' 'If both are set then this takes precedence over '", "'type': 'number', 'description': 'Only used when `use_cpu` is set. Value ' 'to use", "a ' 'factor of `up//gcd(up, down)`. If the port window ' 'is connected", "{ 'new_samplerate': { 'type': 'number', 'description': 'Desired sample rate. Specify this or the", "is applied after the upsampling step, so it ' 'should be designed to", "= inputs['signal'] samplerate = inputs.get('samplerate', None) new_samplerate = self.conf.get('new_samplerate', None) if new_samplerate and", "'properties': { 'new_samplerate': { 'type': 'number', 'description': 'Desired sample rate. Specify this or", "/ samplerate).limit_denominator() up = ud.numerator down = ud.denominator else: up = self.conf['up'] down", "'tuple, or a list. If a string choose one of ' 'available windows.", "gpupath == False may be desirable if filter sizes ' # noqa: E131,E501", "np.ndarray]}, 'samplerate': { PortsSpecSchema.port_type: [int, float, np.float32, np.float64], PortsSpecSchema.optional: True }, 'window': {", "it is assumed to ' 'be the FIR filter coefficients. 
Note that the", "window, and the next arguments the needed ' 'parameters. If `window` is a", "in inputs: window = input['window'] else: window = self.conf.get('window', (\"kaiser\", 5.0)) if isinstance(window,", "cval=cval) else: signal_out = curesamp( signal_in, up, down, axis=axis, window=window, gpupath=gpupath) return {'signal_out':", "original by a ' 'factor of `up//gcd(up, down)`. If the port window '", "to ' 'be the FIR filter coefficients. Note that the FIR ' 'filter", "rate. Values beyond the boundary of the signal are assumed to be zero", "from greenflow.dataframe_flow.template_node_mixin import TemplateNodeMixin from ..windows import _WINS_CONFIG __all__ = ['CusignalResamplePolyNode'] _RESAMPLEPOLY_DESC =", "# noqa: E131,E501 'runs on GPU via cusignal.', 'default': False }, 'padtype': {", "noqa: E131,E501 } } } return ConfSchema(json=json) def process(self, inputs): signal_in = inputs['signal']", "in windows_enum: # window should be a string that is python code #", "signal_out = curesamp( signal_in, up, down, axis=axis, window=window, gpupath=gpupath) return {'signal_out': signal_out, 'samplerate_out':", "using polyphase filtering. The signal is upsampled by the factor `up`, a zero-phase", "''' class CusignalResamplePolyNode(TemplateNodeMixin, Node): def init(self): TemplateNodeMixin.init(self) inports = { 'signal': {PortsSpecSchema.port_type: [cp.ndarray,", "precedence over ' 'up/down.' }, 'up': { 'type': 'integer', 'description': 'The upsampling factor.'", "can be specified as a string, a ' 'tuple, or a list. If", "zero-phase low-pass FIR filter is applied, and then it is downsampled by the", "upsampling factor.' }, 'down': { 'type': 'integer', 'description': 'The downsampling factor.' 
}, 'axis':", "{ PortsSpecSchema.port_type: [int, float, np.float32, np.float64], PortsSpecSchema.optional: True } } self.template_ports_setup(in_ports=inports, out_ports=outports) meta_outports", "inports = { 'signal': {PortsSpecSchema.port_type: [cp.ndarray, np.ndarray]}, 'samplerate': { PortsSpecSchema.port_type: [int, float, np.float32,", "'description': _RESAMPLEPOLY_DESC, 'properties': { 'new_samplerate': { 'type': 'number', 'description': 'Desired sample rate. Specify", "FIR filter coefficients to ' # noqa: E131,E501 'employ. Window can be specified", "- Use CPU for computation via ' 'scipy::signal.resample_poly. Default is False and '", "siresamp( signal_in, up, down, axis=axis, window=window, padtype=padtype, cval=cval) else: signal_out = curesamp( signal_in,", "'minimum'] json = { 'title': 'Polyphase Filter Resample Node', 'type': 'object', 'description': _RESAMPLEPOLY_DESC,", "'type': 'number', 'description': 'Desired sample rate. Specify this or the ' 'up/down parameters.", "in cusignal.', 'enum': padtype_enum, 'default': 'constant' }, 'cval': { 'type': 'number', 'description': 'Only", "from cusignal.filtering.resample import resample_poly as curesamp from scipy.signal import resample_poly as siresamp from", "}, 'window': { PortsSpecSchema.port_type: [cp.ndarray, np.ndarray], PortsSpecSchema.optional: True }, } outports = {", "tuple refer to ' '`cusignal.windows.get_window`. The tuple format ' 'specifies the first argument", "higher than the original by a ' 'factor of `up//gcd(up, down)`. If the", "' 'sampling frequency higher than the original by a ' 'factor of `up//gcd(up,", "'''Resample `signal` along the given axis using polyphase filtering. The signal is upsampled", "CusignalResamplePolyNode(TemplateNodeMixin, Node): def init(self): TemplateNodeMixin.init(self) inports = { 'signal': {PortsSpecSchema.port_type: [cp.ndarray, np.ndarray]}, 'samplerate':", "string, a ' 'tuple, or a list. 
If a string choose one of", "The resulting sample rate is ``up / down`` times the original sample rate.", "or the FIR filter coefficients to ' # noqa: E131,E501 'employ. Window can", "if 'window' in inputs: window = input['window'] else: window = self.conf.get('window', (\"kaiser\", 5.0))", "{}'.format(window)) gpupath = self.conf.get('gpupath', True) use_cpu = self.conf.get('use_cpu', False) if use_cpu: padtype =", "PortsSpecSchema.optional: True }, } outports = { 'signal_out': {PortsSpecSchema.port_type: '${port:signal}'}, 'samplerate_out': { PortsSpecSchema.port_type:", "string or python code for tuple if window not in windows_enum: # window", "be designed to operate on a signal at a ' 'sampling frequency higher", "_WINS_CONFIG __all__ = ['CusignalResamplePolyNode'] _RESAMPLEPOLY_DESC = '''Resample `signal` along the given axis using", "could be a simple string or python code for tuple if window not", "axis = self.conf.get('axis', 0) if 'window' in inputs: window = input['window'] else: window", "factor.' }, 'axis': { 'type': 'integer', 'description': 'The axis of `x` that is", "noqa: E131,E501 'currently exposed in cusignal.', 'enum': padtype_enum, 'default': 'constant' }, 'cval': {", "' gpupath == False may be desirable if filter sizes ' # noqa:", "' 'filter is applied after the upsampling step, so it ' 'should be", "the boundary of the signal are assumed to be zero during the filtering", "to ' # noqa: E131,E501 'employ. Window can be specified as a string,", "are assumed to be zero during the filtering step. Returns resampled array and", "when `samplerate` ' # noqa: E131,E501 'is passed in via ports, otherwise up/down", "'The downsampling factor.' 
}, 'axis': { 'type': 'integer', 'description': 'The axis of `x`", "boundary of the signal are assumed to be zero during the filtering step.", "down else: new_samplerate = up / down axis = self.conf.get('axis', 0) if 'window'", "FIR ' 'filter is applied after the upsampling step, so it ' 'should", "that the FIR ' 'filter is applied after the upsampling step, so it", "as siresamp from greenflow.dataframe_flow import (Node, PortsSpecSchema, ConfSchema) from greenflow.dataframe_flow.template_node_mixin import TemplateNodeMixin from", "# noqa: E131,E501 'employ. Window can be specified as a string, a '", "by the factor `down`. The resulting sample rate is ``up / down`` times", "operate on a signal at a ' 'sampling frequency higher than the original", "from ..windows import _WINS_CONFIG __all__ = ['CusignalResamplePolyNode'] _RESAMPLEPOLY_DESC = '''Resample `signal` along the", "'samplerate_out': {}} self.template_meta_setup(out_ports=meta_outports) def conf_schema(self): padtype_enum = ['constant', 'line', 'mean', 'median', 'maximum', 'minimum']", "5.0)', 'default': '(\"kaiser\", 5.0)' }, 'gpupath': { 'type': 'boolean', 'description': 'gpupath - Optional", "{ 'title': 'Polyphase Filter Resample Node', 'type': 'object', 'description': _RESAMPLEPOLY_DESC, 'properties': { 'new_samplerate':", "return ConfSchema(json=json) def process(self, inputs): signal_in = inputs['signal'] samplerate = inputs.get('samplerate', None) new_samplerate", "samplerate: samplerate = inputs['samplerate'] new_samplerate = samplerate * up / down else: new_samplerate", "['constant', 'line', 'mean', 'median', 'maximum', 'minimum'] json = { 'title': 'Polyphase Filter Resample", "the FIR ' 'filter is applied after the upsampling step, so it '", "'signal': {PortsSpecSchema.port_type: [cp.ndarray, np.ndarray]}, 'samplerate': { PortsSpecSchema.port_type: [int, float, np.float32, np.float64], PortsSpecSchema.optional: True", "'filter is applied after the upsampling step, so it ' 'should be designed", "# 
noqa: E131,E501 'currently exposed in cusignal.', 'enum': padtype_enum, 'default': 'constant' }, 'cval':", "signal_out = siresamp( signal_in, up, down, axis=axis, window=window, padtype=padtype, cval=cval) else: signal_out =", "inputs: window = input['window'] else: window = self.conf.get('window', (\"kaiser\", 5.0)) if isinstance(window, str):", "if new_samplerate and samplerate: ud = Fraction(new_samplerate / samplerate).limit_denominator() up = ud.numerator down", "a simple string or python code for tuple if window not in windows_enum:", "be a simple string or python code for tuple if window not in", "' 'factor of `up//gcd(up, down)`. If the port window ' 'is connected it", "'scipy::signal.resample_poly. Default is False and ' # noqa: E131,E501 'runs on GPU via", "port window ' 'is connected it takes precedence. Default ' '(\"kaiser\", 5.0)', 'default':", "used. ' 'If both are set then this takes precedence over ' 'up/down.'", "down`` times the original sample rate. Values beyond the boundary of the signal", "..windows import _WINS_CONFIG __all__ = ['CusignalResamplePolyNode'] _RESAMPLEPOLY_DESC = '''Resample `signal` along the given", "along the given axis using polyphase filtering. The signal is upsampled by the", "from greenflow.dataframe_flow import (Node, PortsSpecSchema, ConfSchema) from greenflow.dataframe_flow.template_node_mixin import TemplateNodeMixin from ..windows import", "'is connected it takes precedence. Default ' '(\"kaiser\", 5.0)', 'default': '(\"kaiser\", 5.0)' },", "may be desirable if filter sizes ' # noqa: E131,E501 'are small.', 'default':", "{ 'type': 'number', 'description': 'Only used when `use_cpu` is set. 
Value ' 'to", "== False may be desirable if filter sizes ' # noqa: E131,E501 'are", "axis=axis, window=window, padtype=padtype, cval=cval) else: signal_out = curesamp( signal_in, up, down, axis=axis, window=window,", "'are small.', 'default': True }, 'use_cpu': { 'type': 'boolean', 'description': 'use_cpu - Use", "# noqa: E131,E501 'default': 0, 'minimum': 0, }, 'window': { 'type': 'string', 'description':", "init(self): TemplateNodeMixin.init(self) inports = { 'signal': {PortsSpecSchema.port_type: [cp.ndarray, np.ndarray]}, 'samplerate': { PortsSpecSchema.port_type: [int,", "def init(self): TemplateNodeMixin.init(self) inports = { 'signal': {PortsSpecSchema.port_type: [cp.ndarray, np.ndarray]}, 'samplerate': { PortsSpecSchema.port_type:", "upsampled by the factor `up`, a zero-phase low-pass FIR filter is applied, and", "are set then this takes precedence over ' 'up/down.' }, 'up': { 'type':", "cusignal.filtering.resample import resample_poly as curesamp from scipy.signal import resample_poly as siresamp from greenflow.dataframe_flow", "' 'up/down parameters. This is used when `samplerate` ' # noqa: E131,E501 'is", "and the next arguments the needed ' 'parameters. If `window` is a list", "a list it is assumed to ' 'be the FIR filter coefficients. Note", "is ``up / down`` times the original sample rate. Values beyond the boundary", "'object', 'description': _RESAMPLEPOLY_DESC, 'properties': { 'new_samplerate': { 'type': 'number', 'description': 'Desired sample rate.", "frequency higher than the original by a ' 'factor of `up//gcd(up, down)`. 
If", "{ PortsSpecSchema.port_type: [cp.ndarray, np.ndarray], PortsSpecSchema.optional: True }, } outports = { 'signal_out': {PortsSpecSchema.port_type:", "Window can be specified as a string, a ' 'tuple, or a list.", "' # noqa: E131,E501 'currently exposed in cusignal.', 'enum': padtype_enum, 'default': 'constant' },", "0, 'minimum': 0, }, 'window': { 'type': 'string', 'description': 'Desired window to use", "numpy as np import cupy as cp from cusignal.filtering.resample import resample_poly as curesamp", "the signal are assumed to be zero during the filtering step. Returns resampled", "window could be a simple string or python code for tuple if window", "} } } return ConfSchema(json=json) def process(self, inputs): signal_in = inputs['signal'] samplerate =", "}, 'down': { 'type': 'integer', 'description': 'The downsampling factor.' }, 'axis': { 'type':", "= '''Resample `signal` along the given axis using polyphase filtering. The signal is", "evaluated to a tuple. try: window = literal_eval(window) except Exception: raise RuntimeError('Uknown window:", "'to use if `padtype=\"constant\"`. Default is zero.' # noqa: E131,E501 } } }", "`signal` along the given axis using polyphase filtering. The signal is upsampled by", "downsampling factor.' }, 'axis': { 'type': 'integer', 'description': 'The axis of `x` that", "refer to ' '`cusignal.windows.get_window`. The tuple format ' 'specifies the first argument as", "'The axis of `x` that is resampled. 
' 'Default is 0.', # noqa:", "= { 'signal_out': {PortsSpecSchema.port_type: '${port:signal}'}, 'samplerate_out': { PortsSpecSchema.port_type: [int, float, np.float32, np.float64], PortsSpecSchema.optional:", "self.conf.get('gpupath', True) use_cpu = self.conf.get('use_cpu', False) if use_cpu: padtype = self.conf.get('padtype', 'constant') cval", "samplerate = inputs['samplerate'] new_samplerate = samplerate * up / down else: new_samplerate =", "E131,E501 'default': 0, 'minimum': 0, }, 'window': { 'type': 'string', 'description': 'Desired window", "0) if 'window' in inputs: window = input['window'] else: window = self.conf.get('window', (\"kaiser\",", "TemplateNodeMixin from ..windows import _WINS_CONFIG __all__ = ['CusignalResamplePolyNode'] _RESAMPLEPOLY_DESC = '''Resample `signal` along", "raise RuntimeError('Uknown window: {}'.format(window)) gpupath = self.conf.get('gpupath', True) use_cpu = self.conf.get('use_cpu', False) if", "window to use to design the ' 'low-pass filter, or the FIR filter", "np.float64], PortsSpecSchema.optional: True } } self.template_ports_setup(in_ports=inports, out_ports=outports) meta_outports = {'signal_out': {}, 'samplerate_out': {}}", "should be a string that is python code # evaluated to a tuple.", "FIR filter is applied, and then it is downsampled by the factor `down`.", "def conf_schema(self): padtype_enum = ['constant', 'line', 'mean', 'median', 'maximum', 'minimum'] json = {", "precedence. Default ' '(\"kaiser\", 5.0)', 'default': '(\"kaiser\", 5.0)' }, 'gpupath': { 'type': 'boolean',", "not in windows_enum: # window should be a string that is python code", "takes precedence. Default ' '(\"kaiser\", 5.0)', 'default': '(\"kaiser\", 5.0)' }, 'gpupath': { 'type':", "0, }, 'window': { 'type': 'string', 'description': 'Desired window to use to design", "use_cpu: padtype = self.conf.get('padtype', 'constant') cval = self.conf.get('cval') signal_out = siresamp( signal_in, up,", "to a tuple. 
try: window = literal_eval(window) except Exception: raise RuntimeError('Uknown window: {}'.format(window))", "5.0)' }, 'gpupath': { 'type': 'boolean', 'description': 'gpupath - Optional path for filter", "use_cpu = self.conf.get('use_cpu', False) if use_cpu: padtype = self.conf.get('padtype', 'constant') cval = self.conf.get('cval')", "'description': 'use_cpu - Use CPU for computation via ' 'scipy::signal.resample_poly. Default is False", "Default is False and ' # noqa: E131,E501 'runs on GPU via cusignal.',", "= { 'signal': {PortsSpecSchema.port_type: [cp.ndarray, np.ndarray]}, 'samplerate': { PortsSpecSchema.port_type: [int, float, np.float32, np.float64],", "import _WINS_CONFIG __all__ = ['CusignalResamplePolyNode'] _RESAMPLEPOLY_DESC = '''Resample `signal` along the given axis", "set. Scipy ' 'padtype parameter of `resample_poly`. This is not ' # noqa:", "is 0.', # noqa: E131,E501 'default': 0, 'minimum': 0, }, 'window': { 'type':", "'use_cpu': { 'type': 'boolean', 'description': 'use_cpu - Use CPU for computation via '", "of ' 'available windows. If a tuple refer to ' '`cusignal.windows.get_window`. The tuple", "= ud.denominator else: up = self.conf['up'] down = self.conf['down'] if samplerate: samplerate =", "zero during the filtering step. Returns resampled array and new sample rate. '''", "as np import cupy as cp from cusignal.filtering.resample import resample_poly as curesamp from", "needed ' 'parameters. If `window` is a list it is assumed to '", "__all__ = ['CusignalResamplePolyNode'] _RESAMPLEPOLY_DESC = '''Resample `signal` along the given axis using polyphase", "out_ports=outports) meta_outports = {'signal_out': {}, 'samplerate_out': {}} self.template_meta_setup(out_ports=meta_outports) def conf_schema(self): padtype_enum = ['constant',", "signal at a ' 'sampling frequency higher than the original by a '", "the original sample rate. Values beyond the boundary of the signal are assumed", "' 'is connected it takes precedence. 
Default ' '(\"kaiser\", 5.0)', 'default': '(\"kaiser\", 5.0)'", "list(_WINS_CONFIG.keys()) # window could be a simple string or python code for tuple", "['CusignalResamplePolyNode'] _RESAMPLEPOLY_DESC = '''Resample `signal` along the given axis using polyphase filtering. The", "self.template_ports_setup(in_ports=inports, out_ports=outports) meta_outports = {'signal_out': {}, 'samplerate_out': {}} self.template_meta_setup(out_ports=meta_outports) def conf_schema(self): padtype_enum =", "python code # evaluated to a tuple. try: window = literal_eval(window) except Exception:", "else: signal_out = curesamp( signal_in, up, down, axis=axis, window=window, gpupath=gpupath) return {'signal_out': signal_out,", "PortsSpecSchema.optional: True } } self.template_ports_setup(in_ports=inports, out_ports=outports) meta_outports = {'signal_out': {}, 'samplerate_out': {}} self.template_meta_setup(out_ports=meta_outports)", "at a ' 'sampling frequency higher than the original by a ' 'factor", "'number', 'description': 'Desired sample rate. Specify this or the ' 'up/down parameters. This", "# evaluated to a tuple. try: window = literal_eval(window) except Exception: raise RuntimeError('Uknown", "that is python code # evaluated to a tuple. 
try: window = literal_eval(window)", "self.conf.get('cval') signal_out = siresamp( signal_in, up, down, axis=axis, window=window, padtype=padtype, cval=cval) else: signal_out", "resample_poly as curesamp from scipy.signal import resample_poly as siresamp from greenflow.dataframe_flow import (Node,", "' 'Default is 0.', # noqa: E131,E501 'default': 0, 'minimum': 0, }, 'window':", "= ['CusignalResamplePolyNode'] _RESAMPLEPOLY_DESC = '''Resample `signal` along the given axis using polyphase filtering.", "= up / down axis = self.conf.get('axis', 0) if 'window' in inputs: window", "True) use_cpu = self.conf.get('use_cpu', False) if use_cpu: padtype = self.conf.get('padtype', 'constant') cval =", "self.conf['up'] down = self.conf['down'] if samplerate: samplerate = inputs['samplerate'] new_samplerate = samplerate *", "cval = self.conf.get('cval') signal_out = siresamp( signal_in, up, down, axis=axis, window=window, padtype=padtype, cval=cval)", "padtype=padtype, cval=cval) else: signal_out = curesamp( signal_in, up, down, axis=axis, window=window, gpupath=gpupath) return", "import cupy as cp from cusignal.filtering.resample import resample_poly as curesamp from scipy.signal import", "If `window` is a list it is assumed to ' 'be the FIR", "up/down is used. ' 'If both are set then this takes precedence over", "to ' '`cusignal.windows.get_window`. The tuple format ' 'specifies the first argument as the", "' # noqa: E131,E501 'are small.', 'default': True }, 'use_cpu': { 'type': 'boolean',", "else: new_samplerate = up / down axis = self.conf.get('axis', 0) if 'window' in", "filter design.' 
' gpupath == False may be desirable if filter sizes '", "import resample_poly as siresamp from greenflow.dataframe_flow import (Node, PortsSpecSchema, ConfSchema) from greenflow.dataframe_flow.template_node_mixin import", "meta_outports = {'signal_out': {}, 'samplerate_out': {}} self.template_meta_setup(out_ports=meta_outports) def conf_schema(self): padtype_enum = ['constant', 'line',", "connected it takes precedence. Default ' '(\"kaiser\", 5.0)', 'default': '(\"kaiser\", 5.0)' }, 'gpupath':", "used when `use_cpu` is set. Scipy ' 'padtype parameter of `resample_poly`. This is", "`use_cpu` is set. Scipy ' 'padtype parameter of `resample_poly`. This is not '", "noqa: E131,E501 'runs on GPU via cusignal.', 'default': False }, 'padtype': { 'type':", "up = self.conf['up'] down = self.conf['down'] if samplerate: samplerate = inputs['samplerate'] new_samplerate =", "'runs on GPU via cusignal.', 'default': False }, 'padtype': { 'type': 'string', 'description':", "'${port:signal}'}, 'samplerate_out': { PortsSpecSchema.port_type: [int, float, np.float32, np.float64], PortsSpecSchema.optional: True } } self.template_ports_setup(in_ports=inports,", "Specify this or the ' 'up/down parameters. This is used when `samplerate` '", "of ' 'the window, and the next arguments the needed ' 'parameters. If", "the port window ' 'is connected it takes precedence. Default ' '(\"kaiser\", 5.0)',", "{ 'signal': {PortsSpecSchema.port_type: [cp.ndarray, np.ndarray]}, 'samplerate': { PortsSpecSchema.port_type: [int, float, np.float32, np.float64], PortsSpecSchema.optional:", "{ 'signal_out': {PortsSpecSchema.port_type: '${port:signal}'}, 'samplerate_out': { PortsSpecSchema.port_type: [int, float, np.float32, np.float64], PortsSpecSchema.optional: True", "as cp from cusignal.filtering.resample import resample_poly as curesamp from scipy.signal import resample_poly as", "of `up//gcd(up, down)`. 
If the port window ' 'is connected it takes precedence.", "} } self.template_ports_setup(in_ports=inports, out_ports=outports) meta_outports = {'signal_out': {}, 'samplerate_out': {}} self.template_meta_setup(out_ports=meta_outports) def conf_schema(self):", "'Only used when `use_cpu` is set. Scipy ' 'padtype parameter of `resample_poly`. This", "new_samplerate and samplerate: ud = Fraction(new_samplerate / samplerate).limit_denominator() up = ud.numerator down =", "is not ' # noqa: E131,E501 'currently exposed in cusignal.', 'enum': padtype_enum, 'default':", "'integer', 'description': 'The upsampling factor.' }, 'down': { 'type': 'integer', 'description': 'The downsampling", "filter sizes ' # noqa: E131,E501 'are small.', 'default': True }, 'use_cpu': {", "'available windows. If a tuple refer to ' '`cusignal.windows.get_window`. The tuple format '", "Value ' 'to use if `padtype=\"constant\"`. Default is zero.' # noqa: E131,E501 }", "'default': 0, 'minimum': 0, }, 'window': { 'type': 'string', 'description': 'Desired window to", "'axis': { 'type': 'integer', 'description': 'The axis of `x` that is resampled. '", "resample_poly as siresamp from greenflow.dataframe_flow import (Node, PortsSpecSchema, ConfSchema) from greenflow.dataframe_flow.template_node_mixin import TemplateNodeMixin", "low-pass FIR filter is applied, and then it is downsampled by the factor", "0.', # noqa: E131,E501 'default': 0, 'minimum': 0, }, 'window': { 'type': 'string',", "a tuple refer to ' '`cusignal.windows.get_window`. The tuple format ' 'specifies the first", "'use_cpu - Use CPU for computation via ' 'scipy::signal.resample_poly. Default is False and", "samplerate = inputs.get('samplerate', None) new_samplerate = self.conf.get('new_samplerate', None) if new_samplerate and samplerate: ud", "choose one of ' 'available windows. 
If a tuple refer to ' '`cusignal.windows.get_window`.", "'Desired window to use to design the ' 'low-pass filter, or the FIR", "the first argument as the string name of ' 'the window, and the", "# window should be a string that is python code # evaluated to", "' 'should be designed to operate on a signal at a ' 'sampling", "a signal at a ' 'sampling frequency higher than the original by a", "padtype_enum, 'default': 'constant' }, 'cval': { 'type': 'number', 'description': 'Only used when `use_cpu`", "Default is zero.' # noqa: E131,E501 } } } return ConfSchema(json=json) def process(self,", "ud.denominator else: up = self.conf['up'] down = self.conf['down'] if samplerate: samplerate = inputs['samplerate']", "is zero.' # noqa: E131,E501 } } } return ConfSchema(json=json) def process(self, inputs):", "' # noqa: E131,E501 'runs on GPU via cusignal.', 'default': False }, 'padtype':", "if window not in windows_enum: # window should be a string that is", "gpupath = self.conf.get('gpupath', True) use_cpu = self.conf.get('use_cpu', False) if use_cpu: padtype = self.conf.get('padtype',", "'string', 'description': 'Only used when `use_cpu` is set. Scipy ' 'padtype parameter of", "'The upsampling factor.' }, 'down': { 'type': 'integer', 'description': 'The downsampling factor.' },", "is False and ' # noqa: E131,E501 'runs on GPU via cusignal.', 'default':", "= ud.numerator down = ud.denominator else: up = self.conf['up'] down = self.conf['down'] if", "it takes precedence. Default ' '(\"kaiser\", 5.0)', 'default': '(\"kaiser\", 5.0)' }, 'gpupath': {", "for tuple if window not in windows_enum: # window should be a string", "design.' ' gpupath == False may be desirable if filter sizes ' #", "a tuple. 
try: window = literal_eval(window) except Exception: raise RuntimeError('Uknown window: {}'.format(window)) gpupath", "np.ndarray], PortsSpecSchema.optional: True }, } outports = { 'signal_out': {PortsSpecSchema.port_type: '${port:signal}'}, 'samplerate_out': {", "= ['constant', 'line', 'mean', 'median', 'maximum', 'minimum'] json = { 'title': 'Polyphase Filter", "If the port window ' 'is connected it takes precedence. Default ' '(\"kaiser\",", "original sample rate. Values beyond the boundary of the signal are assumed to", "}, 'gpupath': { 'type': 'boolean', 'description': 'gpupath - Optional path for filter design.'", "'integer', 'description': 'The downsampling factor.' }, 'axis': { 'type': 'integer', 'description': 'The axis", "signal_in, up, down, axis=axis, window=window, padtype=padtype, cval=cval) else: signal_out = curesamp( signal_in, up,", "windows. If a tuple refer to ' '`cusignal.windows.get_window`. The tuple format ' 'specifies", "is applied, and then it is downsampled by the factor `down`. The resulting", "specified as a string, a ' 'tuple, or a list. If a string", "to operate on a signal at a ' 'sampling frequency higher than the", "sample rate. ''' class CusignalResamplePolyNode(TemplateNodeMixin, Node): def init(self): TemplateNodeMixin.init(self) inports = { 'signal':", "code for tuple if window not in windows_enum: # window should be a", "in via ports, otherwise up/down is used. ' 'If both are set then", "the given axis using polyphase filtering. The signal is upsampled by the factor", "}, 'use_cpu': { 'type': 'boolean', 'description': 'use_cpu - Use CPU for computation via", "# noqa: E131,E501 } } } return ConfSchema(json=json) def process(self, inputs): signal_in =", "sample rate is ``up / down`` times the original sample rate. Values beyond", "and ' # noqa: E131,E501 'runs on GPU via cusignal.', 'default': False },", "via ' 'scipy::signal.resample_poly. 
Default is False and ' # noqa: E131,E501 'runs on", "}, 'axis': { 'type': 'integer', 'description': 'The axis of `x` that is resampled.", "<reponame>t-triobox/gQuant from ast import literal_eval from fractions import Fraction import numpy as np", "ConfSchema(json=json) def process(self, inputs): signal_in = inputs['signal'] samplerate = inputs.get('samplerate', None) new_samplerate =", "during the filtering step. Returns resampled array and new sample rate. ''' class", "be a string that is python code # evaluated to a tuple. try:", "'default': True }, 'use_cpu': { 'type': 'boolean', 'description': 'use_cpu - Use CPU for", "= self.conf['up'] down = self.conf['down'] if samplerate: samplerate = inputs['samplerate'] new_samplerate = samplerate", "else: window = self.conf.get('window', (\"kaiser\", 5.0)) if isinstance(window, str): windows_enum = list(_WINS_CONFIG.keys()) #", "'Only used when `use_cpu` is set. Value ' 'to use if `padtype=\"constant\"`. Default", "self.conf.get('axis', 0) if 'window' in inputs: window = input['window'] else: window = self.conf.get('window',", "(Node, PortsSpecSchema, ConfSchema) from greenflow.dataframe_flow.template_node_mixin import TemplateNodeMixin from ..windows import _WINS_CONFIG __all__ =", "{}} self.template_meta_setup(out_ports=meta_outports) def conf_schema(self): padtype_enum = ['constant', 'line', 'mean', 'median', 'maximum', 'minimum'] json", "so it ' 'should be designed to operate on a signal at a", "used when `samplerate` ' # noqa: E131,E501 'is passed in via ports, otherwise", "when `use_cpu` is set. Scipy ' 'padtype parameter of `resample_poly`. This is not", "argument as the string name of ' 'the window, and the next arguments", "GPU via cusignal.', 'default': False }, 'padtype': { 'type': 'string', 'description': 'Only used", "computation via ' 'scipy::signal.resample_poly. 
Default is False and ' # noqa: E131,E501 'runs", "tuple format ' 'specifies the first argument as the string name of '", "padtype_enum = ['constant', 'line', 'mean', 'median', 'maximum', 'minimum'] json = { 'title': 'Polyphase", "a ' 'sampling frequency higher than the original by a ' 'factor of", "code # evaluated to a tuple. try: window = literal_eval(window) except Exception: raise", "signal_in = inputs['signal'] samplerate = inputs.get('samplerate', None) new_samplerate = self.conf.get('new_samplerate', None) if new_samplerate", "'constant') cval = self.conf.get('cval') signal_out = siresamp( signal_in, up, down, axis=axis, window=window, padtype=padtype,", "and new sample rate. ''' class CusignalResamplePolyNode(TemplateNodeMixin, Node): def init(self): TemplateNodeMixin.init(self) inports =", "if isinstance(window, str): windows_enum = list(_WINS_CONFIG.keys()) # window could be a simple string", "Exception: raise RuntimeError('Uknown window: {}'.format(window)) gpupath = self.conf.get('gpupath', True) use_cpu = self.conf.get('use_cpu', False)", "'up': { 'type': 'integer', 'description': 'The upsampling factor.' }, 'down': { 'type': 'integer',", "{ 'type': 'number', 'description': 'Desired sample rate. Specify this or the ' 'up/down", "Fraction import numpy as np import cupy as cp from cusignal.filtering.resample import resample_poly" ]
[ "from translations.admin import TranslatableAdmin, TranslationInline from .models import Timezone, Continent, Country, City class", "import admin from translations.admin import TranslatableAdmin, TranslationInline from .models import Timezone, Continent, Country,", "class TimezoneAdmin(TranslatableAdmin): inlines = [TranslationInline] class ContinentAdmin(TranslatableAdmin): inlines = [TranslationInline] class CountryAdmin(TranslatableAdmin): inlines", "django.contrib import admin from translations.admin import TranslatableAdmin, TranslationInline from .models import Timezone, Continent,", "City class TimezoneAdmin(TranslatableAdmin): inlines = [TranslationInline] class ContinentAdmin(TranslatableAdmin): inlines = [TranslationInline] class CountryAdmin(TranslatableAdmin):", "Timezone, Continent, Country, City class TimezoneAdmin(TranslatableAdmin): inlines = [TranslationInline] class ContinentAdmin(TranslatableAdmin): inlines =", "[TranslationInline] class CityAdmin(TranslatableAdmin): inlines = [TranslationInline] admin.site.register(Timezone, TimezoneAdmin) admin.site.register(Continent, ContinentAdmin) admin.site.register(Country, CountryAdmin) admin.site.register(City,", "ContinentAdmin(TranslatableAdmin): inlines = [TranslationInline] class CountryAdmin(TranslatableAdmin): inlines = [TranslationInline] class CityAdmin(TranslatableAdmin): inlines =", "TranslationInline from .models import Timezone, Continent, Country, City class TimezoneAdmin(TranslatableAdmin): inlines = [TranslationInline]", "inlines = [TranslationInline] class ContinentAdmin(TranslatableAdmin): inlines = [TranslationInline] class CountryAdmin(TranslatableAdmin): inlines = [TranslationInline]", "= [TranslationInline] class CountryAdmin(TranslatableAdmin): inlines = [TranslationInline] class CityAdmin(TranslatableAdmin): inlines = [TranslationInline] admin.site.register(Timezone,", "Country, City class TimezoneAdmin(TranslatableAdmin): inlines = [TranslationInline] class 
ContinentAdmin(TranslatableAdmin): inlines = [TranslationInline] class", "import TranslatableAdmin, TranslationInline from .models import Timezone, Continent, Country, City class TimezoneAdmin(TranslatableAdmin): inlines", "CountryAdmin(TranslatableAdmin): inlines = [TranslationInline] class CityAdmin(TranslatableAdmin): inlines = [TranslationInline] admin.site.register(Timezone, TimezoneAdmin) admin.site.register(Continent, ContinentAdmin)", "= [TranslationInline] class CityAdmin(TranslatableAdmin): inlines = [TranslationInline] admin.site.register(Timezone, TimezoneAdmin) admin.site.register(Continent, ContinentAdmin) admin.site.register(Country, CountryAdmin)", "= [TranslationInline] class ContinentAdmin(TranslatableAdmin): inlines = [TranslationInline] class CountryAdmin(TranslatableAdmin): inlines = [TranslationInline] class", "translations.admin import TranslatableAdmin, TranslationInline from .models import Timezone, Continent, Country, City class TimezoneAdmin(TranslatableAdmin):", "from .models import Timezone, Continent, Country, City class TimezoneAdmin(TranslatableAdmin): inlines = [TranslationInline] class", ".models import Timezone, Continent, Country, City class TimezoneAdmin(TranslatableAdmin): inlines = [TranslationInline] class ContinentAdmin(TranslatableAdmin):", "admin from translations.admin import TranslatableAdmin, TranslationInline from .models import Timezone, Continent, Country, City", "inlines = [TranslationInline] class CountryAdmin(TranslatableAdmin): inlines = [TranslationInline] class CityAdmin(TranslatableAdmin): inlines = [TranslationInline]", "[TranslationInline] class ContinentAdmin(TranslatableAdmin): inlines = [TranslationInline] class CountryAdmin(TranslatableAdmin): inlines = [TranslationInline] class CityAdmin(TranslatableAdmin):", "class CityAdmin(TranslatableAdmin): inlines = [TranslationInline] admin.site.register(Timezone, TimezoneAdmin) admin.site.register(Continent, ContinentAdmin) admin.site.register(Country, 
CountryAdmin) admin.site.register(City, CityAdmin)", "TimezoneAdmin(TranslatableAdmin): inlines = [TranslationInline] class ContinentAdmin(TranslatableAdmin): inlines = [TranslationInline] class CountryAdmin(TranslatableAdmin): inlines =", "from django.contrib import admin from translations.admin import TranslatableAdmin, TranslationInline from .models import Timezone,", "class ContinentAdmin(TranslatableAdmin): inlines = [TranslationInline] class CountryAdmin(TranslatableAdmin): inlines = [TranslationInline] class CityAdmin(TranslatableAdmin): inlines", "class CountryAdmin(TranslatableAdmin): inlines = [TranslationInline] class CityAdmin(TranslatableAdmin): inlines = [TranslationInline] admin.site.register(Timezone, TimezoneAdmin) admin.site.register(Continent,", "import Timezone, Continent, Country, City class TimezoneAdmin(TranslatableAdmin): inlines = [TranslationInline] class ContinentAdmin(TranslatableAdmin): inlines", "[TranslationInline] class CountryAdmin(TranslatableAdmin): inlines = [TranslationInline] class CityAdmin(TranslatableAdmin): inlines = [TranslationInline] admin.site.register(Timezone, TimezoneAdmin)", "inlines = [TranslationInline] class CityAdmin(TranslatableAdmin): inlines = [TranslationInline] admin.site.register(Timezone, TimezoneAdmin) admin.site.register(Continent, ContinentAdmin) admin.site.register(Country,", "TranslatableAdmin, TranslationInline from .models import Timezone, Continent, Country, City class TimezoneAdmin(TranslatableAdmin): inlines =", "Continent, Country, City class TimezoneAdmin(TranslatableAdmin): inlines = [TranslationInline] class ContinentAdmin(TranslatableAdmin): inlines = [TranslationInline]" ]
[ "textdata=changedinitfs.read() changedinitfs.close() t=open(\"initfs_Win32_new\",\"wb\") data=\"\".join([chr(key[i%257]^ord(textdata[i])) for i in xrange(len(textdata))]) #go through the data applying", "initfs=open(\"initfs_Win32\",\"rb\") magicB=initfs.read(4) magic=unpack(\">I\",magicB)[0] if magic in (0x00D1CE00,0x00D1CE01): #the file is XOR encrypted and", "used; XOR the key with 123 right away initfs.seek(-260, 1) keyB=initfs.read(260) initfs.close() changedinitfs=open(\"initfs_Win32.txt\",\"rb\")", "initfs.seek(-260, 1) keyB=initfs.read(260) initfs.close() changedinitfs=open(\"initfs_Win32.txt\",\"rb\") textdata=changedinitfs.read() changedinitfs.close() t=open(\"initfs_Win32_new\",\"wb\") data=\"\".join([chr(key[i%257]^ord(textdata[i])) for i in xrange(len(textdata))])", "i in xrange(len(textdata))]) #go through the data applying one key byte on one", "is XOR encrypted and has a signature signature=initfs.read(292) key=[ord(initfs.read(1))^123 for i in xrange(260)]", "encrypted and has a signature signature=initfs.read(292) key=[ord(initfs.read(1))^123 for i in xrange(260)] #bytes 257", "<filename>lib/bin/captainsplexx/initfs_tools/make_initfs.py from struct import unpack initfs=open(\"initfs_Win32\",\"rb\") magicB=initfs.read(4) magic=unpack(\">I\",magicB)[0] if magic in (0x00D1CE00,0x00D1CE01): #the", "key with 123 right away initfs.seek(-260, 1) keyB=initfs.read(260) initfs.close() changedinitfs=open(\"initfs_Win32.txt\",\"rb\") textdata=changedinitfs.read() changedinitfs.close() t=open(\"initfs_Win32_new\",\"wb\")", "right away initfs.seek(-260, 1) keyB=initfs.read(260) initfs.close() changedinitfs=open(\"initfs_Win32.txt\",\"rb\") textdata=changedinitfs.read() changedinitfs.close() t=open(\"initfs_Win32_new\",\"wb\") data=\"\".join([chr(key[i%257]^ord(textdata[i])) for i", "1) keyB=initfs.read(260) initfs.close() changedinitfs=open(\"initfs_Win32.txt\",\"rb\") textdata=changedinitfs.read() changedinitfs.close() t=open(\"initfs_Win32_new\",\"wb\") 
data=\"\".join([chr(key[i%257]^ord(textdata[i])) for i in xrange(len(textdata))]) #go", "key=[ord(initfs.read(1))^123 for i in xrange(260)] #bytes 257 258 259 are not used; XOR", "257 258 259 are not used; XOR the key with 123 right away", "through the data applying one key byte on one data t.write(magicB) t.write(signature) t.write(keyB)", "data applying one key byte on one data t.write(magicB) t.write(signature) t.write(keyB) t.write(data) t.close()", "struct import unpack initfs=open(\"initfs_Win32\",\"rb\") magicB=initfs.read(4) magic=unpack(\">I\",magicB)[0] if magic in (0x00D1CE00,0x00D1CE01): #the file is", "if magic in (0x00D1CE00,0x00D1CE01): #the file is XOR encrypted and has a signature", "signature signature=initfs.read(292) key=[ord(initfs.read(1))^123 for i in xrange(260)] #bytes 257 258 259 are not", "259 are not used; XOR the key with 123 right away initfs.seek(-260, 1)", "in xrange(len(textdata))]) #go through the data applying one key byte on one data", "away initfs.seek(-260, 1) keyB=initfs.read(260) initfs.close() changedinitfs=open(\"initfs_Win32.txt\",\"rb\") textdata=changedinitfs.read() changedinitfs.close() t=open(\"initfs_Win32_new\",\"wb\") data=\"\".join([chr(key[i%257]^ord(textdata[i])) for i in", "i in xrange(260)] #bytes 257 258 259 are not used; XOR the key", "a signature signature=initfs.read(292) key=[ord(initfs.read(1))^123 for i in xrange(260)] #bytes 257 258 259 are", "XOR the key with 123 right away initfs.seek(-260, 1) keyB=initfs.read(260) initfs.close() changedinitfs=open(\"initfs_Win32.txt\",\"rb\") textdata=changedinitfs.read()", "the data applying one key byte on one data t.write(magicB) t.write(signature) t.write(keyB) t.write(data)", "magicB=initfs.read(4) magic=unpack(\">I\",magicB)[0] if magic in (0x00D1CE00,0x00D1CE01): #the file is XOR encrypted and has", "258 259 are not used; XOR the key with 123 right away initfs.seek(-260,", "in (0x00D1CE00,0x00D1CE01): #the file is XOR encrypted and has a signature 
signature=initfs.read(292) key=[ord(initfs.read(1))^123", "the key with 123 right away initfs.seek(-260, 1) keyB=initfs.read(260) initfs.close() changedinitfs=open(\"initfs_Win32.txt\",\"rb\") textdata=changedinitfs.read() changedinitfs.close()", "magic in (0x00D1CE00,0x00D1CE01): #the file is XOR encrypted and has a signature signature=initfs.read(292)", "unpack initfs=open(\"initfs_Win32\",\"rb\") magicB=initfs.read(4) magic=unpack(\">I\",magicB)[0] if magic in (0x00D1CE00,0x00D1CE01): #the file is XOR encrypted", "changedinitfs.close() t=open(\"initfs_Win32_new\",\"wb\") data=\"\".join([chr(key[i%257]^ord(textdata[i])) for i in xrange(len(textdata))]) #go through the data applying one", "in xrange(260)] #bytes 257 258 259 are not used; XOR the key with", "(0x00D1CE00,0x00D1CE01): #the file is XOR encrypted and has a signature signature=initfs.read(292) key=[ord(initfs.read(1))^123 for", "keyB=initfs.read(260) initfs.close() changedinitfs=open(\"initfs_Win32.txt\",\"rb\") textdata=changedinitfs.read() changedinitfs.close() t=open(\"initfs_Win32_new\",\"wb\") data=\"\".join([chr(key[i%257]^ord(textdata[i])) for i in xrange(len(textdata))]) #go through", "magic=unpack(\">I\",magicB)[0] if magic in (0x00D1CE00,0x00D1CE01): #the file is XOR encrypted and has a", "not used; XOR the key with 123 right away initfs.seek(-260, 1) keyB=initfs.read(260) initfs.close()", "123 right away initfs.seek(-260, 1) keyB=initfs.read(260) initfs.close() changedinitfs=open(\"initfs_Win32.txt\",\"rb\") textdata=changedinitfs.read() changedinitfs.close() t=open(\"initfs_Win32_new\",\"wb\") data=\"\".join([chr(key[i%257]^ord(textdata[i])) for", "xrange(260)] #bytes 257 258 259 are not used; XOR the key with 123", "with 123 right away initfs.seek(-260, 1) keyB=initfs.read(260) initfs.close() changedinitfs=open(\"initfs_Win32.txt\",\"rb\") textdata=changedinitfs.read() changedinitfs.close() t=open(\"initfs_Win32_new\",\"wb\") data=\"\".join([chr(key[i%257]^ord(textdata[i]))", 
"changedinitfs=open(\"initfs_Win32.txt\",\"rb\") textdata=changedinitfs.read() changedinitfs.close() t=open(\"initfs_Win32_new\",\"wb\") data=\"\".join([chr(key[i%257]^ord(textdata[i])) for i in xrange(len(textdata))]) #go through the data", "#bytes 257 258 259 are not used; XOR the key with 123 right", "xrange(len(textdata))]) #go through the data applying one key byte on one data t.write(magicB)", "#go through the data applying one key byte on one data t.write(magicB) t.write(signature)", "file is XOR encrypted and has a signature signature=initfs.read(292) key=[ord(initfs.read(1))^123 for i in", "import unpack initfs=open(\"initfs_Win32\",\"rb\") magicB=initfs.read(4) magic=unpack(\">I\",magicB)[0] if magic in (0x00D1CE00,0x00D1CE01): #the file is XOR", "#the file is XOR encrypted and has a signature signature=initfs.read(292) key=[ord(initfs.read(1))^123 for i", "t=open(\"initfs_Win32_new\",\"wb\") data=\"\".join([chr(key[i%257]^ord(textdata[i])) for i in xrange(len(textdata))]) #go through the data applying one key", "from struct import unpack initfs=open(\"initfs_Win32\",\"rb\") magicB=initfs.read(4) magic=unpack(\">I\",magicB)[0] if magic in (0x00D1CE00,0x00D1CE01): #the file", "XOR encrypted and has a signature signature=initfs.read(292) key=[ord(initfs.read(1))^123 for i in xrange(260)] #bytes", "are not used; XOR the key with 123 right away initfs.seek(-260, 1) keyB=initfs.read(260)", "initfs.close() changedinitfs=open(\"initfs_Win32.txt\",\"rb\") textdata=changedinitfs.read() changedinitfs.close() t=open(\"initfs_Win32_new\",\"wb\") data=\"\".join([chr(key[i%257]^ord(textdata[i])) for i in xrange(len(textdata))]) #go through the", "for i in xrange(len(textdata))]) #go through the data applying one key byte on", "for i in xrange(260)] #bytes 257 258 259 are not used; XOR the", "data=\"\".join([chr(key[i%257]^ord(textdata[i])) for i in xrange(len(textdata))]) #go through the data applying one key byte", "has a signature signature=initfs.read(292) 
key=[ord(initfs.read(1))^123 for i in xrange(260)] #bytes 257 258 259", "and has a signature signature=initfs.read(292) key=[ord(initfs.read(1))^123 for i in xrange(260)] #bytes 257 258", "signature=initfs.read(292) key=[ord(initfs.read(1))^123 for i in xrange(260)] #bytes 257 258 259 are not used;" ]
[ "import os import testinfra.utils.ansible_runner testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner( os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all') def test_alias(host): host.run_expect([0], 'sudo -u", "os import testinfra.utils.ansible_runner testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner( os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all') def test_alias(host): host.run_expect([0], 'sudo -u molecule", "import testinfra.utils.ansible_runner testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner( os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all') def test_alias(host): host.run_expect([0], 'sudo -u molecule /bin/bash", "<reponame>PW999/home-assistant-ansible<filename>roles/aliasses/molecule/default/tests/test_default.py import os import testinfra.utils.ansible_runner testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner( os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all') def test_alias(host): host.run_expect([0], 'sudo", "testinfra.utils.ansible_runner testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner( os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all') def test_alias(host): host.run_expect([0], 'sudo -u molecule /bin/bash -vilc", "testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner( os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all') def test_alias(host): host.run_expect([0], 'sudo -u molecule /bin/bash -vilc ll')" ]
[ "끊기 def get_code_list_by_market(self, market_code): ''' 종목코드 리스트 받기 #0:장내, 10:코스닥 :param market_code: 시장코드", "= {} code_nm = code_nm.strip() stock_quantity = int(stock_quantity.strip()) buy_price = int(buy_price.strip()) learn_rate =", "%s\" % sPrevNext) print(\"계좌에 가지고 있는 종목은 %s \" % rows) if sPrevNext", "\"\": ls = line.split(\"\\t\") stock_code = ls[0] stock_name = ls[1] stock_price = int(ls[2].split(\"\\n\")[0])", "= 0 else: chegual_quantity = int(chegual_quantity) current_price = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['주문체결']['현재가']) # 출력: -6000", "chegual_quantity = int(chegual_quantity) current_price = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['주문체결']['현재가']) # 출력: -6000 current_price = abs(int(current_price))", "QTest.qWait(10000) self.read_code() self.screen_number_setting() QTest.qWait(5000) #실시간 수신 관련 함수 self.dynamicCall(\"SetRealReg(QString, QString, QString, QString)\", self.screen_start_stop_real,", ": +(-)2520 b = abs(int(b)) c = self.dynamicCall(\"GetCommRealData(QString, int)\", sCode, self.realType.REALTYPE[sRealType]['전일대비']) # 출력", "deposit = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, 0, \"예수금\") self.deposit = int(deposit)", "sCode not in self.jango_dict.keys(): self.jango_dict.update({sCode:{}}) self.jango_dict[sCode].update({\"현재가\": current_price}) self.jango_dict[sCode].update({\"종목코드\": sCode}) self.jango_dict[sCode].update({\"종목명\": stock_name}) self.jango_dict[sCode].update({\"보유수량\": stock_quan})", "sRQName, i, \"주문번호\") order_status = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, i, \"주문상태\")", "True: moving_average_price_prev = 0 price_top_moving = False idx = 1 while True: if", "self.account_stock_dict[code].update({\"보유수량\": stock_quantity}) self.account_stock_dict[code].update({\"매입가\": buy_price}) self.account_stock_dict[code].update({\"수익률(%)\": learn_rate}) self.account_stock_dict[code].update({\"현재가\": 
current_price}) self.account_stock_dict[code].update({\"매입금액\": total_chegual_price}) self.account_stock_dict[code].update({'매매가능수량' : possible_quantity})", "주가가 계속 밑에 존재하는지 확인 prev_price = None if bottom_stock_price == True: moving_average_price_prev", "self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, 0, \"총수익률(%)\") self.total_profit_loss_rate = float(total_profit_loss_rate) self.logging.logger.debug(\"계좌평가잔고내역요청 싱글데이터", "QString)\", sTrCode, sRQName, i, \"수익률(%)\") # 수익률 : -000000001.94 current_price = self.dynamicCall(\"GetCommData(QString, QString,", "* 100 if asd['매매가능수량'] > 0 and (meme_rate > 5 or meme_rate <", "self.realType.REALTYPE['주문체결']['주문수량']) # 출력 : 3 order_quan = int(order_quan) order_price = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['주문체결']['주문가격']) #", "if order_no in self.not_account_stock_dict: pass else: self.not_account_stock_dict[order_no] = {} self.not_account_stock_dict[order_no].update({'종목코드': code}) self.not_account_stock_dict[order_no].update({'종목명': code_nm})", "# 출력 : +(-)2520 b = abs(int(b)) c = self.dynamicCall(\"GetCommRealData(QString, int)\", sCode, self.realType.REALTYPE[sRealType]['전일대비'])", "+= 1 # 해당부분 이평선이 가장 최근의 이평선 가격보다 낮은지 확인 if price_top_moving", "0: temp_screen += 1 self.screen_real_stock = str(temp_screen) if (cnt % 50) == 0:", "전\") elif value == '3': self.logging.logger.debug(\"장 시작\") elif value == \"2\": self.logging.logger.debug(\"장 종료,", "sRQName) for i in range(rows): code = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName,", "abs(int(current_price)) stock_quan = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['잔고']['보유수량']) stock_quan = int(stock_quan) like_quan = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['잔고']['주문가능수량']) like_quan", ": +(-)2530 k = abs(int(k)) if sCode not in self.portfolio_stock_dict: self.portfolio_stock_dict.update({sCode:{}}) 
self.portfolio_stock_dict[sCode].update({\"체결시간\": a})", "sRQName, i, \"보유수량\") # 보유수량 : 000000000000010 buy_price = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\",", "0 not_chegual_quan = int(not_chegual_quan) order_gubun = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['주문체결']['주문구분']) # 출력: -매도, +매수 order_gubun", "import sys from PyQt5.QAxContainer import * from PyQt5.QtCore import * from config.errorCode import", "/ 120 if moving_average_price_prev <= int(self.calcul_data[idx][6]) and idx <= 20: self.logging.logger.debug(\"20일 동안 주가가", "order_gubun}) self.not_account_stock_dict[order_number].update({\"주문/체결시간\": chegual_time_str}) self.not_account_stock_dict[order_number].update({\"체결가\": chegual_price}) self.not_account_stock_dict[order_number].update({\"체결량\": chegual_quantity}) self.not_account_stock_dict[order_number].update({\"현재가\": current_price}) self.not_account_stock_dict[order_number].update({\"(최우선)매도호가\": first_sell_price}) self.not_account_stock_dict[order_number].update({\"(최우선)매수호가\": first_buy_price})", "프로그램 동작\", fallback=\"주식 자동화 프로그램 동작\", text=\"주식 자동화 프로그램이 동작 되었습니다.\" ) def", "i, \"고가\") # 출력 : 000070 low_price = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode,", "이벤트 self.OnReceiveMsg.connect(self.msg_slot) def real_event_slot(self): self.OnReceiveRealData.connect(self.realdata_slot) # 실시간 이벤트 연결 self.OnReceiveChejanData.connect(self.chejan_slot) #종목 주문체결 관련한", "QString)\", \"체결구분\", \"1\") self.dynamicCall(\"SetInputValue(QString, QString)\", \"매매구분\", \"0\") self.dynamicCall(\"CommRqData(QString, QString, int, QString)\", \"실시간미체결요청\", \"opt10075\",", "and check_price > prev_price: self.logging.logger.debug(\"포착된 이평선의 가격이 오늘자 이평선 가격보다 낮은 것 확인\")", "자동화 프로그램 동작\", fallback=\"주식 자동화 프로그램 동작\", text=\"주식 자동화 프로그램이 동작 되었습니다.\" )", "abs(int(first_buy_price)) ######## 새로 들어온 주문이면 주문번호 할당 if order_number not in self.not_account_stock_dict.keys(): 
self.not_account_stock_dict.update({order_number:", "구함 total_price = 0 for value in self.calcul_data[:120]: total_price += int(value[1]) moving_average_price =", "#슬랙 동작 #print(\"kiwoom() class start. \") self.logging.logger.debug(\"Kiwoom() class start.\") ####### event loop를 실행하기", "QEventLoop() # 예수금 요청용 이벤트루프 self.calculator_event_loop = QEventLoop() ######################################### ########### 전체 종목 관리", "self.jango_dict[sCode].update({\"매도매수구분\": meme_gubun}) self.jango_dict[sCode].update({\"(최우선)매도호가\": first_sell_price}) self.jango_dict[sCode].update({\"(최우선)매수호가\": first_buy_price}) if stock_quan == 0: del self.jango_dict[sCode] #송수신", "%s\" % self.output_deposit) self.stop_screen_cancel(self.screen_my_info) self.detail_account_info_event_loop.exit() elif sRQName == \"계좌평가잔고내역요청\": total_buy_money = self.dynamicCall(\"GetCommData(QString, QString,", "\"거래대금\") # 출력 : 000070 date = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName,", "000070 start_price = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, i, \"시가\") # 출력", "sCode = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['잔고']['종목코드'])[1:] stock_name = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['잔고']['종목명']) stock_name = stock_name.strip() current_price =", "+(-)2530 j = abs(int(j)) k = self.dynamicCall(\"GetCommRealData(QString, int)\", sCode, self.realType.REALTYPE[sRealType]['저가']) # 출력 :", "‘’]. 
[…]] cnt = self.dynamicCall(\"GetRepeatCnt(QString, QString)\", sTrCode, sRQName) self.logging.logger.debug(\"남은 일자 수 %s\" %", "\"SendOrder(QString, QString, QString, int, QString, int, int, QString, QString)\", [\"신규매도\", self.portfolio_stock_dict[sCode][\"주문용스크린번호\"], self.account_num, 2,", "%s \" % self.not_account_stock_dict[order_no]) self.detail_account_info_event_loop.exit() elif sRQName == \"주식일봉차트조회\": code = self.dynamicCall(\"GetCommData(QString, QString,", "sTrCode, sRecordName, sPrevNext): if sRQName == \"예수금상세현황요청\": deposit = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\",", "and e > meme_price: order_success = self.dynamicCall( \"SendOrder(QString, QString, QString, int, QString, int,", "함수 self.dynamicCall(\"SetRealReg(QString, QString, QString, QString)\", self.screen_start_stop_real, '', self.realType.REALTYPE['장시작시간']['장운영구분'], \"0\") for code in self.portfolio_stock_dict.keys():", "전달 성공\") del self.account_stock_dict[sCode] else: self.logging.logger.debug(\"매도주문 전달 실패\") elif sCode in self.jango_dict.keys(): jd", "사용할 비율 self.output_deposit = 0 #출력가능 금액 self.total_profit_loss_money = 0 #총평가손익금액 self.total_profit_loss_rate =", "관련한 이벤트 def signal_login_commConnect(self): self.dynamicCall(\"CommConnect()\") # 로그인 요청 시그널 self.login_event_loop.exec_() # 이벤트루프 실행", "= \"4000\" #계산용 스크린 번호 self.screen_real_stock = \"5000\" #종목별 할당할 스크린 번호 self.screen_meme_stock", "\"0\") for code in self.portfolio_stock_dict.keys(): screen_num = self.portfolio_stock_dict[code]['스크린번호'] fids = self.realType.REALTYPE['주식체결']['체결시간'] self.dynamicCall(\"SetRealReg(QString, QString,", "= account_list.split(';')[0] self.account_num = account_num self.logging.logger.debug(\"계좌번호 : %s\" % account_num) def detail_account_info(self, sPrevNext=\"0\"):", "learn_rate = float(learn_rate.strip()) current_price = int(current_price.strip()) total_chegual_price = int(total_chegual_price.strip()) possible_quantity = int(possible_quantity.strip()) 
self.account_stock_dict[code].update({\"종목명\":", "100 if jd['주문가능수량'] > 0 and (meme_rate > 5 or meme_rate < -5):", "0: del self.not_account_stock_dict[order_num] # 실시간 체결 정보 def chejan_slot(self, sGubun, nItemCnt, sFidList): if", "0115061 마지막 주문번호 order_status = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['주문체결']['주문상태']) # 출력: 접수, 확인, 체결 order_quan", "self.portfolio_stock_dict.update({sCode:{}}) self.portfolio_stock_dict[sCode].update({\"체결시간\": a}) self.portfolio_stock_dict[sCode].update({\"현재가\": b}) self.portfolio_stock_dict[sCode].update({\"전일대비\": c}) self.portfolio_stock_dict[sCode].update({\"등락율\": d}) self.portfolio_stock_dict[sCode].update({\"(최우선)매도호가\": e}) self.portfolio_stock_dict[sCode].update({\"(최우선)매수호가\": f})", "from PyQt5.QtCore import * from config.errorCode import * from PyQt5.QtTest import * from", "<= moving_average_price and moving_average_price <= int(self.calcul_data[0][6]): self.logging.logger.debug(\"오늘 주가 120이평선 아래에 걸쳐있는 것 확인\")", "stock_code = ls[0] stock_name = ls[1] stock_price = int(ls[2].split(\"\\n\")[0]) stock_price = abs(stock_price) self.portfolio_stock_dict.update({stock_code:{\"종목명\":stock_name,", "QString)\", sTrCode, sRQName, i, \"주문번호\") order_status = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName,", "elif sRealType == \"주식체결\": a = self.dynamicCall(\"GetCommRealData(QString, int)\", sCode, self.realType.REALTYPE[sRealType]['체결시간']) # 출력 HHMMSS", "d = float(d) e = self.dynamicCall(\"GetCommRealData(QString, int)\", sCode, self.realType.REALTYPE[sRealType]['(최우선)매도호가']) # 출력 : +(-)2520", "self.realType.REALTYPE['주문체결']['종목명']) stock_name = stock_name.strip() origin_order_number = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['주문체결']['원주문번호']) # 출력 : defaluse :", "account_num self.logging.logger.debug(\"계좌번호 : %s\" % account_num) def detail_account_info(self, sPrevNext=\"0\"): self.dynamicCall(\"SetInputValue(QString, QString)\", \"계좌번호\", 
self.account_num)", "전달 성공\") else: self.logging.logger.debug(\"매도주문 전달 실패\") elif d > 2.0 and sCode not", "# 레지스트리에 저장된 api 모듈 불러오기 def event_slots(self): self.OnEventConnect.connect(self.login_slot) # 로그인 관련 이벤트", "= True check_price = int(self.calcul_data[0][6]) # 과거 일봉 데이터를 조회하면서 120일 이평선보다 주가가", "= int(total_buy_money) total_profit_loss_money = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, 0, \"총평가손익금액\") self.total_profit_loss_money", "QString, int, QString)\", sTrCode, sRQName, i, \"매입가\") # 매입가 : 000000000054100 learn_rate =", "= self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['잔고']['종목명']) stock_name = stock_name.strip() current_price = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['잔고']['현재가']) current_price = abs(int(current_price))", "None if int(self.calcul_data[0][7]) <= moving_average_price and moving_average_price <= int(self.calcul_data[0][6]): self.logging.logger.debug(\"오늘 주가 120이평선 아래에", ": -000000001.94 current_price = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, i, \"현재가\") #", "f = open(\"files/condition_stock.txt\", \"a\", encoding=\"utf8\") f.write(\"%s\\t%s\\t%s\\n\" % (code, code_nm, str(self.calcul_data[0][1]))) f.close() elif pass_success", "self.not_account_stock_dict[order_no].update({'체결량': ok_quantity}) self.logging.logger.debug(\"미체결 종목 : %s \" % self.not_account_stock_dict[order_no]) self.detail_account_info_event_loop.exit() elif sRQName ==", "통과 %s \" % sCode) result = (self.use_money * 0.1) / e quantity", "sCode, self.realType.REALTYPE[sRealType]['고가']) # 출력 : +(-)2530 i = abs(int(i)) j = self.dynamicCall(\"GetCommRealData(QString, int)\",", "account_num = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['잔고']['계좌번호']) sCode = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['잔고']['종목코드'])[1:] stock_name = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['잔고']['종목명']) 
stock_name", "QString)\", screen_num, code, fids, \"1\") self.slack.notification( pretext=\"주식자동화 프로그램 동작\", title=\"주식 자동화 프로그램 동작\",", "######################################### ########### 전체 종목 관리 self.all_stock_dict = {} ########################### ####### 계좌 관련된 변수", "QString)\", \"비밀번호\", \"0000\") self.dynamicCall(\"SetInputValue(QString, QString)\", \"비밀번호입력매체구분\", \"00\") self.dynamicCall(\"SetInputValue(QString, QString)\", \"조회구분\", \"1\") self.dynamicCall(\"CommRqData(QString, QString,", "k = self.dynamicCall(\"GetCommRealData(QString, int)\", sCode, self.realType.REALTYPE[sRealType]['저가']) # 출력 : +(-)2530 k = abs(int(k))", "self.jango_dict[sCode].update({\"주문가능수량\": like_quan}) self.jango_dict[sCode].update({\"매입단가\": buy_price}) self.jango_dict[sCode].update({\"총매입가\": total_buy_price}) self.jango_dict[sCode].update({\"매도매수구분\": meme_gubun}) self.jango_dict[sCode].update({\"(최우선)매도호가\": first_sell_price}) self.jango_dict[sCode].update({\"(최우선)매수호가\": first_buy_price}) if", "int)\", sCode, self.realType.REALTYPE[sRealType]['등락율']) # 출력 : +(-)12.98 d = float(d) e = self.dynamicCall(\"GetCommRealData(QString,", "#계좌번호 가져오기 self.detail_account_info() #예수금 요청 시그널 포함 self.detail_account_mystock() #계좌평가잔고내역 요청 시그널 포함 QTimer.singleShot(5000,", "self.detail_account_info_event_loop.exec_() def not_concluded_account(self, sPrevNext=\"0\"): self.dynamicCall(\"SetInputValue(QString, QString)\", \"계좌번호\", self.account_num) self.dynamicCall(\"SetInputValue(QString, QString)\", \"체결구분\", \"1\") self.dynamicCall(\"SetInputValue(QString,", "\"예수금상세현황요청\": deposit = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, 0, \"예수금\") self.deposit =", "= ls[1] stock_price = int(ls[2].split(\"\\n\")[0]) stock_price = abs(stock_price) self.portfolio_stock_dict.update({stock_code:{\"종목명\":stock_name, \"현재가\":stock_price}}) f.close() def merge_dict(self):", "\"미체결수량\") ok_quantity = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", 
sTrCode, sRQName, i, \"체결량\") code =", "code_list.split(';')[:-1] return code_list def calculator_fnc(self): ''' 종목 분석관련 함수 모음 :return: ''' code_list", "sTrCode, sRQName, i, \"주문번호\") order_status = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, i,", "sRQName == \"예수금상세현황요청\": deposit = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, 0, \"예수금\")", "들어온 주문이면 주문번호 할당 if order_number not in self.not_account_stock_dict.keys(): self.not_account_stock_dict.update({order_number: {}}) self.not_account_stock_dict[order_number].update({\"종목코드\": sCode})", "= self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, i, \"저가\") # 출력 : 000070", "default: 0 not_chegual_quan = int(not_chegual_quan) order_gubun = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['주문체결']['주문구분']) # 출력: -매도, +매수", "출력: -6000 first_buy_price = abs(int(first_buy_price)) ######## 새로 들어온 주문이면 주문번호 할당 if order_number", "stock_quan = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['잔고']['보유수량']) stock_quan = int(stock_quan) like_quan = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['잔고']['주문가능수량']) like_quan =", "self.jango_dict[sCode] #송수신 메세지 get def msg_slot(self, sScrNo, sRQName, sTrCode, msg): self.logging.logger.debug(\"스크린: %s, 요청이름:", "진다. for line in lines: #줄바꿈된 내용들이 한줄 씩 읽어와진다. 
if line !=", "= \"5000\" #종목별 할당할 스크린 번호 self.screen_meme_stock = \"6000\" #종목별 할당할 주문용스크린 번호", "= int(self.calcul_data[idx][7]) break idx += 1 # 해당부분 이평선이 가장 최근의 이평선 가격보다", "것 확인\") self.logging.logger.debug(\"포착된 부분의 저가가 오늘자 주가의 고가보다 낮은지 확인\") pass_success = True", "위에 있으면 조건 통과 못함\") price_top_moving = False break elif int(self.calcul_data[idx][7]) > moving_average_price_prev", "\"00\") self.dynamicCall(\"SetInputValue(QString, QString)\", \"조회구분\", \"1\") self.dynamicCall(\"CommRqData(QString, QString, int, QString)\", \"계좌평가잔고내역요청\", \"opw00018\", sPrevNext, self.screen_my_info)", "code = code.strip() code_nm = code_nm.strip() order_no = int(order_no.strip()) order_status = order_status.strip() order_quantity", "< -5): order_success = self.dynamicCall( \"SendOrder(QString, QString, QString, int, QString, int, int, QString,", "if len(self.calcul_data[idx:]) < 120: # 120일치가 있는지 계속 확인 self.logging.logger.debug(\"120일치가 없음\") break total_price", "동작\", text=\"주식 자동화 프로그램이 동작 되었습니다.\" ) def get_ocx_instance(self): self.setControl(\"KHOPENAPI.KHOpenAPICtrl.1\") # 레지스트리에 저장된", "주가의 고가보다 낮은지 확인\") pass_success = True if pass_success == True: self.logging.logger.debug(\"조건부 통과됨\")", "k}) if sCode in self.account_stock_dict.keys() and sCode not in self.jango_dict.keys(): asd = self.account_stock_dict[sCode]", "# 출력: -6000 current_price = abs(int(current_price)) first_sell_price = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['주문체결']['(최우선)매도호가']) # 출력: -6010", "data.append(current_price.strip()) data.append(value.strip()) data.append(trading_value.strip()) data.append(date.strip()) data.append(start_price.strip()) data.append(high_price.strip()) data.append(low_price.strip()) data.append(\"\") self.calcul_data.append(data.copy()) if sPrevNext == \"2\":", "자동화 프로그램이 동작 되었습니다.\" ) def get_ocx_instance(self): self.setControl(\"KHOPENAPI.KHOpenAPICtrl.1\") # 레지스트리에 저장된 api 모듈", "int, QString, QString)\", [\"신규매수\", 
self.portfolio_stock_dict[sCode][\"주문용스크린번호\"], self.account_num, 1, sCode, quantity, e, self.realType.SENDTYPE['거래구분']['지정가'], \"\"] )", "elif int(sGubun) == 1: #잔고 account_num = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['잔고']['계좌번호']) sCode = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['잔고']['종목코드'])[1:]", "j}) self.portfolio_stock_dict[sCode].update({\"저가\": k}) if sCode in self.account_stock_dict.keys() and sCode not in self.jango_dict.keys(): asd", "인자로 던져주면 파일 내용을 읽어 오겠다는 뜻이다. lines = f.readlines() #파일에 있는 내용들이", "시작\") elif value == \"2\": self.logging.logger.debug(\"장 종료, 동시호가로 넘어감\") elif value == \"4\":", "= int(chegual_price) chegual_quantity = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['주문체결']['체결량']) # 출력: 5 default : '' if", "= self.dynamicCall(\"GetCommRealData(QString, int)\", sCode, self.realType.REALTYPE[sRealType]['거래량']) # 출력 : +240124 매수일때, -2034 매도일 때", "meme_gubun = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['잔고']['매도매수구분']) meme_gubun = self.realType.REALTYPE['매도수구분'][meme_gubun] first_sell_price = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['잔고']['(최우선)매도호가']) first_sell_price =", "i, \"일자\") # 출력 : 000070 start_price = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode,", "config.log_class import * # from config.slack import * class Kiwoom(QAxWidget): def __init__(self): super().__init__()", "000070 low_price = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, i, \"저가\") # 출력", "QString, QString, QString)\", screen_num, code, fids, \"1\") self.slack.notification( pretext=\"주식자동화 프로그램 동작\", title=\"주식 자동화", "self.OnReceiveRealData.connect(self.realdata_slot) # 실시간 이벤트 연결 self.OnReceiveChejanData.connect(self.chejan_slot) #종목 주문체결 관련한 이벤트 def signal_login_commConnect(self): self.dynamicCall(\"CommConnect()\")", "int, QString)\", sTrCode, sRQName, i, \"주문가격\") order_gubun = 
self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode,", "+= 1 # 실시간 데이터 얻어오기 def realdata_slot(self, sCode, sRealType, sRealData): if sRealType", "= self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, 0, \"총수익률(%)\") self.total_profit_loss_rate = float(total_profit_loss_rate) self.logging.logger.debug(\"계좌평가잔고내역요청", "stock_quan}) self.jango_dict[sCode].update({\"주문가능수량\": like_quan}) self.jango_dict[sCode].update({\"매입단가\": buy_price}) self.jango_dict[sCode].update({\"총매입가\": total_buy_price}) self.jango_dict[sCode].update({\"매도매수구분\": meme_gubun}) self.jango_dict[sCode].update({\"(최우선)매도호가\": first_sell_price}) self.jango_dict[sCode].update({\"(최우선)매수호가\": first_buy_price})", "낮은 것 확인\") self.logging.logger.debug(\"포착된 부분의 저가가 오늘자 주가의 고가보다 낮은지 확인\") pass_success =", "-6000 first_buy_price = abs(int(first_buy_price)) ######## 새로 들어온 주문이면 주문번호 할당 if order_number not", "for code in self.portfolio_stock_dict.keys(): screen_num = self.portfolio_stock_dict[code]['스크린번호'] fids = self.realType.REALTYPE['주식체결']['체결시간'] self.dynamicCall(\"SetRealReg(QString, QString, QString,", "self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, i, \"거래량\") # 출력 : 000070 trading_value", "끊기 self.logging.logger.debug(\"%s / %s : KOSDAQ Stock Code : %s is updating... 
\"", "self.not_account_stock_dict[order_number].update({\"종목명\": stock_name}) self.not_account_stock_dict[order_number].update({\"주문상태\": order_status}) self.not_account_stock_dict[order_number].update({\"주문수량\": order_quan}) self.not_account_stock_dict[order_number].update({\"주문가격\": order_price}) self.not_account_stock_dict[order_number].update({\"미체결수량\": not_chegual_quan}) self.not_account_stock_dict[order_number].update({\"원주문번호\": origin_order_number}) self.not_account_stock_dict[order_number].update({\"주문구분\":", "{} self.jango_dict = {} ######################## ########### 종목 분석 용 self.calcul_data = [] ##########################################", "####### 요청 스크린 번호 self.screen_my_info = \"2000\" #계좌 관련한 스크린 번호 self.screen_calculation_stock =", "abs(int(h)) i = self.dynamicCall(\"GetCommRealData(QString, int)\", sCode, self.realType.REALTYPE[sRealType]['고가']) # 출력 : +(-)2530 i =", "self.detail_account_info_event_loop.exec_() def detail_account_mystock(self, sPrevNext=\"0\"): self.dynamicCall(\"SetInputValue(QString, QString)\", \"계좌번호\", self.account_num) self.dynamicCall(\"SetInputValue(QString, QString)\", \"비밀번호\", \"0000\") self.dynamicCall(\"SetInputValue(QString,", "''' 종목코드 리스트 받기 #0:장내, 10:코스닥 :param market_code: 시장코드 입력 :return: ''' code_list", "데이터를 조회하면서 120일 이평선보다 주가가 계속 밑에 존재하는지 확인 prev_price = None if", "int(self.screen_meme_stock) if (cnt % 50) == 0: temp_screen += 1 self.screen_real_stock = str(temp_screen)", "# 출력 : 000070 start_price = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, i,", "+매수 order_gubun = order_gubun.strip().lstrip('+').lstrip('-') chegual_time_str = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['주문체결']['주문/체결시간']) # 출력: '151028' chegual_price =", "성공\") else: self.logging.logger.debug(\"매수취소 전달 실패\") elif not_quantity == 0: del self.not_account_stock_dict[order_num] # 실시간", "learn_rate}) self.account_stock_dict[code].update({\"현재가\": current_price}) 
self.account_stock_dict[code].update({\"매입금액\": total_chegual_price}) self.account_stock_dict[code].update({'매매가능수량' : possible_quantity}) self.logging.logger.debug(\"sPreNext : %s\" % sPrevNext)", "super().__init__() self.realType = RealType() self.logging = Logging() # self.slack = Slack() #슬랙 동작", "self.calcul_data == None or len(self.calcul_data) < 120: pass_success = False else: # 120일", "self.portfolio_stock_dict[sCode].update({\"체결시간\": a}) self.portfolio_stock_dict[sCode].update({\"현재가\": b}) self.portfolio_stock_dict[sCode].update({\"전일대비\": c}) self.portfolio_stock_dict[sCode].update({\"등락율\": d}) self.portfolio_stock_dict[sCode].update({\"(최우선)매도호가\": e}) self.portfolio_stock_dict[sCode].update({\"(최우선)매수호가\": f}) self.portfolio_stock_dict[sCode].update({\"거래량\":", "sRQName, i, \"거래량\") # 출력 : 000070 trading_value = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\",", "할당 cnt = 0 for code in screen_overwrite: temp_screen = int(self.screen_real_stock) meme_screen =", "code in self.account_stock_dict: # dictionary 에 해당 종목이 있나 확인 pass else: self.account_stock_dict[code]", "self.not_account_stock_dict[order_no].update({'주문가격': order_price}) self.not_account_stock_dict[order_no].update({'주문구분': order_gubun}) self.not_account_stock_dict[order_no].update({'미체결수량': not_quantity}) self.not_account_stock_dict[order_no].update({'체결량': ok_quantity}) self.logging.logger.debug(\"미체결 종목 : %s \"", "self.dynamicCall(\"GetCommRealData(QString, int)\", sCode, self.realType.REALTYPE[sRealType]['시가']) # 출력 : +(-)2530 j = abs(int(j)) k =", "# 출력 : +(-)2515 f = abs(int(f)) g = self.dynamicCall(\"GetCommRealData(QString, int)\", sCode, self.realType.REALTYPE[sRealType]['거래량'])", "start.\") ####### event loop를 실행하기 위한 변수모음 self.login_event_loop = QEventLoop() #로그인 요청용 이벤트루프", "루프를 종료한다. 
self.login_event_loop.exit() def get_account_info(self): account_list = self.dynamicCall(\"GetLoginInfo(QString)\", \"ACCNO\") # 계좌번호 반환 account_num", "current_price = int(current_price.strip()) total_chegual_price = int(total_chegual_price.strip()) possible_quantity = int(possible_quantity.strip()) self.account_stock_dict[code].update({\"종목명\": code_nm}) self.account_stock_dict[code].update({\"보유수량\": stock_quantity})", "불러오기 def event_slots(self): self.OnEventConnect.connect(self.login_slot) # 로그인 관련 이벤트 self.OnReceiveTrData.connect(self.trdata_slot) # 트랜잭션 요청 관련", "QString)\", sTrCode, sRQName, i, \"주문수량\") order_price = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName,", ") if order_success == 0: self.logging.logger.debug(\"매도주문 전달 성공\") del self.account_stock_dict[sCode] else: self.logging.logger.debug(\"매도주문 전달", "del self.account_stock_dict[sCode] else: self.logging.logger.debug(\"매도주문 전달 실패\") elif sCode in self.jango_dict.keys(): jd = self.jango_dict[sCode]", "# 출력 : A039423 // 알파벳 A는 장내주식, J는 ELW종목, Q는 ETN종목 code", "시그널 포함 self.detail_account_mystock() #계좌평가잔고내역 요청 시그널 포함 QTimer.singleShot(5000, self.not_concluded_account) #5초 뒤에 미체결 종목들", "sCode}) self.jango_dict[sCode].update({\"종목명\": stock_name}) self.jango_dict[sCode].update({\"보유수량\": stock_quan}) self.jango_dict[sCode].update({\"주문가능수량\": like_quan}) self.jango_dict[sCode].update({\"매입단가\": buy_price}) self.jango_dict[sCode].update({\"총매입가\": total_buy_price}) self.jango_dict[sCode].update({\"매도매수구분\": meme_gubun})", "씩 읽어와진다. 
if line != \"\": ls = line.split(\"\\t\") stock_code = ls[0] stock_name", "self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['주문체결']['(최우선)매도호가']) # 출력: -6010 first_sell_price = abs(int(first_sell_price)) first_buy_price = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['주문체결']['(최우선)매수호가']) #", "총매입가 total_buy_price = int(total_buy_price) meme_gubun = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['잔고']['매도매수구분']) meme_gubun = self.realType.REALTYPE['매도수구분'][meme_gubun] first_sell_price =", "= 0.0 #총수익률(%) ######################################## ######## 종목 정보 가져오기 self.portfolio_stock_dict = {} self.jango_dict =", "or meme_rate < -5): order_success = self.dynamicCall( \"SendOrder(QString, QString, QString, int, QString, int,", "= 0 #예수금 self.use_money = 0 #실제 투자에 사용할 금액 self.use_money_percent = 0.5", "sTrCode, sRQName, 0, \"총매입금액\") self.total_buy_money = int(total_buy_money) total_profit_loss_money = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\",", "code)) self.day_kiwoom_db(code=code) def day_kiwoom_db(self, code=None, date=None, sPrevNext=\"0\"): QTest.qWait(3600) #3.6초마다 딜레이를 준다. self.dynamicCall(\"SetInputValue(QString, QString)\",", "code in self.portfolio_stock_dict.keys(): self.dynamicCall(\"SetRealRemove(QString, QString)\", self.portfolio_stock_dict[code]['스크린번호'], code) QTest.qWait(5000) self.file_delete() self.calculator_fnc() sys.exit() elif sRealType", "QString, int, QString)\", sTrCode, sRQName, i, \"현재가\") # 출력 : 000070 value =", "이벤트 def signal_login_commConnect(self): self.dynamicCall(\"CommConnect()\") # 로그인 요청 시그널 self.login_event_loop.exec_() # 이벤트루프 실행 def", "0 for value in self.calcul_data[:120]: total_price += int(value[1]) moving_average_price = total_price / 120", "%s is updating... 
\" % (idx + 1, len(code_list), code)) self.day_kiwoom_db(code=code) def day_kiwoom_db(self,", "000070 value = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, i, \"거래량\") # 출력", "출력: 5 default : '' if chegual_quantity == '': chegual_quantity = 0 else:", "range(rows): code = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, i, \"종목코드\") code_nm =", "한줄 씩 읽어와진다. if line != \"\": ls = line.split(\"\\t\") stock_code = ls[0]", "self.OnEventConnect.connect(self.login_slot) # 로그인 관련 이벤트 self.OnReceiveTrData.connect(self.trdata_slot) # 트랜잭션 요청 관련 이벤트 self.OnReceiveMsg.connect(self.msg_slot) def", "J는 ELW종목, Q는 ETN종목 code = code.strip()[1:] code_nm = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\",", "QString)\", \"계좌평가잔고내역요청\", \"opw00018\", sPrevNext, self.screen_my_info) self.detail_account_info_event_loop.exec_() def not_concluded_account(self, sPrevNext=\"0\"): self.dynamicCall(\"SetInputValue(QString, QString)\", \"계좌번호\", self.account_num)", "QString, int, QString)\", sTrCode, sRQName, 0, \"종목코드\") code = code.strip() # data =", "cnt += 1 # 실시간 데이터 얻어오기 def realdata_slot(self, sCode, sRealType, sRealData): if", "value == \"4\": self.logging.logger.debug(\"3시30분 장 종료\") for code in self.portfolio_stock_dict.keys(): self.dynamicCall(\"SetRealRemove(QString, QString)\", self.portfolio_stock_dict[code]['스크린번호'],", "possible_quantity}) self.logging.logger.debug(\"sPreNext : %s\" % sPrevNext) print(\"계좌에 가지고 있는 종목은 %s \" %", "abs(int(b)) c = self.dynamicCall(\"GetCommRealData(QString, int)\", sCode, self.realType.REALTYPE[sRealType]['전일대비']) # 출력 : +(-)2520 c =", "통과 못함\") price_top_moving = False break elif int(self.calcul_data[idx][7]) > moving_average_price_prev and idx >", "list(self.not_account_stock_dict) for order_num in not_meme_list: code = self.not_account_stock_dict[order_num][\"종목코드\"] meme_price = self.not_account_stock_dict[order_num]['주문가격'] not_quantity =", "싱글데이터 : %s - 
%s - %s\" % (total_buy_money, total_profit_loss_money, total_profit_loss_rate)) rows =", "abs(int(k)) if sCode not in self.portfolio_stock_dict: self.portfolio_stock_dict.update({sCode:{}}) self.portfolio_stock_dict[sCode].update({\"체결시간\": a}) self.portfolio_stock_dict[sCode].update({\"현재가\": b}) self.portfolio_stock_dict[sCode].update({\"전일대비\": c})", "‘거래량’, ‘거래대금’, ‘날짜’, ‘시가’, ‘고가’, ‘저가’. ‘’], [‘’, ‘현재가’, ’거래량’, ‘거래대금’, ‘날짜’, ‘시가’,", "not in self.jango_dict.keys(): self.jango_dict.update({sCode:{}}) self.jango_dict[sCode].update({\"현재가\": current_price}) self.jango_dict[sCode].update({\"종목코드\": sCode}) self.jango_dict[sCode].update({\"종목명\": stock_name}) self.jango_dict[sCode].update({\"보유수량\": stock_quan}) self.jango_dict[sCode].update({\"주문가능수량\":", "= {} self.not_account_stock_dict = {} self.deposit = 0 #예수금 self.use_money = 0 #실제", "self.use_money = 0 #실제 투자에 사용할 금액 self.use_money_percent = 0.5 #예수금에서 실제 사용할", "for code in self.portfolio_stock_dict.keys(): if code not in screen_overwrite: screen_overwrite.append(code) # 스크린번호 할당", "= 0 else: chegual_price = int(chegual_price) chegual_quantity = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['주문체결']['체결량']) # 출력: 5", "= self.dynamicCall(\"GetCommRealData(QString, int)\", sCode, self.realType.REALTYPE[sRealType]['저가']) # 출력 : +(-)2530 k = abs(int(k)) if", "ELW종목, Q는 ETN종목 code = code.strip()[1:] code_nm = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode,", "* from PyQt5.QtTest import * from config.kiwoomType import * from config.log_class import *", "이벤트 연결 self.OnReceiveChejanData.connect(self.chejan_slot) #종목 주문체결 관련한 이벤트 def signal_login_commConnect(self): self.dynamicCall(\"CommConnect()\") # 로그인 요청", "4,8:장종료(30분), 9:장마감) value = self.dynamicCall(\"GetCommRealData(QString, int)\", sCode, fid) if value == '0': self.logging.logger.debug(\"장", "int, QString)\", sTrCode, sRQName, i, \"종목코드\") code_nm = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", 
sTrCode,", "range(cnt): data = [] current_price = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, i,", "수 있게 변환해 주는 함수 self.event_slots() # 키움과 연결하기 위한 시그널 / 슬롯", "것 확인\") bottom_stock_price = True check_price = int(self.calcul_data[0][6]) # 과거 일봉 데이터를 조회하면서", "\"출금가능금액\") self.output_deposit = int(output_deposit) self.logging.logger.debug(\"예수금 : %s\" % self.output_deposit) self.stop_screen_cancel(self.screen_my_info) self.detail_account_info_event_loop.exit() elif sRQName", "QString)\", sTrCode, sRQName, i, \"저가\") # 출력 : 000070 data.append(\"\") data.append(current_price.strip()) data.append(value.strip()) data.append(trading_value.strip())", "self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, i, \"현재가\") # 출력 : 000070 value", "= {} ########################### ####### 계좌 관련된 변수 self.account_stock_dict = {} self.not_account_stock_dict = {}", "sRQName, 0, \"예수금\") self.deposit = int(deposit) use_money = float(self.deposit) * self.use_money_percent self.use_money =", "print(\"계좌에 가지고 있는 종목은 %s \" % rows) if sPrevNext == \"2\": self.detail_account_mystock(sPrevNext=\"2\")", "[] ########################################## ####### 요청 스크린 번호 self.screen_my_info = \"2000\" #계좌 관련한 스크린 번호", "2:장종료전(20분), 3:장시작, 4,8:장종료(30분), 9:장마감) value = self.dynamicCall(\"GetCommRealData(QString, int)\", sCode, fid) if value ==", "100 if asd['매매가능수량'] > 0 and (meme_rate > 5 or meme_rate < -5):", "else: self.detail_account_info_event_loop.exit() elif sRQName == \"실시간미체결요청\": rows = self.dynamicCall(\"GetRepeatCnt(QString, QString)\", sTrCode, sRQName) for", "가격보다 낮은 것 확인\") self.logging.logger.debug(\"포착된 부분의 저가가 오늘자 주가의 고가보다 낮은지 확인\") pass_success", "출력: 21000 order_price = int(order_price) not_chegual_quan = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['주문체결']['미체결수량']) # 출력: 15, default:", "성공\") else: self.logging.logger.debug(\"매수주문 전달 실패\") not_meme_list = list(self.not_account_stock_dict) for 
order_num in not_meme_list: code", "\"opt10075\", sPrevNext, self.screen_my_info) self.detail_account_info_event_loop.exec_() def trdata_slot(self, sScrNo, sRQName, sTrCode, sRecordName, sPrevNext): if sRQName", "loop를 실행하기 위한 변수모음 self.login_event_loop = QEventLoop() #로그인 요청용 이벤트루프 self.detail_account_info_event_loop = QEventLoop()", "‘저가’. ‘’], [‘’, ‘현재가’, ’거래량’, ‘거래대금’, ‘날짜’, ‘시가’, ‘고가’, ‘저가’, ‘’]. […]] cnt", "int, QString)\", sTrCode, sRQName, i, \"보유수량\") # 보유수량 : 000000000000010 buy_price = self.dynamicCall(\"GetCommData(QString,", "# 해당부분 이평선이 가장 최근의 이평선 가격보다 낮은지 확인 if price_top_moving == True:", "moving_average_price and moving_average_price <= int(self.calcul_data[0][6]): self.logging.logger.debug(\"오늘 주가 120이평선 아래에 걸쳐있는 것 확인\") bottom_stock_price", "line != \"\": ls = line.split(\"\\t\") stock_code = ls[0] stock_name = ls[1] stock_price", "self.get_ocx_instance() #OCX 방식을 파이썬에 사용할 수 있게 변환해 주는 함수 self.event_slots() # 키움과", "이벤트 self.OnReceiveTrData.connect(self.trdata_slot) # 트랜잭션 요청 관련 이벤트 self.OnReceiveMsg.connect(self.msg_slot) def real_event_slot(self): self.OnReceiveRealData.connect(self.realdata_slot) # 실시간", "\"고가\") # 출력 : 000070 low_price = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName,", "확인 bottom_stock_price = False check_price = None if int(self.calcul_data[0][7]) <= moving_average_price and moving_average_price", "낮은지 확인\") pass_success = True if pass_success == True: self.logging.logger.debug(\"조건부 통과됨\") code_nm =", "self.jango_dict.update({sCode:{}}) self.jango_dict[sCode].update({\"현재가\": current_price}) self.jango_dict[sCode].update({\"종목코드\": sCode}) self.jango_dict[sCode].update({\"종목명\": stock_name}) self.jango_dict[sCode].update({\"보유수량\": stock_quan}) self.jango_dict[sCode].update({\"주문가능수량\": like_quan}) self.jango_dict[sCode].update({\"매입단가\": buy_price})", "QString, int, QString)\", sTrCode, sRQName, i, \"주문구분\") # -매도, +매수, -매도정정, +매수정정 not_quantity", "in self.portfolio_stock_dict.keys(): 
screen_num = self.portfolio_stock_dict[code]['스크린번호'] fids = self.realType.REALTYPE['주식체결']['체결시간'] self.dynamicCall(\"SetRealReg(QString, QString, QString, QString)\", screen_num,", "self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['잔고']['(최우선)매도호가']) first_sell_price = abs(int(first_sell_price)) first_buy_price = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['잔고']['(최우선)매수호가']) first_buy_price = abs(int(first_buy_price)) if", "calculator_fnc(self): ''' 종목 분석관련 함수 모음 :return: ''' code_list = self.get_code_list_by_market(\"10\") self.logging.logger.debug(\"코스닥 갯수", "있는 구간 존재 self.logging.logger.debug(\"120일치 이평선 위에 있는 구간 확인됨\") price_top_moving = True prev_price", "self.calculator_event_loop.exit() def stop_screen_cancel(self, sScrNo=None): self.dynamicCall(\"DisconnectRealData(QString)\", sScrNo) # 스크린번호 연결 끊기 def get_code_list_by_market(self, market_code):", "\"현재가\") # 출력 : 000070 value = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName,", "order_status = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['주문체결']['주문상태']) # 출력: 접수, 확인, 체결 order_quan = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['주문체결']['주문수량'])", "price_top_moving = False idx = 1 while True: if len(self.calcul_data[idx:]) < 120: #", "self.portfolio_stock_dict[sCode].update({\"저가\": k}) if sCode in self.account_stock_dict.keys() and sCode not in self.jango_dict.keys(): asd =", "code in self.portfolio_stock_dict.keys(): self.portfolio_stock_dict[code].update({\"스크린번호\": str(self.screen_real_stock)}) self.portfolio_stock_dict[code].update({\"주문용스크린번호\": str(self.screen_meme_stock)}) elif code not in self.portfolio_stock_dict.keys(): self.portfolio_stock_dict.update({code:", "code_nm = code_nm.strip() stock_quantity = int(stock_quantity.strip()) buy_price = int(buy_price.strip()) learn_rate = float(learn_rate.strip()) current_price", "int, QString)\", sTrCode, sRQName, i, \"주문수량\") order_price = 
self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode,", "self.logging.logger.debug(errors(err_code)[1]) #로그인 처리가 완료됐으면 이벤트 루프를 종료한다. self.login_event_loop.exit() def get_account_info(self): account_list = self.dynamicCall(\"GetLoginInfo(QString)\",", "= str(temp_screen) if (cnt % 50) == 0: meme_screen += 1 self.screen_meme_stock =", "total_chegual_price = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, i, \"매입금액\") possible_quantity = self.dynamicCall(\"GetCommData(QString,", "int, QString)\", sTrCode, sRQName, i, \"현재가\") # 현재가 : 000000003450 total_chegual_price = self.dynamicCall(\"GetCommData(QString,", "QString, int, QString)\", \"예수금상세현황요청\", \"opw00001\", sPrevNext, self.screen_my_info) self.detail_account_info_event_loop.exec_() def detail_account_mystock(self, sPrevNext=\"0\"): self.dynamicCall(\"SetInputValue(QString, QString)\",", "sPrevNext, self.screen_my_info) self.detail_account_info_event_loop.exec_() def not_concluded_account(self, sPrevNext=\"0\"): self.dynamicCall(\"SetInputValue(QString, QString)\", \"계좌번호\", self.account_num) self.dynamicCall(\"SetInputValue(QString, QString)\", \"체결구분\",", "+= int(value[1]) moving_average_price = total_price / 120 # 오늘자 주가가 120일 이평선에 걸쳐있는지", "= int(chegual_quantity) current_price = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['주문체결']['현재가']) # 출력: -6000 current_price = abs(int(current_price)) first_sell_price", "self.dynamicCall(\"SetRealReg(QString, QString, QString, QString)\", self.screen_start_stop_real, '', self.realType.REALTYPE['장시작시간']['장운영구분'], \"0\") for code in self.portfolio_stock_dict.keys(): screen_num", "self.dynamicCall(\"GetMasterCodeName(QString)\", code) f = open(\"files/condition_stock.txt\", \"a\", encoding=\"utf8\") f.write(\"%s\\t%s\\t%s\\n\" % (code, code_nm, str(self.calcul_data[0][1]))) f.close()", "quantity = int(result) order_success = self.dynamicCall( \"SendOrder(QString, QString, QString, int, QString, 
int, int,", "주문번호 order_status = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['주문체결']['주문상태']) # 출력: 접수, 확인, 체결 order_quan = self.dynamicCall(\"GetChejanData(int)\",", "use_money = float(self.deposit) * self.use_money_percent self.use_money = int(use_money) self.use_money = self.use_money / 4", "self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, i, \"매입가\") # 매입가 : 000000000054100 learn_rate", "= abs(int(g)) h = self.dynamicCall(\"GetCommRealData(QString, int)\", sCode, self.realType.REALTYPE[sRealType]['누적거래량']) # 출력 : 240124 h", "종료\") for code in self.portfolio_stock_dict.keys(): self.dynamicCall(\"SetRealRemove(QString, QString)\", self.portfolio_stock_dict[code]['스크린번호'], code) QTest.qWait(5000) self.file_delete() self.calculator_fnc() sys.exit()", "default : '' if chegual_quantity == '': chegual_quantity = 0 else: chegual_quantity =", "해당 종목이 있나 확인 pass else: self.account_stock_dict[code] = {} code_nm = code_nm.strip() stock_quantity", "self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, i, \"종목코드\") code_nm = self.dynamicCall(\"GetCommData(QString, QString, int,", "i, \"보유수량\") # 보유수량 : 000000000000010 buy_price = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode,", "self.slack = Slack() #슬랙 동작 #print(\"kiwoom() class start. 
\") self.logging.logger.debug(\"Kiwoom() class start.\") #######", "code, 0, 0, self.realType.SENDTYPE['거래구분']['지정가'], order_num] ) if order_success == 0: self.logging.logger.debug(\"매수취소 전달 성공\")", "self.account_num, 2, sCode, jd['주문가능수량'], 0, self.realType.SENDTYPE['거래구분']['시장가'], \"\"] ) if order_success == 0: self.logging.logger.debug(\"매도주문", "account_num = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['주문체결']['계좌번호']) sCode = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['주문체결']['종목코드'])[1:] stock_name = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['주문체결']['종목명']) stock_name", "20: self.logging.logger.debug(\"20일 동안 주가가 120일 이평선과 같거나 위에 있으면 조건 통과 못함\") price_top_moving", "# 보유수량 : 000000000000010 buy_price = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, i,", "def real_event_slot(self): self.OnReceiveRealData.connect(self.realdata_slot) # 실시간 이벤트 연결 self.OnReceiveChejanData.connect(self.chejan_slot) #종목 주문체결 관련한 이벤트 def", "int(not_chegual_quan) order_gubun = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['주문체결']['주문구분']) # 출력: -매도, +매수 order_gubun = order_gubun.strip().lstrip('+').lstrip('-') chegual_time_str", "= self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['잔고']['계좌번호']) sCode = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['잔고']['종목코드'])[1:] stock_name = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['잔고']['종목명']) stock_name =", "0: self.logging.logger.debug(\"매도주문 전달 성공\") else: self.logging.logger.debug(\"매도주문 전달 실패\") elif d > 2.0 and", "전달 실패\") elif sCode in self.jango_dict.keys(): jd = self.jango_dict[sCode] meme_rate = (b -", "%s \" % rows) if sPrevNext == \"2\": self.detail_account_mystock(sPrevNext=\"2\") else: self.detail_account_info_event_loop.exit() elif sRQName", "sPrevNext) print(\"계좌에 가지고 있는 종목은 %s \" % rows) if sPrevNext == \"2\":", "\"주문상태\") # 접수,확인,체결 order_quantity = 
self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, i, \"주문수량\")", "float(total_profit_loss_rate) self.logging.logger.debug(\"계좌평가잔고내역요청 싱글데이터 : %s - %s - %s\" % (total_buy_money, total_profit_loss_money, total_profit_loss_rate))", "sCode, self.realType.REALTYPE[sRealType]['(최우선)매도호가']) # 출력 : +(-)2520 e = abs(int(e)) f = self.dynamicCall(\"GetCommRealData(QString, int)\",", "e}) self.portfolio_stock_dict[sCode].update({\"(최우선)매수호가\": f}) self.portfolio_stock_dict[sCode].update({\"거래량\": g}) self.portfolio_stock_dict[sCode].update({\"누적거래량\": h}) self.portfolio_stock_dict[sCode].update({\"고가\": i}) self.portfolio_stock_dict[sCode].update({\"시가\": j}) self.portfolio_stock_dict[sCode].update({\"저가\": k})", "self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['주문체결']['주문/체결시간']) # 출력: '151028' chegual_price = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['주문체결']['체결가']) # 출력: 2110 default", "이평선과 같거나 위에 있으면 조건 통과 못함\") price_top_moving = False break elif int(self.calcul_data[idx][7])", "= 1 while True: if len(self.calcul_data[idx:]) < 120: # 120일치가 있는지 계속 확인", "2, sCode, asd['매매가능수량'], 0, self.realType.SENDTYPE['거래구분']['시장가'], \"\"] ) if order_success == 0: self.logging.logger.debug(\"매도주문 전달", "모듈 불러오기 def event_slots(self): self.OnEventConnect.connect(self.login_slot) # 로그인 관련 이벤트 self.OnReceiveTrData.connect(self.trdata_slot) # 트랜잭션 요청", "self.jango_dict.keys(): jd = self.jango_dict[sCode] meme_rate = (b - jd['매입단가']) / jd['매입단가'] * 100", "self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['잔고']['매입단가']) buy_price = abs(int(buy_price)) total_buy_price = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['잔고']['총매입가']) # 계좌에 있는 종목의", "self.detail_account_info_event_loop.exit() elif sRQName == \"실시간미체결요청\": rows = self.dynamicCall(\"GetRepeatCnt(QString, QString)\", sTrCode, sRQName) for i", "code_list = self.get_code_list_by_market(\"10\") self.logging.logger.debug(\"코스닥 갯수 %s \" 
% len(code_list)) for idx, code in", "QString, int, QString)\", sTrCode, sRQName, 0, \"출금가능금액\") self.output_deposit = int(output_deposit) self.logging.logger.debug(\"예수금 : %s\"", "self.not_account_stock_dict[order_no].update({'종목명': code_nm}) self.not_account_stock_dict[order_no].update({'주문번호': order_no}) self.not_account_stock_dict[order_no].update({'주문상태': order_status}) self.not_account_stock_dict[order_no].update({'주문수량': order_quantity}) self.not_account_stock_dict[order_no].update({'주문가격': order_price}) self.not_account_stock_dict[order_no].update({'주문구분': order_gubun}) self.not_account_stock_dict[order_no].update({'미체결수량':", "이벤트 시그널 / 슬롯 연결 self.signal_login_commConnect() #로그인 요청 시그널 포함 self.get_account_info() #계좌번호 가져오기", "self.dynamicCall(\"CommRqData(QString, QString, int, QString)\", \"실시간미체결요청\", \"opt10075\", sPrevNext, self.screen_my_info) self.detail_account_info_event_loop.exec_() def trdata_slot(self, sScrNo, sRQName,", "stock_name = stock_name.strip() current_price = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['잔고']['현재가']) current_price = abs(int(current_price)) stock_quan = self.dynamicCall(\"GetChejanData(int)\",", "동안 주가가 120일 이평선과 같거나 위에 있으면 조건 통과 못함\") price_top_moving = False", "QString, QString)\", [\"신규매수\", self.portfolio_stock_dict[sCode][\"주문용스크린번호\"], self.account_num, 1, sCode, quantity, e, self.realType.SENDTYPE['거래구분']['지정가'], \"\"] ) if", "first_sell_price = abs(int(first_sell_price)) first_buy_price = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['주문체결']['(최우선)매수호가']) # 출력: -6000 first_buy_price = abs(int(first_buy_price))", "딜레이를 준다. self.dynamicCall(\"SetInputValue(QString, QString)\", \"종목코드\", code) self.dynamicCall(\"SetInputValue(QString, QString)\", \"수정주가구분\", \"1\") if date !=", "종료한다. 
self.login_event_loop.exit() def get_account_info(self): account_list = self.dynamicCall(\"GetLoginInfo(QString)\", \"ACCNO\") # 계좌번호 반환 account_num =", "order_price = int(order_price.strip()) order_gubun = order_gubun.strip().lstrip('+').lstrip('-') not_quantity = int(not_quantity.strip()) ok_quantity = int(ok_quantity.strip()) if", "def read_code(self): if os.path.exists(\"files/condition_stock.txt\"): # 해당 경로에 파일이 있는지 체크한다. f = open(\"files/condition_stock.txt\",", "QString)\", sTrCode, sRQName, 0, \"총평가손익금액\") self.total_profit_loss_money = int(total_profit_loss_money) total_profit_loss_rate = self.dynamicCall(\"GetCommData(QString, QString, int,", "code_nm = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, i, \"종목명\") order_no = self.dynamicCall(\"GetCommData(QString,", "order_number = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['주문체결']['주문번호']) # 출럭: 0115061 마지막 주문번호 order_status = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['주문체결']['주문상태'])", "(0:장시작전, 2:장종료전(20분), 3:장시작, 4,8:장종료(30분), 9:장마감) value = self.dynamicCall(\"GetCommRealData(QString, int)\", sCode, fid) if value", "self.jango_dict[sCode].update({\"보유수량\": stock_quan}) self.jango_dict[sCode].update({\"주문가능수량\": like_quan}) self.jango_dict[sCode].update({\"매입단가\": buy_price}) self.jango_dict[sCode].update({\"총매입가\": total_buy_price}) self.jango_dict[sCode].update({\"매도매수구분\": meme_gubun}) self.jango_dict[sCode].update({\"(최우선)매도호가\": first_sell_price}) self.jango_dict[sCode].update({\"(최우선)매수호가\":", "#5초 뒤에 미체결 종목들 가져오기 실행 ######################################### QTest.qWait(10000) self.read_code() self.screen_number_setting() QTest.qWait(5000) #실시간 수신", "int(self.calcul_data[0][7]) <= moving_average_price and moving_average_price <= int(self.calcul_data[0][6]): self.logging.logger.debug(\"오늘 주가 120이평선 아래에 걸쳐있는 것", "sTrCode, sRQName, i, \"주문상태\") # 접수,확인,체결 order_quantity = self.dynamicCall(\"GetCommData(QString, QString, 
int, QString)\", sTrCode,", "= stock_name.strip() origin_order_number = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['주문체결']['원주문번호']) # 출력 : defaluse : \"000000\" order_number", ": 000000000000010 buy_price = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, i, \"매입가\") #", "self.total_buy_money = int(total_buy_money) total_profit_loss_money = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, 0, \"총평가손익금액\")", "not in screen_overwrite: screen_overwrite.append(code) #포트폴리로에 담겨있는 종목들 for code in self.portfolio_stock_dict.keys(): if code", "# Tr서버로 전송 -Transaction self.calculator_event_loop.exec_() def read_code(self): if os.path.exists(\"files/condition_stock.txt\"): # 해당 경로에 파일이", "buy_price, learn_rate, current_price)) if code in self.account_stock_dict: # dictionary 에 해당 종목이 있나", "sTrCode, sRQName, i, \"종목코드\") code_nm = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, i,", "HHMMSS b = self.dynamicCall(\"GetCommRealData(QString, int)\", sCode, self.realType.REALTYPE[sRealType]['현재가']) # 출력 : +(-)2520 b =", "def get_ocx_instance(self): self.setControl(\"KHOPENAPI.KHOpenAPICtrl.1\") # 레지스트리에 저장된 api 모듈 불러오기 def event_slots(self): self.OnEventConnect.connect(self.login_slot) #", "\"4000\" #계산용 스크린 번호 self.screen_real_stock = \"5000\" #종목별 할당할 스크린 번호 self.screen_meme_stock =", "idx, code in enumerate(code_list): self.dynamicCall(\"DisconnectRealData(QString)\", self.screen_calculation_stock) # 스크린 연결 끊기 self.logging.logger.debug(\"%s / %s", "0, 0, self.realType.SENDTYPE['거래구분']['지정가'], order_num] ) if order_success == 0: self.logging.logger.debug(\"매수취소 전달 성공\") else:", "self.calculator_event_loop = QEventLoop() ######################################### ########### 전체 종목 관리 self.all_stock_dict = {} ########################### #######", "* from config.kiwoomType import * from config.log_class import * # from config.slack import", "i, \"현재가\") # 출력 
: 000070 value = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode,", "마지막 주문번호 order_status = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['주문체결']['주문상태']) # 출력: 접수, 확인, 체결 order_quan =", "jd['매입단가'] * 100 if jd['주문가능수량'] > 0 and (meme_rate > 5 or meme_rate", "int(output_deposit) self.logging.logger.debug(\"예수금 : %s\" % self.output_deposit) self.stop_screen_cancel(self.screen_my_info) self.detail_account_info_event_loop.exit() elif sRQName == \"계좌평가잔고내역요청\": total_buy_money", "self.account_num, 1, sCode, quantity, e, self.realType.SENDTYPE['거래구분']['지정가'], \"\"] ) if order_success == 0: self.logging.logger.debug(\"매수주문", "= int(stock_quantity.strip()) buy_price = int(buy_price.strip()) learn_rate = float(learn_rate.strip()) current_price = int(current_price.strip()) total_chegual_price =", "temp_screen += 1 self.screen_real_stock = str(temp_screen) if (cnt % 50) == 0: meme_screen", "int(order_quan) order_price = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['주문체결']['주문가격']) # 출력: 21000 order_price = int(order_price) not_chegual_quan =", "요청이름: %s, tr코드: %s --- %s\" %(sScrNo, sRQName, sTrCode, msg)) #파일 삭제 def", "self.not_account_stock_dict[order_number].update({\"종목코드\": sCode}) self.not_account_stock_dict[order_number].update({\"주문번호\": order_number}) self.not_account_stock_dict[order_number].update({\"종목명\": stock_name}) self.not_account_stock_dict[order_number].update({\"주문상태\": order_status}) self.not_account_stock_dict[order_number].update({\"주문수량\": order_quan}) self.not_account_stock_dict[order_number].update({\"주문가격\": order_price}) self.not_account_stock_dict[order_number].update({\"미체결수량\":", "!= \"\": ls = line.split(\"\\t\") stock_code = ls[0] stock_name = ls[1] stock_price =", ": KOSDAQ Stock Code : %s is updating... 
\" % (idx + 1,", "-6010 first_sell_price = abs(int(first_sell_price)) first_buy_price = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['주문체결']['(최우선)매수호가']) # 출력: -6000 first_buy_price =", "- 종목명: %s - 보유수량: %s - 매입가:%s - 수익률: %s - 현재가:", "title=\"주식 자동화 프로그램 동작\", fallback=\"주식 자동화 프로그램 동작\", text=\"주식 자동화 프로그램이 동작 되었습니다.\"", "screen_overwrite: screen_overwrite.append(code) #미체결에 있는 종목들 for order_number in self.not_account_stock_dict.keys(): code = self.not_account_stock_dict[order_number]['종목코드'] if", "= self.dynamicCall(\"GetRepeatCnt(QString, QString)\", sTrCode, sRQName) self.logging.logger.debug(\"남은 일자 수 %s\" % cnt) for i", "QString, int, QString)\", sTrCode, sRQName, i, \"시가\") # 출력 : 000070 high_price =", "\"거래량\") # 출력 : 000070 trading_value = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName,", "like_quan}) self.jango_dict[sCode].update({\"매입단가\": buy_price}) self.jango_dict[sCode].update({\"총매입가\": total_buy_price}) self.jango_dict[sCode].update({\"매도매수구분\": meme_gubun}) self.jango_dict[sCode].update({\"(최우선)매도호가\": first_sell_price}) self.jango_dict[sCode].update({\"(최우선)매수호가\": first_buy_price}) if stock_quan", "sRQName, i, \"종목코드\") code_nm = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, i, \"종목명\")", "= self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, i, \"주문상태\") # 접수,확인,체결 order_quantity =", "# 스크린번호 할당 cnt = 0 for code in screen_overwrite: temp_screen = int(self.screen_real_stock)", "and (meme_rate > 5 or meme_rate < -5): order_success = self.dynamicCall( \"SendOrder(QString, QString,", "# 실시간 이벤트 시그널 / 슬롯 연결 self.signal_login_commConnect() #로그인 요청 시그널 포함 self.get_account_info()", "#잔고 account_num = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['잔고']['계좌번호']) sCode = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['잔고']['종목코드'])[1:] stock_name = self.dynamicCall(\"GetChejanData(int)\", 
self.realType.REALTYPE['잔고']['종목명'])", "QString)\", self.screen_start_stop_real, '', self.realType.REALTYPE['장시작시간']['장운영구분'], \"0\") for code in self.portfolio_stock_dict.keys(): screen_num = self.portfolio_stock_dict[code]['스크린번호'] fids", "금액 self.use_money_percent = 0.5 #예수금에서 실제 사용할 비율 self.output_deposit = 0 #출력가능 금액", "self.screen_my_info) self.detail_account_info_event_loop.exec_() def trdata_slot(self, sScrNo, sRQName, sTrCode, sRecordName, sPrevNext): if sRQName == \"예수금상세현황요청\":", "date = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, i, \"일자\") # 출력 :", "total_chegual_price = int(total_chegual_price.strip()) possible_quantity = int(possible_quantity.strip()) self.account_stock_dict[code].update({\"종목명\": code_nm}) self.account_stock_dict[code].update({\"보유수량\": stock_quantity}) self.account_stock_dict[code].update({\"매입가\": buy_price}) self.account_stock_dict[code].update({\"수익률(%)\":", "체크한다. f = open(\"files/condition_stock.txt\", \"r\", encoding=\"utf8\") # \"r\"을 인자로 던져주면 파일 내용을 읽어", "in self.account_stock_dict.keys() and sCode not in self.jango_dict.keys(): asd = self.account_stock_dict[sCode] meme_rate = (b", "for value in self.calcul_data[:120]: total_price += int(value[1]) moving_average_price = total_price / 120 #", "트랜잭션 요청 관련 이벤트 self.OnReceiveMsg.connect(self.msg_slot) def real_event_slot(self): self.OnReceiveRealData.connect(self.realdata_slot) # 실시간 이벤트 연결 self.OnReceiveChejanData.connect(self.chejan_slot)", "self.jango_dict.keys(): asd = self.account_stock_dict[sCode] meme_rate = (b - asd['매입가']) / asd['매입가'] * 100", "realdata_slot(self, sCode, sRealType, sRealData): if sRealType == \"장시작시간\": fid = self.realType.REALTYPE[sRealType]['장운영구분'] # (0:장시작전,", "self.logging.logger.debug(\"Kiwoom() class start.\") ####### event loop를 실행하기 위한 변수모음 self.login_event_loop = QEventLoop() #로그인", "= stock_name.strip() current_price = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['잔고']['현재가']) 
current_price = abs(int(current_price)) stock_quan = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['잔고']['보유수량'])", ": 000070 data.append(\"\") data.append(current_price.strip()) data.append(value.strip()) data.append(trading_value.strip()) data.append(date.strip()) data.append(start_price.strip()) data.append(high_price.strip()) data.append(low_price.strip()) data.append(\"\") self.calcul_data.append(data.copy()) if", "= self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, 0, \"예수금\") self.deposit = int(deposit) use_money", "== 0: #주문체결 account_num = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['주문체결']['계좌번호']) sCode = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['주문체결']['종목코드'])[1:] stock_name =", "in self.account_stock_dict: # dictionary 에 해당 종목이 있나 확인 pass else: self.account_stock_dict[code] =", "in self.portfolio_stock_dict.keys(): self.dynamicCall(\"SetRealRemove(QString, QString)\", self.portfolio_stock_dict[code]['스크린번호'], code) QTest.qWait(5000) self.file_delete() self.calculator_fnc() sys.exit() elif sRealType ==", "요청 시그널 포함 QTimer.singleShot(5000, self.not_concluded_account) #5초 뒤에 미체결 종목들 가져오기 실행 ######################################### QTest.qWait(10000)", "data.append(\"\") data.append(current_price.strip()) data.append(value.strip()) data.append(trading_value.strip()) data.append(date.strip()) data.append(start_price.strip()) data.append(high_price.strip()) data.append(low_price.strip()) data.append(\"\") self.calcul_data.append(data.copy()) if sPrevNext ==", "확인\") pass_success = True if pass_success == True: self.logging.logger.debug(\"조건부 통과됨\") code_nm = self.dynamicCall(\"GetMasterCodeName(QString)\",", "not_meme_list = list(self.not_account_stock_dict) for order_num in not_meme_list: code = self.not_account_stock_dict[order_num][\"종목코드\"] meme_price = self.not_account_stock_dict[order_num]['주문가격']", "self.dynamicCall(\"SetInputValue(QString, QString)\", 
\"계좌번호\", self.account_num) self.dynamicCall(\"SetInputValue(QString, QString)\", \"체결구분\", \"1\") self.dynamicCall(\"SetInputValue(QString, QString)\", \"매매구분\", \"0\") self.dynamicCall(\"CommRqData(QString,", "sCode, self.realType.REALTYPE[sRealType]['거래량']) # 출력 : +240124 매수일때, -2034 매도일 때 g = abs(int(g))", "login_slot(self, err_code): self.logging.logger.debug(errors(err_code)[1]) #로그인 처리가 완료됐으면 이벤트 루프를 종료한다. self.login_event_loop.exit() def get_account_info(self): account_list", "else: chegual_price = int(chegual_price) chegual_quantity = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['주문체결']['체결량']) # 출력: 5 default :", "# 120일치가 있는지 계속 확인 self.logging.logger.debug(\"120일치가 없음\") break total_price = 0 for value", "self.calcul_data.clear() self.calculator_event_loop.exit() def stop_screen_cancel(self, sScrNo=None): self.dynamicCall(\"DisconnectRealData(QString)\", sScrNo) # 스크린번호 연결 끊기 def get_code_list_by_market(self,", ": 000070 trading_value = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, i, \"거래대금\") #", "origin_order_number}) self.not_account_stock_dict[order_number].update({\"주문구분\": order_gubun}) self.not_account_stock_dict[order_number].update({\"주문/체결시간\": chegual_time_str}) self.not_account_stock_dict[order_number].update({\"체결가\": chegual_price}) self.not_account_stock_dict[order_number].update({\"체결량\": chegual_quantity}) self.not_account_stock_dict[order_number].update({\"현재가\": current_price}) self.not_account_stock_dict[order_number].update({\"(최우선)매도호가\": first_sell_price})", "i, \"주문상태\") # 접수,확인,체결 order_quantity = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, i,", "self.realType.REALTYPE[sRealType]['체결시간']) # 출력 HHMMSS b = self.dynamicCall(\"GetCommRealData(QString, int)\", sCode, self.realType.REALTYPE[sRealType]['현재가']) # 출력 :", "출력 : +(-)2520 e = abs(int(e)) f = self.dynamicCall(\"GetCommRealData(QString, int)\", sCode, 
self.realType.REALTYPE[sRealType]['(최우선)매수호가']) #", "self.portfolio_stock_dict[sCode].update({\"등락율\": d}) self.portfolio_stock_dict[sCode].update({\"(최우선)매도호가\": e}) self.portfolio_stock_dict[sCode].update({\"(최우선)매수호가\": f}) self.portfolio_stock_dict[sCode].update({\"거래량\": g}) self.portfolio_stock_dict[sCode].update({\"누적거래량\": h}) self.portfolio_stock_dict[sCode].update({\"고가\": i}) self.portfolio_stock_dict[sCode].update({\"시가\":", "asd = self.account_stock_dict[sCode] meme_rate = (b - asd['매입가']) / asd['매입가'] * 100 if", "= abs(int(h)) i = self.dynamicCall(\"GetCommRealData(QString, int)\", sCode, self.realType.REALTYPE[sRealType]['고가']) # 출력 : +(-)2530 i", "QString)\", sTrCode, sRQName, i, \"시가\") # 출력 : 000070 high_price = self.dynamicCall(\"GetCommData(QString, QString,", "i, \"수익률(%)\") # 수익률 : -000000001.94 current_price = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode,", "= int(possible_quantity.strip()) self.account_stock_dict[code].update({\"종목명\": code_nm}) self.account_stock_dict[code].update({\"보유수량\": stock_quantity}) self.account_stock_dict[code].update({\"매입가\": buy_price}) self.account_stock_dict[code].update({\"수익률(%)\": learn_rate}) self.account_stock_dict[code].update({\"현재가\": current_price}) self.account_stock_dict[code].update({\"매입금액\":", "int, QString)\", sTrCode, sRQName, i, \"고가\") # 출력 : 000070 low_price = self.dynamicCall(\"GetCommData(QString,", "self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['주문체결']['체결가']) # 출력: 2110 default : '' if chegual_price == '': chegual_price", "order_status}) self.not_account_stock_dict[order_number].update({\"주문수량\": order_quan}) self.not_account_stock_dict[order_number].update({\"주문가격\": order_price}) self.not_account_stock_dict[order_number].update({\"미체결수량\": not_chegual_quan}) self.not_account_stock_dict[order_number].update({\"원주문번호\": origin_order_number}) self.not_account_stock_dict[order_number].update({\"주문구분\": order_gubun}) 
self.not_account_stock_dict[order_number].update({\"주문/체결시간\": chegual_time_str})", "출력 : 000070 value = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, i, \"거래량\")", "self.dynamicCall(\"SetInputValue(QString, QString)\", \"기준일자\", date) self.dynamicCall(\"CommRqData(QString, QString, int, QString)\", \"주식일봉차트조회\", \"opt10081\", sPrevNext, self.screen_calculation_stock) #", "not in screen_overwrite: screen_overwrite.append(code) #미체결에 있는 종목들 for order_number in self.not_account_stock_dict.keys(): code =", "= 0 #총평가손익금액 self.total_profit_loss_rate = 0.0 #총수익률(%) ######################################## ######## 종목 정보 가져오기 self.portfolio_stock_dict", "self.logging.logger.debug(\"매수취소 전달 실패\") elif not_quantity == 0: del self.not_account_stock_dict[order_num] # 실시간 체결 정보", "import * from config.errorCode import * from PyQt5.QtTest import * from config.kiwoomType import", "defaluse : \"000000\" order_number = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['주문체결']['주문번호']) # 출럭: 0115061 마지막 주문번호 order_status", "self.not_account_stock_dict[order_number].update({\"미체결수량\": not_chegual_quan}) self.not_account_stock_dict[order_number].update({\"원주문번호\": origin_order_number}) self.not_account_stock_dict[order_number].update({\"주문구분\": order_gubun}) self.not_account_stock_dict[order_number].update({\"주문/체결시간\": chegual_time_str}) self.not_account_stock_dict[order_number].update({\"체결가\": chegual_price}) self.not_account_stock_dict[order_number].update({\"체결량\": chegual_quantity}) self.not_account_stock_dict[order_number].update({\"현재가\":", "merge_dict(self): self.all_stock_dict.update({\"계좌평가잔고내역\": self.account_stock_dict}) self.all_stock_dict.update({'미체결종목': self.not_account_stock_dict}) self.all_stock_dict.update({'포트폴리오종목': self.portfolio_stock_dict}) def screen_number_setting(self): screen_overwrite = [] #계좌평가잔고내역에", "self.realType.SENDTYPE['거래구분']['시장가'], \"\"] ) if order_success == 0: 
self.logging.logger.debug(\"매도주문 전달 성공\") del self.account_stock_dict[sCode] else:", "sRQName, i, \"종목번호\") # 출력 : A039423 // 알파벳 A는 장내주식, J는 ELW종목,", "self.portfolio_stock_dict[code]['스크린번호'], code) QTest.qWait(5000) self.file_delete() self.calculator_fnc() sys.exit() elif sRealType == \"주식체결\": a = self.dynamicCall(\"GetCommRealData(QString,", "120일 이평선의 최근 가격 구함 total_price = 0 for value in self.calcul_data[:120]: total_price", "(cnt % 50) == 0: meme_screen += 1 self.screen_meme_stock = str(meme_screen) if code", "= self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['주문체결']['미체결수량']) # 출력: 15, default: 0 not_chegual_quan = int(not_chegual_quan) order_gubun =", "{} ########################### ####### 계좌 관련된 변수 self.account_stock_dict = {} self.not_account_stock_dict = {} self.deposit", "= self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, i, \"일자\") # 출력 : 000070", "self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['주문체결']['주문번호']) # 출럭: 0115061 마지막 주문번호 order_status = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['주문체결']['주문상태']) # 출력:", "# \"r\"을 인자로 던져주면 파일 내용을 읽어 오겠다는 뜻이다. 
lines = f.readlines() #파일에", "self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['잔고']['매도매수구분']) meme_gubun = self.realType.REALTYPE['매도수구분'][meme_gubun] first_sell_price = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['잔고']['(최우선)매도호가']) first_sell_price = abs(int(first_sell_price)) first_buy_price", "pass_success == True: self.logging.logger.debug(\"조건부 통과됨\") code_nm = self.dynamicCall(\"GetMasterCodeName(QString)\", code) f = open(\"files/condition_stock.txt\", \"a\",", "= self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['주문체결']['현재가']) # 출력: -6000 current_price = abs(int(current_price)) first_sell_price = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['주문체결']['(최우선)매도호가'])", "self.detail_account_info_event_loop.exec_() def trdata_slot(self, sScrNo, sRQName, sTrCode, sRecordName, sPrevNext): if sRQName == \"예수금상세현황요청\": deposit", "+(-)2520 e = abs(int(e)) f = self.dynamicCall(\"GetCommRealData(QString, int)\", sCode, self.realType.REALTYPE[sRealType]['(최우선)매수호가']) # 출력 :", "code = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, i, \"종목번호\") # 출력 :", "abs(int(e)) f = self.dynamicCall(\"GetCommRealData(QString, int)\", sCode, self.realType.REALTYPE[sRealType]['(최우선)매수호가']) # 출력 : +(-)2515 f =", "= self.not_account_stock_dict[order_number]['종목코드'] if code not in screen_overwrite: screen_overwrite.append(code) #포트폴리로에 담겨있는 종목들 for code", "order_price = int(order_price) not_chegual_quan = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['주문체결']['미체결수량']) # 출력: 15, default: 0 not_chegual_quan", "# 오늘자 주가가 120일 이평선에 걸쳐있는지 확인 bottom_stock_price = False check_price = None", "Kiwoom(QAxWidget): def __init__(self): super().__init__() self.realType = RealType() self.logging = Logging() # self.slack =", "order_no = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, i, \"주문번호\") order_status = 
self.dynamicCall(\"GetCommData(QString,", "code_nm.strip() stock_quantity = int(stock_quantity.strip()) buy_price = int(buy_price.strip()) learn_rate = float(learn_rate.strip()) current_price = int(current_price.strip())", "'', self.realType.REALTYPE['장시작시간']['장운영구분'], \"0\") for code in self.portfolio_stock_dict.keys(): screen_num = self.portfolio_stock_dict[code]['스크린번호'] fids = self.realType.REALTYPE['주식체결']['체결시간']", "stock_name.strip() origin_order_number = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['주문체결']['원주문번호']) # 출력 : defaluse : \"000000\" order_number =", "\"종목코드\") code = code.strip() # data = self.dynamicCall(\"GetCommDataEx(QString, QString)\", sTrCode, sRQName) # [[‘’,", "0: self.logging.logger.debug(\"매도주문 전달 성공\") del self.account_stock_dict[sCode] else: self.logging.logger.debug(\"매도주문 전달 실패\") elif sCode in", "int(current_price.strip()) total_chegual_price = int(total_chegual_price.strip()) possible_quantity = int(possible_quantity.strip()) self.account_stock_dict[code].update({\"종목명\": code_nm}) self.account_stock_dict[code].update({\"보유수량\": stock_quantity}) self.account_stock_dict[code].update({\"매입가\": buy_price})", "QString, int, QString, int, int, QString, QString)\", [\"신규매도\", self.portfolio_stock_dict[sCode][\"주문용스크린번호\"], self.account_num, 2, sCode, jd['주문가능수량'],", "fid = self.realType.REALTYPE[sRealType]['장운영구분'] # (0:장시작전, 2:장종료전(20분), 3:장시작, 4,8:장종료(30분), 9:장마감) value = self.dynamicCall(\"GetCommRealData(QString, int)\",", "self.dynamicCall(\"GetRepeatCnt(QString, QString)\", sTrCode, sRQName) for i in range(rows): code = self.dynamicCall(\"GetCommData(QString, QString, int,", "QString)\", sTrCode, sRQName, i, \"거래량\") # 출력 : 000070 trading_value = self.dynamicCall(\"GetCommData(QString, QString,", ": possible_quantity}) self.logging.logger.debug(\"sPreNext : %s\" % sPrevNext) print(\"계좌에 가지고 있는 종목은 %s \"", "not_quantity == 0: del self.not_account_stock_dict[order_num] # 실시간 체결 정보 def chejan_slot(self, sGubun, 
nItemCnt,", "self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['잔고']['(최우선)매수호가']) first_buy_price = abs(int(first_buy_price)) if sCode not in self.jango_dict.keys(): self.jango_dict.update({sCode:{}}) self.jango_dict[sCode].update({\"현재가\": current_price})", "\"주문번호\") order_status = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, i, \"주문상태\") # 접수,확인,체결", "= int(like_quan) buy_price = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['잔고']['매입단가']) buy_price = abs(int(buy_price)) total_buy_price = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['잔고']['총매입가'])", "self.logging.logger.debug(\"매수취소 전달 성공\") else: self.logging.logger.debug(\"매수취소 전달 실패\") elif not_quantity == 0: del self.not_account_stock_dict[order_num]", "int, QString)\", sTrCode, sRQName, i, \"거래대금\") # 출력 : 000070 date = self.dynamicCall(\"GetCommData(QString,", "if moving_average_price_prev <= int(self.calcul_data[idx][6]) and idx <= 20: self.logging.logger.debug(\"20일 동안 주가가 120일 이평선과", "= self.dynamicCall(\"GetCommRealData(QString, int)\", sCode, fid) if value == '0': self.logging.logger.debug(\"장 시작 전\") elif", "self.screen_meme_stock = \"6000\" #종목별 할당할 주문용스크린 번호 self.screen_start_stop_real = \"1000\" #장 시작/종료 실시간", "self.not_account_stock_dict[order_num][\"종목코드\"] meme_price = self.not_account_stock_dict[order_num]['주문가격'] not_quantity = self.not_account_stock_dict[order_num]['미체결수량'] order_gubun = self.not_account_stock_dict[order_num]['주문구분'] if order_gubun ==", "[\"매수취소\", self.portfolio_stock_dict[sCode][\"주문용스크린번호\"], self.account_num, 3, code, 0, 0, self.realType.SENDTYPE['거래구분']['지정가'], order_num] ) if order_success ==", "self.account_stock_dict[sCode] else: self.logging.logger.debug(\"매도주문 전달 실패\") elif sCode in self.jango_dict.keys(): jd = self.jango_dict[sCode] meme_rate", "i in range(cnt): data = [] current_price = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode,", 
"self.login_event_loop.exit() def get_account_info(self): account_list = self.dynamicCall(\"GetLoginInfo(QString)\", \"ACCNO\") # 계좌번호 반환 account_num = account_list.split(';')[0]", "확인, 체결 order_quan = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['주문체결']['주문수량']) # 출력 : 3 order_quan = int(order_quan)", "사용할 수 있게 변환해 주는 함수 self.event_slots() # 키움과 연결하기 위한 시그널 /", "QTest.qWait(5000) self.file_delete() self.calculator_fnc() sys.exit() elif sRealType == \"주식체결\": a = self.dynamicCall(\"GetCommRealData(QString, int)\", sCode,", "요청 관련 이벤트 self.OnReceiveMsg.connect(self.msg_slot) def real_event_slot(self): self.OnReceiveRealData.connect(self.realdata_slot) # 실시간 이벤트 연결 self.OnReceiveChejanData.connect(self.chejan_slot) #종목", "QString, int, int, QString, QString)\", [\"신규매도\", self.portfolio_stock_dict[sCode][\"주문용스크린번호\"], self.account_num, 2, sCode, jd['주문가능수량'], 0, self.realType.SENDTYPE['거래구분']['시장가'],", "abs(int(g)) h = self.dynamicCall(\"GetCommRealData(QString, int)\", sCode, self.realType.REALTYPE[sRealType]['누적거래량']) # 출력 : 240124 h =", "5 default : '' if chegual_quantity == '': chegual_quantity = 0 else: chegual_quantity", "stock_price = int(ls[2].split(\"\\n\")[0]) stock_price = abs(stock_price) self.portfolio_stock_dict.update({stock_code:{\"종목명\":stock_name, \"현재가\":stock_price}}) f.close() def merge_dict(self): self.all_stock_dict.update({\"계좌평가잔고내역\": self.account_stock_dict})", "total_profit_loss_rate = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, 0, \"총수익률(%)\") self.total_profit_loss_rate = float(total_profit_loss_rate)", "3 order_quan = int(order_quan) order_price = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['주문체결']['주문가격']) # 출력: 21000 order_price =", "# 계좌번호 반환 account_num = account_list.split(';')[0] self.account_num = account_num self.logging.logger.debug(\"계좌번호 : %s\" %", "chegual_time_str = self.dynamicCall(\"GetChejanData(int)\", 
self.realType.REALTYPE['주문체결']['주문/체결시간']) # 출력: '151028' chegual_price = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['주문체결']['체결가']) # 출력:", "def day_kiwoom_db(self, code=None, date=None, sPrevNext=\"0\"): QTest.qWait(3600) #3.6초마다 딜레이를 준다. self.dynamicCall(\"SetInputValue(QString, QString)\", \"종목코드\", code)", "120: pass_success = False else: # 120일 이평선의 최근 가격 구함 total_price =", "stock_name = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['잔고']['종목명']) stock_name = stock_name.strip() current_price = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['잔고']['현재가']) current_price =", "QString)\", sTrCode, sRQName, i, \"매매가능수량\") self.logging.logger.debug(\"종목코드: %s - 종목명: %s - 보유수량: %s", "\"5000\" #종목별 할당할 스크린 번호 self.screen_meme_stock = \"6000\" #종목별 할당할 주문용스크린 번호 self.screen_start_stop_real", "= self.dynamicCall(\"GetCommRealData(QString, int)\", sCode, self.realType.REALTYPE[sRealType]['등락율']) # 출력 : +(-)12.98 d = float(d) e", "QString)\", sTrCode, sRQName, i, \"고가\") # 출력 : 000070 low_price = self.dynamicCall(\"GetCommData(QString, QString,", "if stock_quan == 0: del self.jango_dict[sCode] #송수신 메세지 get def msg_slot(self, sScrNo, sRQName,", "= abs(int(current_price)) first_sell_price = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['주문체결']['(최우선)매도호가']) # 출력: -6010 first_sell_price = abs(int(first_sell_price)) first_buy_price", "-매도, +매수, -매도정정, +매수정정 not_quantity = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, i,", "text=\"주식 자동화 프로그램이 동작 되었습니다.\" ) def get_ocx_instance(self): self.setControl(\"KHOPENAPI.KHOpenAPICtrl.1\") # 레지스트리에 저장된 api", "self.realType.SENDTYPE['거래구분']['시장가'], \"\"] ) if order_success == 0: self.logging.logger.debug(\"매도주문 전달 성공\") else: self.logging.logger.debug(\"매도주문 전달", "elif int(self.calcul_data[idx][7]) > moving_average_price_prev and idx > 20: # 120일 이평선 위에 있는", "self.dynamicCall(\"SetInputValue(QString, QString)\", \"체결구분\", 
\"1\") self.dynamicCall(\"SetInputValue(QString, QString)\", \"매매구분\", \"0\") self.dynamicCall(\"CommRqData(QString, QString, int, QString)\", \"실시간미체결요청\",", "9:장마감) value = self.dynamicCall(\"GetCommRealData(QString, int)\", sCode, fid) if value == '0': self.logging.logger.debug(\"장 시작", "sFidList): if int(sGubun) == 0: #주문체결 account_num = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['주문체결']['계좌번호']) sCode = self.dynamicCall(\"GetChejanData(int)\",", "not_chegual_quan = int(not_chegual_quan) order_gubun = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['주문체결']['주문구분']) # 출력: -매도, +매수 order_gubun =", "= abs(int(i)) j = self.dynamicCall(\"GetCommRealData(QString, int)\", sCode, self.realType.REALTYPE[sRealType]['시가']) # 출력 : +(-)2530 j", "해당 경로에 파일이 있는지 체크한다. f = open(\"files/condition_stock.txt\", \"r\", encoding=\"utf8\") # \"r\"을 인자로", "c = abs(int(c)) d = self.dynamicCall(\"GetCommRealData(QString, int)\", sCode, self.realType.REALTYPE[sRealType]['등락율']) # 출력 : +(-)12.98", "pass else: self.account_stock_dict[code] = {} code_nm = code_nm.strip() stock_quantity = int(stock_quantity.strip()) buy_price =", "code=None, date=None, sPrevNext=\"0\"): QTest.qWait(3600) #3.6초마다 딜레이를 준다. 
self.dynamicCall(\"SetInputValue(QString, QString)\", \"종목코드\", code) self.dynamicCall(\"SetInputValue(QString, QString)\",", "\"opw00001\", sPrevNext, self.screen_my_info) self.detail_account_info_event_loop.exec_() def detail_account_mystock(self, sPrevNext=\"0\"): self.dynamicCall(\"SetInputValue(QString, QString)\", \"계좌번호\", self.account_num) self.dynamicCall(\"SetInputValue(QString, QString)\",", ") if order_success == 0: self.logging.logger.debug(\"매수취소 전달 성공\") else: self.logging.logger.debug(\"매수취소 전달 실패\") elif", "self.detail_account_info_event_loop = QEventLoop() # 예수금 요청용 이벤트루프 self.calculator_event_loop = QEventLoop() ######################################### ########### 전체", "= [] current_price = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, i, \"현재가\") #", "sCode, jd['주문가능수량'], 0, self.realType.SENDTYPE['거래구분']['시장가'], \"\"] ) if order_success == 0: self.logging.logger.debug(\"매도주문 전달 성공\")", "from PyQt5.QAxContainer import * from PyQt5.QtCore import * from config.errorCode import * from", "event loop를 실행하기 위한 변수모음 self.login_event_loop = QEventLoop() #로그인 요청용 이벤트루프 self.detail_account_info_event_loop =", "포함 QTimer.singleShot(5000, self.not_concluded_account) #5초 뒤에 미체결 종목들 가져오기 실행 ######################################### QTest.qWait(10000) self.read_code() self.screen_number_setting()", "‘현재가’, ‘거래량’, ‘거래대금’, ‘날짜’, ‘시가’, ‘고가’, ‘저가’. 
‘’], [‘’, ‘현재가’, ’거래량’, ‘거래대금’, ‘날짜’,", "# (0:장시작전, 2:장종료전(20분), 3:장시작, 4,8:장종료(30분), 9:장마감) value = self.dynamicCall(\"GetCommRealData(QString, int)\", sCode, fid) if", "전달 성공\") else: self.logging.logger.debug(\"매수주문 전달 실패\") not_meme_list = list(self.not_account_stock_dict) for order_num in not_meme_list:", "self.realType.REALTYPE['잔고']['매도매수구분']) meme_gubun = self.realType.REALTYPE['매도수구분'][meme_gubun] first_sell_price = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['잔고']['(최우선)매도호가']) first_sell_price = abs(int(first_sell_price)) first_buy_price =", "def get_account_info(self): account_list = self.dynamicCall(\"GetLoginInfo(QString)\", \"ACCNO\") # 계좌번호 반환 account_num = account_list.split(';')[0] self.account_num", "in self.not_account_stock_dict.keys(): code = self.not_account_stock_dict[order_number]['종목코드'] if code not in screen_overwrite: screen_overwrite.append(code) #포트폴리로에 담겨있는", "self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, i, \"미체결수량\") ok_quantity = self.dynamicCall(\"GetCommData(QString, QString, int,", "int(order_quantity.strip()) order_price = int(order_price.strip()) order_gubun = order_gubun.strip().lstrip('+').lstrip('-') not_quantity = int(not_quantity.strip()) ok_quantity = int(ok_quantity.strip())", "= self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['주문체결']['원주문번호']) # 출력 : defaluse : \"000000\" order_number = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['주문체결']['주문번호'])", "QEventLoop() #로그인 요청용 이벤트루프 self.detail_account_info_event_loop = QEventLoop() # 예수금 요청용 이벤트루프 self.calculator_event_loop =", "동작 #print(\"kiwoom() class start. 
\") self.logging.logger.debug(\"Kiwoom() class start.\") ####### event loop를 실행하기 위한", "= [] ########################################## ####### 요청 스크린 번호 self.screen_my_info = \"2000\" #계좌 관련한 스크린", "len(self.calcul_data) < 120: pass_success = False else: # 120일 이평선의 최근 가격 구함", ": %s\" % account_num) def detail_account_info(self, sPrevNext=\"0\"): self.dynamicCall(\"SetInputValue(QString, QString)\", \"계좌번호\", self.account_num) self.dynamicCall(\"SetInputValue(QString, QString)\",", "\"종목코드\", code) self.dynamicCall(\"SetInputValue(QString, QString)\", \"수정주가구분\", \"1\") if date != None: self.dynamicCall(\"SetInputValue(QString, QString)\", \"기준일자\",", "str(self.screen_real_stock)}) self.portfolio_stock_dict[code].update({\"주문용스크린번호\": str(self.screen_meme_stock)}) elif code not in self.portfolio_stock_dict.keys(): self.portfolio_stock_dict.update({code: {\"스크린번호\": str(self.screen_real_stock), \"주문용스크린번호\": str(self.screen_meme_stock)}})", "21000 order_price = int(order_price) not_chegual_quan = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['주문체결']['미체결수량']) # 출력: 15, default: 0", "\"예수금상세현황요청\", \"opw00001\", sPrevNext, self.screen_my_info) self.detail_account_info_event_loop.exec_() def detail_account_mystock(self, sPrevNext=\"0\"): self.dynamicCall(\"SetInputValue(QString, QString)\", \"계좌번호\", self.account_num) self.dynamicCall(\"SetInputValue(QString,", "sCode = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['주문체결']['종목코드'])[1:] stock_name = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['주문체결']['종목명']) stock_name = stock_name.strip() origin_order_number =", "self.realType.REALTYPE['잔고']['현재가']) current_price = abs(int(current_price)) stock_quan = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['잔고']['보유수량']) stock_quan = int(stock_quan) like_quan =", "0 #출력가능 금액 self.total_profit_loss_money = 0 #총평가손익금액 self.total_profit_loss_rate = 0.0 #총수익률(%) ######################################## 
########", "self.portfolio_stock_dict[sCode][\"주문용스크린번호\"], self.account_num, 1, sCode, quantity, e, self.realType.SENDTYPE['거래구분']['지정가'], \"\"] ) if order_success == 0:", "self.OnReceiveTrData.connect(self.trdata_slot) # 트랜잭션 요청 관련 이벤트 self.OnReceiveMsg.connect(self.msg_slot) def real_event_slot(self): self.OnReceiveRealData.connect(self.realdata_slot) # 실시간 이벤트", "self.not_account_stock_dict}) self.all_stock_dict.update({'포트폴리오종목': self.portfolio_stock_dict}) def screen_number_setting(self): screen_overwrite = [] #계좌평가잔고내역에 있는 종목들 for code", "self.jango_dict[sCode].update({\"(최우선)매수호가\": first_buy_price}) if stock_quan == 0: del self.jango_dict[sCode] #송수신 메세지 get def msg_slot(self,", "가격보다 낮은지 확인 if price_top_moving == True: if moving_average_price > moving_average_price_prev and check_price", "prev_price = int(self.calcul_data[idx][7]) break idx += 1 # 해당부분 이평선이 가장 최근의 이평선", "sCode) result = (self.use_money * 0.1) / e quantity = int(result) order_success =", "self.portfolio_stock_dict[sCode].update({\"현재가\": b}) self.portfolio_stock_dict[sCode].update({\"전일대비\": c}) self.portfolio_stock_dict[sCode].update({\"등락율\": d}) self.portfolio_stock_dict[sCode].update({\"(최우선)매도호가\": e}) self.portfolio_stock_dict[sCode].update({\"(최우선)매수호가\": f}) self.portfolio_stock_dict[sCode].update({\"거래량\": g}) self.portfolio_stock_dict[sCode].update({\"누적거래량\":", "sRQName, 0, \"총평가손익금액\") self.total_profit_loss_money = int(total_profit_loss_money) total_profit_loss_rate = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode,", "% len(self.calcul_data)) pass_success = False # 120일 이평선을 그릴만큼의 데이터가 있는지 체크 if", "받기 #0:장내, 10:코스닥 :param market_code: 시장코드 입력 :return: ''' code_list = self.dynamicCall(\"GetCodeListByMarket(QString)\", market_code)", "QString)\", \"조회구분\", \"1\") self.dynamicCall(\"CommRqData(QString, QString, int, QString)\", \"계좌평가잔고내역요청\", \"opw00018\", sPrevNext, self.screen_my_info) self.detail_account_info_event_loop.exec_() def", 
"self.portfolio_stock_dict[code].update({\"주문용스크린번호\": str(self.screen_meme_stock)}) elif code not in self.portfolio_stock_dict.keys(): self.portfolio_stock_dict.update({code: {\"스크린번호\": str(self.screen_real_stock), \"주문용스크린번호\": str(self.screen_meme_stock)}}) cnt", "== 0: del self.not_account_stock_dict[order_num] # 실시간 체결 정보 def chejan_slot(self, sGubun, nItemCnt, sFidList):", "데이터가 있는지 체크 if self.calcul_data == None or len(self.calcul_data) < 120: pass_success =", "관련 이벤트 self.OnReceiveTrData.connect(self.trdata_slot) # 트랜잭션 요청 관련 이벤트 self.OnReceiveMsg.connect(self.msg_slot) def real_event_slot(self): self.OnReceiveRealData.connect(self.realdata_slot) #", "== False: self.logging.logger.debug(\"조건부 통과 못함\") self.calcul_data.clear() self.calculator_event_loop.exit() def stop_screen_cancel(self, sScrNo=None): self.dynamicCall(\"DisconnectRealData(QString)\", sScrNo) #", "data.append(low_price.strip()) data.append(\"\") self.calcul_data.append(data.copy()) if sPrevNext == \"2\": self.day_kiwoom_db(code=code, sPrevNext=sPrevNext) else: self.logging.logger.debug(\"총 일수 %s\"", "= line.split(\"\\t\") stock_code = ls[0] stock_name = ls[1] stock_price = int(ls[2].split(\"\\n\")[0]) stock_price =", "i, \"매입가\") # 매입가 : 000000000054100 learn_rate = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode,", "int(sGubun) == 1: #잔고 account_num = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['잔고']['계좌번호']) sCode = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['잔고']['종목코드'])[1:] stock_name", "= int(total_buy_price) meme_gubun = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['잔고']['매도매수구분']) meme_gubun = self.realType.REALTYPE['매도수구분'][meme_gubun] first_sell_price = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['잔고']['(최우선)매도호가'])", "moving_average_price > moving_average_price_prev and check_price > prev_price: self.logging.logger.debug(\"포착된 이평선의 가격이 오늘자 이평선 가격보다", "= 
self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['잔고']['현재가']) current_price = abs(int(current_price)) stock_quan = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['잔고']['보유수량']) stock_quan = int(stock_quan)", "os.path.exists(\"files/condition_stock.txt\"): # 해당 경로에 파일이 있는지 체크한다. f = open(\"files/condition_stock.txt\", \"r\", encoding=\"utf8\") #", ": defaluse : \"000000\" order_number = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['주문체결']['주문번호']) # 출럭: 0115061 마지막 주문번호", "종목 분석관련 함수 모음 :return: ''' code_list = self.get_code_list_by_market(\"10\") self.logging.logger.debug(\"코스닥 갯수 %s \"", "QString, int, QString)\", sTrCode, sRQName, i, \"매입금액\") possible_quantity = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\",", "get_code_list_by_market(self, market_code): ''' 종목코드 리스트 받기 #0:장내, 10:코스닥 :param market_code: 시장코드 입력 :return:", "sTrCode, sRQName) for i in range(rows): code = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode,", ": +(-)2520 c = abs(int(c)) d = self.dynamicCall(\"GetCommRealData(QString, int)\", sCode, self.realType.REALTYPE[sRealType]['등락율']) # 출력", "self.not_account_stock_dict[order_num] # 실시간 체결 정보 def chejan_slot(self, sGubun, nItemCnt, sFidList): if int(sGubun) ==", "str(temp_screen) if (cnt % 50) == 0: meme_screen += 1 self.screen_meme_stock = str(meme_screen)", "self.account_num, 2, sCode, asd['매매가능수량'], 0, self.realType.SENDTYPE['거래구분']['시장가'], \"\"] ) if order_success == 0: self.logging.logger.debug(\"매도주문", "= self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, i, \"매입금액\") possible_quantity = self.dynamicCall(\"GetCommData(QString, QString,", "# 출력 : 240124 h = abs(int(h)) i = self.dynamicCall(\"GetCommRealData(QString, int)\", sCode, self.realType.REALTYPE[sRealType]['고가'])", "밑에 존재하는지 확인 prev_price = None if bottom_stock_price == True: moving_average_price_prev = 0", "QString, int, QString)\", sTrCode, sRQName, i, 
\"매매가능수량\") self.logging.logger.debug(\"종목코드: %s - 종목명: %s -", "for idx, code in enumerate(code_list): self.dynamicCall(\"DisconnectRealData(QString)\", self.screen_calculation_stock) # 스크린 연결 끊기 self.logging.logger.debug(\"%s /", "if int(sGubun) == 0: #주문체결 account_num = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['주문체결']['계좌번호']) sCode = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['주문체결']['종목코드'])[1:]", "ls[1] stock_price = int(ls[2].split(\"\\n\")[0]) stock_price = abs(stock_price) self.portfolio_stock_dict.update({stock_code:{\"종목명\":stock_name, \"현재가\":stock_price}}) f.close() def merge_dict(self): self.all_stock_dict.update({\"계좌평가잔고내역\":", "self.portfolio_stock_dict.keys(): screen_num = self.portfolio_stock_dict[code]['스크린번호'] fids = self.realType.REALTYPE['주식체결']['체결시간'] self.dynamicCall(\"SetRealReg(QString, QString, QString, QString)\", screen_num, code,", "셋팅 함수들 바로 실행 self.get_ocx_instance() #OCX 방식을 파이썬에 사용할 수 있게 변환해 주는", "int, QString)\", \"주식일봉차트조회\", \"opt10081\", sPrevNext, self.screen_calculation_stock) # Tr서버로 전송 -Transaction self.calculator_event_loop.exec_() def read_code(self):", "종목 관리 self.all_stock_dict = {} ########################### ####### 계좌 관련된 변수 self.account_stock_dict = {}", "= order_gubun.strip().lstrip('+').lstrip('-') chegual_time_str = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['주문체결']['주문/체결시간']) # 출력: '151028' chegual_price = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['주문체결']['체결가'])", "\"0\") self.dynamicCall(\"CommRqData(QString, QString, int, QString)\", \"실시간미체결요청\", \"opt10075\", sPrevNext, self.screen_my_info) self.detail_account_info_event_loop.exec_() def trdata_slot(self, sScrNo,", "\" % self.not_account_stock_dict[order_no]) self.detail_account_info_event_loop.exit() elif sRQName == \"주식일봉차트조회\": code = self.dynamicCall(\"GetCommData(QString, QString, int,", "있는 종목들 for order_number in self.not_account_stock_dict.keys(): code = 
self.not_account_stock_dict[order_number]['종목코드'] if code not in", "sPrevNext): if sRQName == \"예수금상세현황요청\": deposit = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName,", "\"현재가\") # 현재가 : 000000003450 total_chegual_price = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName,", "= 0.5 #예수금에서 실제 사용할 비율 self.output_deposit = 0 #출력가능 금액 self.total_profit_loss_money =", "i in range(rows): code = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, i, \"종목번호\")", "= self.dynamicCall(\"GetCommRealData(QString, int)\", sCode, self.realType.REALTYPE[sRealType]['시가']) # 출력 : +(-)2530 j = abs(int(j)) k", ": %s\" % sPrevNext) print(\"계좌에 가지고 있는 종목은 %s \" % rows) if", "c}) self.portfolio_stock_dict[sCode].update({\"등락율\": d}) self.portfolio_stock_dict[sCode].update({\"(최우선)매도호가\": e}) self.portfolio_stock_dict[sCode].update({\"(최우선)매수호가\": f}) self.portfolio_stock_dict[sCode].update({\"거래량\": g}) self.portfolio_stock_dict[sCode].update({\"누적거래량\": h}) self.portfolio_stock_dict[sCode].update({\"고가\": i})", "스크린 번호 self.screen_calculation_stock = \"4000\" #계산용 스크린 번호 self.screen_real_stock = \"5000\" #종목별 할당할", "else: chegual_quantity = int(chegual_quantity) current_price = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['주문체결']['현재가']) # 출력: -6000 current_price =", "== 0: self.logging.logger.debug(\"매도주문 전달 성공\") del self.account_stock_dict[sCode] else: self.logging.logger.debug(\"매도주문 전달 실패\") elif sCode", "QString)\", sTrCode, sRQName, i, \"매입가\") # 매입가 : 000000000054100 learn_rate = self.dynamicCall(\"GetCommData(QString, QString,", "order_number in self.not_account_stock_dict.keys(): code = self.not_account_stock_dict[order_number]['종목코드'] if code not in screen_overwrite: screen_overwrite.append(code) #포트폴리로에", "관련된 변수 self.account_stock_dict = {} self.not_account_stock_dict = {} self.deposit = 0 #예수금 self.use_money", "# 출력: 접수, 확인, 체결 order_quan = 
self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['주문체결']['주문수량']) # 출력 : 3", "self.account_stock_dict[code].update({\"수익률(%)\": learn_rate}) self.account_stock_dict[code].update({\"현재가\": current_price}) self.account_stock_dict[code].update({\"매입금액\": total_chegual_price}) self.account_stock_dict[code].update({'매매가능수량' : possible_quantity}) self.logging.logger.debug(\"sPreNext : %s\" %", "일수 %s\" % len(self.calcul_data)) pass_success = False # 120일 이평선을 그릴만큼의 데이터가 있는지", "QString, int, int, QString, QString)\", [\"신규매수\", self.portfolio_stock_dict[sCode][\"주문용스크린번호\"], self.account_num, 1, sCode, quantity, e, self.realType.SENDTYPE['거래구분']['지정가'],", "QString, int, QString, int, int, QString, QString)\", [\"신규매도\", self.portfolio_stock_dict[sCode][\"주문용스크린번호\"], self.account_num, 2, sCode, asd['매매가능수량'],", "self.logging.logger.debug(\"장 종료, 동시호가로 넘어감\") elif value == \"4\": self.logging.logger.debug(\"3시30분 장 종료\") for code", "sRealType == \"장시작시간\": fid = self.realType.REALTYPE[sRealType]['장운영구분'] # (0:장시작전, 2:장종료전(20분), 3:장시작, 4,8:장종료(30분), 9:장마감) value", "QString)\", sTrCode, sRQName, i, \"주문상태\") # 접수,확인,체결 order_quantity = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\",", "day_kiwoom_db(self, code=None, date=None, sPrevNext=\"0\"): QTest.qWait(3600) #3.6초마다 딜레이를 준다. 
self.dynamicCall(\"SetInputValue(QString, QString)\", \"종목코드\", code) self.dynamicCall(\"SetInputValue(QString,", "self.logging.logger.debug(\"120일치 이평선 위에 있는 구간 확인됨\") price_top_moving = True prev_price = int(self.calcul_data[idx][7]) break", "self.portfolio_stock_dict.keys(): if code not in screen_overwrite: screen_overwrite.append(code) # 스크린번호 할당 cnt = 0", "or len(self.calcul_data) < 120: pass_success = False else: # 120일 이평선의 최근 가격", "self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['주문체결']['종목코드'])[1:] stock_name = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['주문체결']['종목명']) stock_name = stock_name.strip() origin_order_number = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['주문체결']['원주문번호'])", "abs(int(j)) k = self.dynamicCall(\"GetCommRealData(QString, int)\", sCode, self.realType.REALTYPE[sRealType]['저가']) # 출력 : +(-)2530 k =", "False check_price = None if int(self.calcul_data[0][7]) <= moving_average_price and moving_average_price <= int(self.calcul_data[0][6]): self.logging.logger.debug(\"오늘", "if date != None: self.dynamicCall(\"SetInputValue(QString, QString)\", \"기준일자\", date) self.dynamicCall(\"CommRqData(QString, QString, int, QString)\", \"주식일봉차트조회\",", "########################### ####### 계좌 관련된 변수 self.account_stock_dict = {} self.not_account_stock_dict = {} self.deposit =", "sTrCode, sRQName, i, \"시가\") # 출력 : 000070 high_price = self.dynamicCall(\"GetCommData(QString, QString, int,", "= list(self.not_account_stock_dict) for order_num in not_meme_list: code = self.not_account_stock_dict[order_num][\"종목코드\"] meme_price = self.not_account_stock_dict[order_num]['주문가격'] not_quantity", "self.logging.logger.debug(\"오늘 주가 120이평선 아래에 걸쳐있는 것 확인\") bottom_stock_price = True check_price = int(self.calcul_data[0][6])", "리스트 받기 #0:장내, 10:코스닥 :param market_code: 시장코드 입력 :return: ''' code_list = self.dynamicCall(\"GetCodeListByMarket(QString)\",", "#주문체결 account_num = self.dynamicCall(\"GetChejanData(int)\", 
self.realType.REALTYPE['주문체결']['계좌번호']) sCode = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['주문체결']['종목코드'])[1:] stock_name = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['주문체결']['종목명'])", "self.output_deposit = int(output_deposit) self.logging.logger.debug(\"예수금 : %s\" % self.output_deposit) self.stop_screen_cancel(self.screen_my_info) self.detail_account_info_event_loop.exit() elif sRQName ==", "확인됨\") price_top_moving = True prev_price = int(self.calcul_data[idx][7]) break idx += 1 # 해당부분", "sTrCode, sRQName, 0, \"종목코드\") code = code.strip() # data = self.dynamicCall(\"GetCommDataEx(QString, QString)\", sTrCode,", "슬롯 모음 self.real_event_slot() # 실시간 이벤트 시그널 / 슬롯 연결 self.signal_login_commConnect() #로그인 요청", "self.screen_number_setting() QTest.qWait(5000) #실시간 수신 관련 함수 self.dynamicCall(\"SetRealReg(QString, QString, QString, QString)\", self.screen_start_stop_real, '', self.realType.REALTYPE['장시작시간']['장운영구분'],", "sTrCode, sRQName, i, \"현재가\") # 출력 : 000070 value = self.dynamicCall(\"GetCommData(QString, QString, int,", "int)\", sCode, self.realType.REALTYPE[sRealType]['시가']) # 출력 : +(-)2530 j = abs(int(j)) k = self.dynamicCall(\"GetCommRealData(QString,", "%s\" % len(self.calcul_data)) pass_success = False # 120일 이평선을 그릴만큼의 데이터가 있는지 체크", "self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, i, \"체결량\") code = code.strip() code_nm =", "self.dynamicCall(\"SetRealRemove(QString, QString)\", self.portfolio_stock_dict[code]['스크린번호'], code) QTest.qWait(5000) self.file_delete() self.calculator_fnc() sys.exit() elif sRealType == \"주식체결\": a", "출력 : A039423 // 알파벳 A는 장내주식, J는 ELW종목, Q는 ETN종목 code =", "int(use_money) self.use_money = self.use_money / 4 output_deposit = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode,", "시장코드 입력 :return: ''' code_list = self.dynamicCall(\"GetCodeListByMarket(QString)\", market_code) code_list = code_list.split(';')[:-1] return code_list", "# data 
= self.dynamicCall(\"GetCommDataEx(QString, QString)\", sTrCode, sRQName) # [[‘’, ‘현재가’, ‘거래량’, ‘거래대금’, ‘날짜’,", "in range(rows): code = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, i, \"종목번호\") #", "int(order_price) not_chegual_quan = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['주문체결']['미체결수량']) # 출력: 15, default: 0 not_chegual_quan = int(not_chegual_quan)", "self.logging.logger.debug(\"계좌평가잔고내역요청 싱글데이터 : %s - %s - %s\" % (total_buy_money, total_profit_loss_money, total_profit_loss_rate)) rows", "‘시가’, ‘고가’, ‘저가’, ‘’]. […]] cnt = self.dynamicCall(\"GetRepeatCnt(QString, QString)\", sTrCode, sRQName) self.logging.logger.debug(\"남은 일자", "######## 종목 정보 가져오기 self.portfolio_stock_dict = {} self.jango_dict = {} ######################## ########### 종목", "120일 이평선과 같거나 위에 있으면 조건 통과 못함\") price_top_moving = False break elif", "#종목별 할당할 주문용스크린 번호 self.screen_start_stop_real = \"1000\" #장 시작/종료 실시간 스크린번호 ######################################## #########", "int(stock_quantity.strip()) buy_price = int(buy_price.strip()) learn_rate = float(learn_rate.strip()) current_price = int(current_price.strip()) total_chegual_price = int(total_chegual_price.strip())", "0: meme_screen += 1 self.screen_meme_stock = str(meme_screen) if code in self.portfolio_stock_dict.keys(): self.portfolio_stock_dict[code].update({\"스크린번호\": str(self.screen_real_stock)})", "moving_average_price = total_price / 120 # 오늘자 주가가 120일 이평선에 걸쳐있는지 확인 bottom_stock_price", "1 self.screen_real_stock = str(temp_screen) if (cnt % 50) == 0: meme_screen += 1", "encoding=\"utf8\") f.write(\"%s\\t%s\\t%s\\n\" % (code, code_nm, str(self.calcul_data[0][1]))) f.close() elif pass_success == False: self.logging.logger.debug(\"조건부 통과", "import os import sys from PyQt5.QAxContainer import * from PyQt5.QtCore import * from", "self.dynamicCall(\"GetCommRealData(QString, int)\", sCode, self.realType.REALTYPE[sRealType]['(최우선)매수호가']) # 출력 : +(-)2515 f = abs(int(f)) g =", "= 
self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['잔고']['매도매수구분']) meme_gubun = self.realType.REALTYPE['매도수구분'][meme_gubun] first_sell_price = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['잔고']['(최우선)매도호가']) first_sell_price = abs(int(first_sell_price))", "g = abs(int(g)) h = self.dynamicCall(\"GetCommRealData(QString, int)\", sCode, self.realType.REALTYPE[sRealType]['누적거래량']) # 출력 : 240124", "\"매입가\") # 매입가 : 000000000054100 learn_rate = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName,", "code) QTest.qWait(5000) self.file_delete() self.calculator_fnc() sys.exit() elif sRealType == \"주식체결\": a = self.dynamicCall(\"GetCommRealData(QString, int)\",", "self.use_money = int(use_money) self.use_money = self.use_money / 4 output_deposit = self.dynamicCall(\"GetCommData(QString, QString, int,", "stock_price = abs(stock_price) self.portfolio_stock_dict.update({stock_code:{\"종목명\":stock_name, \"현재가\":stock_price}}) f.close() def merge_dict(self): self.all_stock_dict.update({\"계좌평가잔고내역\": self.account_stock_dict}) self.all_stock_dict.update({'미체결종목': self.not_account_stock_dict}) self.all_stock_dict.update({'포트폴리오종목':", "= self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, 0, \"총평가손익금액\") self.total_profit_loss_money = int(total_profit_loss_money) total_profit_loss_rate", "= self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, i, \"시가\") # 출력 : 000070", ": %s is updating... 
\" % (idx + 1, len(code_list), code)) self.day_kiwoom_db(code=code) def", "있는지 계속 확인 self.logging.logger.debug(\"120일치가 없음\") break total_price = 0 for value in self.calcul_data[idx:120+idx]:", "120 # 오늘자 주가가 120일 이평선에 걸쳐있는지 확인 bottom_stock_price = False check_price =", "first_sell_price = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['주문체결']['(최우선)매도호가']) # 출력: -6010 first_sell_price = abs(int(first_sell_price)) first_buy_price = self.dynamicCall(\"GetChejanData(int)\",", "- jd['매입단가']) / jd['매입단가'] * 100 if jd['주문가능수량'] > 0 and (meme_rate >", "self.logging.logger.debug(\"매도주문 전달 실패\") elif sCode in self.jango_dict.keys(): jd = self.jango_dict[sCode] meme_rate = (b", "= Slack() #슬랙 동작 #print(\"kiwoom() class start. \") self.logging.logger.debug(\"Kiwoom() class start.\") ####### event", "lines = f.readlines() #파일에 있는 내용들이 모두 읽어와 진다. for line in lines:", "+= int(value[1]) moving_average_price_prev = total_price / 120 if moving_average_price_prev <= int(self.calcul_data[idx][6]) and idx", "sRQName, i, \"저가\") # 출력 : 000070 data.append(\"\") data.append(current_price.strip()) data.append(value.strip()) data.append(trading_value.strip()) data.append(date.strip()) data.append(start_price.strip())", "total_buy_price}) self.jango_dict[sCode].update({\"매도매수구분\": meme_gubun}) self.jango_dict[sCode].update({\"(최우선)매도호가\": first_sell_price}) self.jango_dict[sCode].update({\"(최우선)매수호가\": first_buy_price}) if stock_quan == 0: del self.jango_dict[sCode]", "def signal_login_commConnect(self): self.dynamicCall(\"CommConnect()\") # 로그인 요청 시그널 self.login_event_loop.exec_() # 이벤트루프 실행 def login_slot(self,", "idx > 20: # 120일 이평선 위에 있는 구간 존재 self.logging.logger.debug(\"120일치 이평선 위에", "sRQName, 0, \"출금가능금액\") self.output_deposit = int(output_deposit) self.logging.logger.debug(\"예수금 : %s\" % self.output_deposit) self.stop_screen_cancel(self.screen_my_info) self.detail_account_info_event_loop.exit()", "\"2\": self.detail_account_mystock(sPrevNext=\"2\") else: 
self.detail_account_info_event_loop.exit() elif sRQName == \"실시간미체결요청\": rows = self.dynamicCall(\"GetRepeatCnt(QString, QString)\", sTrCode,", "스크린번호 할당 cnt = 0 for code in screen_overwrite: temp_screen = int(self.screen_real_stock) meme_screen", "있는 종목들 for code in self.account_stock_dict.keys(): if code not in screen_overwrite: screen_overwrite.append(code) #미체결에", "PyQt5.QtTest import * from config.kiwoomType import * from config.log_class import * # from", "for code in screen_overwrite: temp_screen = int(self.screen_real_stock) meme_screen = int(self.screen_meme_stock) if (cnt %", "QString, int, QString)\", sTrCode, sRQName, i, \"미체결수량\") ok_quantity = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\",", "sRQName, i, \"고가\") # 출력 : 000070 low_price = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\",", "int, QString)\", sTrCode, sRQName, i, \"주문구분\") # -매도, +매수, -매도정정, +매수정정 not_quantity =", "> prev_price: self.logging.logger.debug(\"포착된 이평선의 가격이 오늘자 이평선 가격보다 낮은 것 확인\") self.logging.logger.debug(\"포착된 부분의", "종목이 있나 확인 pass else: self.account_stock_dict[code] = {} code_nm = code_nm.strip() stock_quantity =", "초기 셋팅 함수들 바로 실행 self.get_ocx_instance() #OCX 방식을 파이썬에 사용할 수 있게 변환해", "출력 : +240124 매수일때, -2034 매도일 때 g = abs(int(g)) h = self.dynamicCall(\"GetCommRealData(QString,", "self.not_account_stock_dict[order_number].update({\"체결량\": chegual_quantity}) self.not_account_stock_dict[order_number].update({\"현재가\": current_price}) self.not_account_stock_dict[order_number].update({\"(최우선)매도호가\": first_sell_price}) self.not_account_stock_dict[order_number].update({\"(최우선)매수호가\": first_buy_price}) elif int(sGubun) == 1: #잔고", "not_quantity}) self.not_account_stock_dict[order_no].update({'체결량': ok_quantity}) self.logging.logger.debug(\"미체결 종목 : %s \" % self.not_account_stock_dict[order_no]) self.detail_account_info_event_loop.exit() elif sRQName", "== \"4\": self.logging.logger.debug(\"3시30분 장 종료\") for code in 
self.portfolio_stock_dict.keys(): self.dynamicCall(\"SetRealRemove(QString, QString)\", self.portfolio_stock_dict[code]['스크린번호'], code)", "def trdata_slot(self, sScrNo, sRQName, sTrCode, sRecordName, sPrevNext): if sRQName == \"예수금상세현황요청\": deposit =", "elif sCode in self.jango_dict.keys(): jd = self.jango_dict[sCode] meme_rate = (b - jd['매입단가']) /", "\"주식일봉차트조회\", \"opt10081\", sPrevNext, self.screen_calculation_stock) # Tr서버로 전송 -Transaction self.calculator_event_loop.exec_() def read_code(self): if os.path.exists(\"files/condition_stock.txt\"):", "# 출력 : +(-)2530 j = abs(int(j)) k = self.dynamicCall(\"GetCommRealData(QString, int)\", sCode, self.realType.REALTYPE[sRealType]['저가'])", ": %s\" % self.output_deposit) self.stop_screen_cancel(self.screen_my_info) self.detail_account_info_event_loop.exit() elif sRQName == \"계좌평가잔고내역요청\": total_buy_money = self.dynamicCall(\"GetCommData(QString,", "order_success == 0: self.logging.logger.debug(\"매도주문 전달 성공\") else: self.logging.logger.debug(\"매도주문 전달 실패\") elif d >", "‘저가’, ‘’]. 
[…]] cnt = self.dynamicCall(\"GetRepeatCnt(QString, QString)\", sTrCode, sRQName) self.logging.logger.debug(\"남은 일자 수 %s\"", "\"SendOrder(QString, QString, QString, int, QString, int, int, QString, QString)\", [\"매수취소\", self.portfolio_stock_dict[sCode][\"주문용스크린번호\"], self.account_num, 3,", "= int(order_no.strip()) order_status = order_status.strip() order_quantity = int(order_quantity.strip()) order_price = int(order_price.strip()) order_gubun =", "order_price}) self.not_account_stock_dict[order_no].update({'주문구분': order_gubun}) self.not_account_stock_dict[order_no].update({'미체결수량': not_quantity}) self.not_account_stock_dict[order_no].update({'체결량': ok_quantity}) self.logging.logger.debug(\"미체결 종목 : %s \" %", "data.append(date.strip()) data.append(start_price.strip()) data.append(high_price.strip()) data.append(low_price.strip()) data.append(\"\") self.calcul_data.append(data.copy()) if sPrevNext == \"2\": self.day_kiwoom_db(code=code, sPrevNext=sPrevNext) else:", "# 계좌에 있는 종목의 총매입가 total_buy_price = int(total_buy_price) meme_gubun = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['잔고']['매도매수구분']) meme_gubun", "sTrCode, sRQName, i, \"수익률(%)\") # 수익률 : -000000001.94 current_price = self.dynamicCall(\"GetCommData(QString, QString, int,", "= int(total_chegual_price.strip()) possible_quantity = int(possible_quantity.strip()) self.account_stock_dict[code].update({\"종목명\": code_nm}) self.account_stock_dict[code].update({\"보유수량\": stock_quantity}) self.account_stock_dict[code].update({\"매입가\": buy_price}) self.account_stock_dict[code].update({\"수익률(%)\": learn_rate})", "h = abs(int(h)) i = self.dynamicCall(\"GetCommRealData(QString, int)\", sCode, self.realType.REALTYPE[sRealType]['고가']) # 출력 : +(-)2530", "in self.portfolio_stock_dict.keys(): self.portfolio_stock_dict[code].update({\"스크린번호\": str(self.screen_real_stock)}) self.portfolio_stock_dict[code].update({\"주문용스크린번호\": str(self.screen_meme_stock)}) elif code not in self.portfolio_stock_dict.keys(): 
self.portfolio_stock_dict.update({code: {\"스크린번호\":", "= 0 price_top_moving = False idx = 1 while True: if len(self.calcul_data[idx:]) <", "120일 이평선에 걸쳐있는지 확인 bottom_stock_price = False check_price = None if int(self.calcul_data[0][7]) <=", "관련 함수 self.dynamicCall(\"SetRealReg(QString, QString, QString, QString)\", self.screen_start_stop_real, '', self.realType.REALTYPE['장시작시간']['장운영구분'], \"0\") for code in", "fids, \"1\") self.slack.notification( pretext=\"주식자동화 프로그램 동작\", title=\"주식 자동화 프로그램 동작\", fallback=\"주식 자동화 프로그램", "요청 시그널 포함 self.get_account_info() #계좌번호 가져오기 self.detail_account_info() #예수금 요청 시그널 포함 self.detail_account_mystock() #계좌평가잔고내역", "% 50) == 0: temp_screen += 1 self.screen_real_stock = str(temp_screen) if (cnt %", "self.realType.REALTYPE[sRealType]['고가']) # 출력 : +(-)2530 i = abs(int(i)) j = self.dynamicCall(\"GetCommRealData(QString, int)\", sCode,", "할당할 주문용스크린 번호 self.screen_start_stop_real = \"1000\" #장 시작/종료 실시간 스크린번호 ######################################## ######### 초기", "import * from config.kiwoomType import * from config.log_class import * # from config.slack", "%s\" % ( code, code_nm, stock_quantity, buy_price, learn_rate, current_price)) if code in self.account_stock_dict:", "order_success = self.dynamicCall( \"SendOrder(QString, QString, QString, int, QString, int, int, QString, QString)\", [\"매수취소\",", "value in self.calcul_data[:120]: total_price += int(value[1]) moving_average_price = total_price / 120 # 오늘자", "self.all_stock_dict.update({'미체결종목': self.not_account_stock_dict}) self.all_stock_dict.update({'포트폴리오종목': self.portfolio_stock_dict}) def screen_number_setting(self): screen_overwrite = [] #계좌평가잔고내역에 있는 종목들 for", "= account_num self.logging.logger.debug(\"계좌번호 : %s\" % account_num) def detail_account_info(self, sPrevNext=\"0\"): self.dynamicCall(\"SetInputValue(QString, QString)\", \"계좌번호\",", "\"계좌평가잔고내역요청\", \"opw00018\", sPrevNext, self.screen_my_info) self.detail_account_info_event_loop.exec_() def 
not_concluded_account(self, sPrevNext=\"0\"): self.dynamicCall(\"SetInputValue(QString, QString)\", \"계좌번호\", self.account_num) self.dynamicCall(\"SetInputValue(QString,", "= self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['잔고']['보유수량']) stock_quan = int(stock_quan) like_quan = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['잔고']['주문가능수량']) like_quan = int(like_quan)", "date != None: self.dynamicCall(\"SetInputValue(QString, QString)\", \"기준일자\", date) self.dynamicCall(\"CommRqData(QString, QString, int, QString)\", \"주식일봉차트조회\", \"opt10081\",", "code in self.portfolio_stock_dict.keys(): if code not in screen_overwrite: screen_overwrite.append(code) # 스크린번호 할당 cnt", "QString)\", [\"신규매수\", self.portfolio_stock_dict[sCode][\"주문용스크린번호\"], self.account_num, 1, sCode, quantity, e, self.realType.SENDTYPE['거래구분']['지정가'], \"\"] ) if order_success", "e > meme_price: order_success = self.dynamicCall( \"SendOrder(QString, QString, QString, int, QString, int, int,", "and moving_average_price <= int(self.calcul_data[0][6]): self.logging.logger.debug(\"오늘 주가 120이평선 아래에 걸쳐있는 것 확인\") bottom_stock_price =", "self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, i, \"종목명\") order_no = self.dynamicCall(\"GetCommData(QString, QString, int,", "주가 120이평선 아래에 걸쳐있는 것 확인\") bottom_stock_price = True check_price = int(self.calcul_data[0][6]) #", "if chegual_price == '': chegual_price = 0 else: chegual_price = int(chegual_price) chegual_quantity =", "sTrCode, sRQName, i, \"거래대금\") # 출력 : 000070 date = self.dynamicCall(\"GetCommData(QString, QString, int,", "True if pass_success == True: self.logging.logger.debug(\"조건부 통과됨\") code_nm = self.dynamicCall(\"GetMasterCodeName(QString)\", code) f =", "% cnt) for i in range(cnt): data = [] current_price = self.dynamicCall(\"GetCommData(QString, QString,", "self.not_account_stock_dict[order_number]['종목코드'] if code not in screen_overwrite: screen_overwrite.append(code) #포트폴리로에 담겨있는 종목들 
for code in", "in screen_overwrite: screen_overwrite.append(code) #포트폴리로에 담겨있는 종목들 for code in self.portfolio_stock_dict.keys(): if code not", "self.calculator_fnc() sys.exit() elif sRealType == \"주식체결\": a = self.dynamicCall(\"GetCommRealData(QString, int)\", sCode, self.realType.REALTYPE[sRealType]['체결시간']) #", "// 알파벳 A는 장내주식, J는 ELW종목, Q는 ETN종목 code = code.strip()[1:] code_nm =", "{} self.deposit = 0 #예수금 self.use_money = 0 #실제 투자에 사용할 금액 self.use_money_percent", "self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, i, \"매매가능수량\") self.logging.logger.debug(\"종목코드: %s - 종목명: %s", "= [] #계좌평가잔고내역에 있는 종목들 for code in self.account_stock_dict.keys(): if code not in", "\"2\": self.logging.logger.debug(\"장 종료, 동시호가로 넘어감\") elif value == \"4\": self.logging.logger.debug(\"3시30분 장 종료\") for", "int(order_price.strip()) order_gubun = order_gubun.strip().lstrip('+').lstrip('-') not_quantity = int(not_quantity.strip()) ok_quantity = int(ok_quantity.strip()) if order_no in", "float(learn_rate.strip()) current_price = int(current_price.strip()) total_chegual_price = int(total_chegual_price.strip()) possible_quantity = int(possible_quantity.strip()) self.account_stock_dict[code].update({\"종목명\": code_nm}) self.account_stock_dict[code].update({\"보유수량\":", "self.not_account_stock_dict[order_no].update({'주문구분': order_gubun}) self.not_account_stock_dict[order_no].update({'미체결수량': not_quantity}) self.not_account_stock_dict[order_no].update({'체결량': ok_quantity}) self.logging.logger.debug(\"미체결 종목 : %s \" % self.not_account_stock_dict[order_no])", "요청용 이벤트루프 self.calculator_event_loop = QEventLoop() ######################################### ########### 전체 종목 관리 self.all_stock_dict = {}", "and not_quantity > 0 and e > meme_price: order_success = self.dynamicCall( \"SendOrder(QString, QString,", "elif value == \"2\": self.logging.logger.debug(\"장 종료, 동시호가로 넘어감\") elif value == \"4\": self.logging.logger.debug(\"3시30분", "# 이벤트루프 실행 def login_slot(self, 
err_code): self.logging.logger.debug(errors(err_code)[1]) #로그인 처리가 완료됐으면 이벤트 루프를 종료한다.", "sCode not in self.portfolio_stock_dict: self.portfolio_stock_dict.update({sCode:{}}) self.portfolio_stock_dict[sCode].update({\"체결시간\": a}) self.portfolio_stock_dict[sCode].update({\"현재가\": b}) self.portfolio_stock_dict[sCode].update({\"전일대비\": c}) self.portfolio_stock_dict[sCode].update({\"등락율\": d})", "가져오기 self.portfolio_stock_dict = {} self.jango_dict = {} ######################## ########### 종목 분석 용 self.calcul_data", "+(-)2530 i = abs(int(i)) j = self.dynamicCall(\"GetCommRealData(QString, int)\", sCode, self.realType.REALTYPE[sRealType]['시가']) # 출력 :", "QString)\", \"계좌번호\", self.account_num) self.dynamicCall(\"SetInputValue(QString, QString)\", \"체결구분\", \"1\") self.dynamicCall(\"SetInputValue(QString, QString)\", \"매매구분\", \"0\") self.dynamicCall(\"CommRqData(QString, QString,", "self.day_kiwoom_db(code=code) def day_kiwoom_db(self, code=None, date=None, sPrevNext=\"0\"): QTest.qWait(3600) #3.6초마다 딜레이를 준다. 
self.dynamicCall(\"SetInputValue(QString, QString)\", \"종목코드\",", "% 50) == 0: meme_screen += 1 self.screen_meme_stock = str(meme_screen) if code in", "code = self.not_account_stock_dict[order_num][\"종목코드\"] meme_price = self.not_account_stock_dict[order_num]['주문가격'] not_quantity = self.not_account_stock_dict[order_num]['미체결수량'] order_gubun = self.not_account_stock_dict[order_num]['주문구분'] if", "if sRealType == \"장시작시간\": fid = self.realType.REALTYPE[sRealType]['장운영구분'] # (0:장시작전, 2:장종료전(20분), 3:장시작, 4,8:장종료(30분), 9:장마감)", "bottom_stock_price = False check_price = None if int(self.calcul_data[0][7]) <= moving_average_price and moving_average_price <=", "int)\", sCode, self.realType.REALTYPE[sRealType]['(최우선)매도호가']) # 출력 : +(-)2520 e = abs(int(e)) f = self.dynamicCall(\"GetCommRealData(QString,", "f.close() def merge_dict(self): self.all_stock_dict.update({\"계좌평가잔고내역\": self.account_stock_dict}) self.all_stock_dict.update({'미체결종목': self.not_account_stock_dict}) self.all_stock_dict.update({'포트폴리오종목': self.portfolio_stock_dict}) def screen_number_setting(self): screen_overwrite =", ": +(-)12.98 d = float(d) e = self.dynamicCall(\"GetCommRealData(QString, int)\", sCode, self.realType.REALTYPE[sRealType]['(최우선)매도호가']) # 출력", "# 120일 이평선 위에 있는 구간 존재 self.logging.logger.debug(\"120일치 이평선 위에 있는 구간 확인됨\")", "self.portfolio_stock_dict.update({code: {\"스크린번호\": str(self.screen_real_stock), \"주문용스크린번호\": str(self.screen_meme_stock)}}) cnt += 1 # 실시간 데이터 얻어오기 def", "보유수량: %s - 매입가:%s - 수익률: %s - 현재가: %s\" % ( code,", "value == \"2\": self.logging.logger.debug(\"장 종료, 동시호가로 넘어감\") elif value == \"4\": self.logging.logger.debug(\"3시30분 장", "h}) self.portfolio_stock_dict[sCode].update({\"고가\": i}) self.portfolio_stock_dict[sCode].update({\"시가\": j}) self.portfolio_stock_dict[sCode].update({\"저가\": k}) if sCode in self.account_stock_dict.keys() and sCode", ") if order_success == 0: self.logging.logger.debug(\"매수주문 전달 성공\") else: self.logging.logger.debug(\"매수주문 전달 실패\") 
not_meme_list", "def detail_account_info(self, sPrevNext=\"0\"): self.dynamicCall(\"SetInputValue(QString, QString)\", \"계좌번호\", self.account_num) self.dynamicCall(\"SetInputValue(QString, QString)\", \"비밀번호\", \"0000\") self.dynamicCall(\"SetInputValue(QString, QString)\",", "self.portfolio_stock_dict[sCode].update({\"(최우선)매수호가\": f}) self.portfolio_stock_dict[sCode].update({\"거래량\": g}) self.portfolio_stock_dict[sCode].update({\"누적거래량\": h}) self.portfolio_stock_dict[sCode].update({\"고가\": i}) self.portfolio_stock_dict[sCode].update({\"시가\": j}) self.portfolio_stock_dict[sCode].update({\"저가\": k}) if", "self.get_account_info() #계좌번호 가져오기 self.detail_account_info() #예수금 요청 시그널 포함 self.detail_account_mystock() #계좌평가잔고내역 요청 시그널 포함", "int, QString, QString)\", [\"매수취소\", self.portfolio_stock_dict[sCode][\"주문용스크린번호\"], self.account_num, 3, code, 0, 0, self.realType.SENDTYPE['거래구분']['지정가'], order_num] )", "int(total_profit_loss_money) total_profit_loss_rate = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, 0, \"총수익률(%)\") self.total_profit_loss_rate =", "elif value == \"4\": self.logging.logger.debug(\"3시30분 장 종료\") for code in self.portfolio_stock_dict.keys(): self.dynamicCall(\"SetRealRemove(QString, QString)\",", "관련 이벤트 self.OnReceiveMsg.connect(self.msg_slot) def real_event_slot(self): self.OnReceiveRealData.connect(self.realdata_slot) # 실시간 이벤트 연결 self.OnReceiveChejanData.connect(self.chejan_slot) #종목 주문체결", "모음 :return: ''' code_list = self.get_code_list_by_market(\"10\") self.logging.logger.debug(\"코스닥 갯수 %s \" % len(code_list)) for", "int, QString)\", sTrCode, sRQName, i, \"주문번호\") order_status = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode,", "not in self.jango_dict: self.logging.logger.debug(\"매수조건 통과 %s \" % sCode) result = (self.use_money *", "\"00\") self.dynamicCall(\"SetInputValue(QString, QString)\", \"조회구분\", \"1\") self.dynamicCall(\"CommRqData(QString, QString, int, QString)\", 
\"예수금상세현황요청\", \"opw00001\", sPrevNext, self.screen_my_info)", "#로그인 요청 시그널 포함 self.get_account_info() #계좌번호 가져오기 self.detail_account_info() #예수금 요청 시그널 포함 self.detail_account_mystock()", "번호 self.screen_my_info = \"2000\" #계좌 관련한 스크린 번호 self.screen_calculation_stock = \"4000\" #계산용 스크린", "order_price = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, i, \"주문가격\") order_gubun = self.dynamicCall(\"GetCommData(QString,", "0 and (meme_rate > 5 or meme_rate < -5): order_success = self.dynamicCall( \"SendOrder(QString,", "self.dynamicCall(\"GetCommDataEx(QString, QString)\", sTrCode, sRQName) # [[‘’, ‘현재가’, ‘거래량’, ‘거래대금’, ‘날짜’, ‘시가’, ‘고가’, ‘저가’.", "from PyQt5.QtTest import * from config.kiwoomType import * from config.log_class import * #", ":param market_code: 시장코드 입력 :return: ''' code_list = self.dynamicCall(\"GetCodeListByMarket(QString)\", market_code) code_list = code_list.split(';')[:-1]", "line.split(\"\\t\") stock_code = ls[0] stock_name = ls[1] stock_price = int(ls[2].split(\"\\n\")[0]) stock_price = abs(stock_price)", ") if order_success == 0: self.logging.logger.debug(\"매도주문 전달 성공\") else: self.logging.logger.debug(\"매도주문 전달 실패\") elif", "moving_average_price_prev and idx > 20: # 120일 이평선 위에 있는 구간 존재 self.logging.logger.debug(\"120일치", "self.dynamicCall(\"GetCommRealData(QString, int)\", sCode, self.realType.REALTYPE[sRealType]['누적거래량']) # 출력 : 240124 h = abs(int(h)) i =", "current_price = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['잔고']['현재가']) current_price = abs(int(current_price)) stock_quan = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['잔고']['보유수량']) stock_quan =", "sCode, self.realType.REALTYPE[sRealType]['시가']) # 출력 : +(-)2530 j = abs(int(j)) k = self.dynamicCall(\"GetCommRealData(QString, int)\",", "self.realType.REALTYPE[sRealType]['누적거래량']) # 출력 : 240124 h = abs(int(h)) i = self.dynamicCall(\"GetCommRealData(QString, int)\", sCode,", "QString, int, QString)\", sTrCode, 
sRQName, i, \"종목명\") # 출럭 : 한국기업평가 stock_quantity =", "first_buy_price = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['잔고']['(최우선)매수호가']) first_buy_price = abs(int(first_buy_price)) if sCode not in self.jango_dict.keys(): self.jango_dict.update({sCode:{}})", "first_sell_price}) self.not_account_stock_dict[order_number].update({\"(최우선)매수호가\": first_buy_price}) elif int(sGubun) == 1: #잔고 account_num = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['잔고']['계좌번호']) sCode", "self.dynamicCall(\"GetCommRealData(QString, int)\", sCode, self.realType.REALTYPE[sRealType]['거래량']) # 출력 : +240124 매수일때, -2034 매도일 때 g", "체결 정보 def chejan_slot(self, sGubun, nItemCnt, sFidList): if int(sGubun) == 0: #주문체결 account_num", "in range(rows): code = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, i, \"종목코드\") code_nm", "확인 prev_price = None if bottom_stock_price == True: moving_average_price_prev = 0 price_top_moving =", "ETN종목 code = code.strip()[1:] code_nm = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, i,", "연결 self.OnReceiveChejanData.connect(self.chejan_slot) #종목 주문체결 관련한 이벤트 def signal_login_commConnect(self): self.dynamicCall(\"CommConnect()\") # 로그인 요청 시그널", "i, \"종목명\") # 출럭 : 한국기업평가 stock_quantity = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode,", "{}}) self.not_account_stock_dict[order_number].update({\"종목코드\": sCode}) self.not_account_stock_dict[order_number].update({\"주문번호\": order_number}) self.not_account_stock_dict[order_number].update({\"종목명\": stock_name}) self.not_account_stock_dict[order_number].update({\"주문상태\": order_status}) self.not_account_stock_dict[order_number].update({\"주문수량\": order_quan}) self.not_account_stock_dict[order_number].update({\"주문가격\": order_price})", "수익률: %s - 현재가: %s\" % ( code, code_nm, stock_quantity, buy_price, learn_rate, current_price))", "{} code_nm = code_nm.strip() stock_quantity = 
int(stock_quantity.strip()) buy_price = int(buy_price.strip()) learn_rate = float(learn_rate.strip())", "sPrevNext == \"2\": self.day_kiwoom_db(code=code, sPrevNext=sPrevNext) else: self.logging.logger.debug(\"총 일수 %s\" % len(self.calcul_data)) pass_success =", "== True: if moving_average_price > moving_average_price_prev and check_price > prev_price: self.logging.logger.debug(\"포착된 이평선의 가격이", "class start.\") ####### event loop를 실행하기 위한 변수모음 self.login_event_loop = QEventLoop() #로그인 요청용", "변환해 주는 함수 self.event_slots() # 키움과 연결하기 위한 시그널 / 슬롯 모음 self.real_event_slot()", "QString)\", [\"신규매도\", self.portfolio_stock_dict[sCode][\"주문용스크린번호\"], self.account_num, 2, sCode, jd['주문가능수량'], 0, self.realType.SENDTYPE['거래구분']['시장가'], \"\"] ) if order_success", "i, \"주문가격\") order_gubun = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, i, \"주문구분\") #", "+(-)2515 f = abs(int(f)) g = self.dynamicCall(\"GetCommRealData(QString, int)\", sCode, self.realType.REALTYPE[sRealType]['거래량']) # 출력 :", "# 출력: -6000 first_buy_price = abs(int(first_buy_price)) ######## 새로 들어온 주문이면 주문번호 할당 if", "== 0: self.logging.logger.debug(\"매수주문 전달 성공\") else: self.logging.logger.debug(\"매수주문 전달 실패\") not_meme_list = list(self.not_account_stock_dict) for", "연결 끊기 def get_code_list_by_market(self, market_code): ''' 종목코드 리스트 받기 #0:장내, 10:코스닥 :param market_code:", "\"2000\" #계좌 관련한 스크린 번호 self.screen_calculation_stock = \"4000\" #계산용 스크린 번호 self.screen_real_stock =", "= self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, i, \"종목명\") # 출럭 : 한국기업평가", "in not_meme_list: code = self.not_account_stock_dict[order_num][\"종목코드\"] meme_price = self.not_account_stock_dict[order_num]['주문가격'] not_quantity = self.not_account_stock_dict[order_num]['미체결수량'] order_gubun =", "asd['매입가'] * 100 if asd['매매가능수량'] > 0 and (meme_rate > 5 or meme_rate", "= abs(int(c)) d = self.dynamicCall(\"GetCommRealData(QString, int)\", sCode, 
self.realType.REALTYPE[sRealType]['등락율']) # 출력 : +(-)12.98 d", "self.realType.REALTYPE['주문체결']['주문상태']) # 출력: 접수, 확인, 체결 order_quan = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['주문체결']['주문수량']) # 출력 :", "시작/종료 실시간 스크린번호 ######################################## ######### 초기 셋팅 함수들 바로 실행 self.get_ocx_instance() #OCX 방식을", "= abs(int(j)) k = self.dynamicCall(\"GetCommRealData(QString, int)\", sCode, self.realType.REALTYPE[sRealType]['저가']) # 출력 : +(-)2530 k", "a}) self.portfolio_stock_dict[sCode].update({\"현재가\": b}) self.portfolio_stock_dict[sCode].update({\"전일대비\": c}) self.portfolio_stock_dict[sCode].update({\"등락율\": d}) self.portfolio_stock_dict[sCode].update({\"(최우선)매도호가\": e}) self.portfolio_stock_dict[sCode].update({\"(최우선)매수호가\": f}) self.portfolio_stock_dict[sCode].update({\"거래량\": g})", "= self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, i, \"주문수량\") order_price = self.dynamicCall(\"GetCommData(QString, QString,", "sPrevNext, self.screen_my_info) self.detail_account_info_event_loop.exec_() def detail_account_mystock(self, sPrevNext=\"0\"): self.dynamicCall(\"SetInputValue(QString, QString)\", \"계좌번호\", self.account_num) self.dynamicCall(\"SetInputValue(QString, QString)\", \"비밀번호\",", ": %s \" % self.not_account_stock_dict[order_no]) self.detail_account_info_event_loop.exit() elif sRQName == \"주식일봉차트조회\": code = self.dynamicCall(\"GetCommData(QString,", "'' if chegual_quantity == '': chegual_quantity = 0 else: chegual_quantity = int(chegual_quantity) current_price", "if sCode in self.account_stock_dict.keys() and sCode not in self.jango_dict.keys(): asd = self.account_stock_dict[sCode] meme_rate", "stock_name = ls[1] stock_price = int(ls[2].split(\"\\n\")[0]) stock_price = abs(stock_price) self.portfolio_stock_dict.update({stock_code:{\"종목명\":stock_name, \"현재가\":stock_price}}) f.close() def", "current_price = abs(int(current_price)) first_sell_price = self.dynamicCall(\"GetChejanData(int)\", 
self.realType.REALTYPE['주문체결']['(최우선)매도호가']) # 출력: -6010 first_sell_price = abs(int(first_sell_price))", "in self.account_stock_dict.keys(): if code not in screen_overwrite: screen_overwrite.append(code) #미체결에 있는 종목들 for order_number", "= self.dynamicCall(\"GetMasterCodeName(QString)\", code) f = open(\"files/condition_stock.txt\", \"a\", encoding=\"utf8\") f.write(\"%s\\t%s\\t%s\\n\" % (code, code_nm, str(self.calcul_data[0][1])))", "possible_quantity = int(possible_quantity.strip()) self.account_stock_dict[code].update({\"종목명\": code_nm}) self.account_stock_dict[code].update({\"보유수량\": stock_quantity}) self.account_stock_dict[code].update({\"매입가\": buy_price}) self.account_stock_dict[code].update({\"수익률(%)\": learn_rate}) self.account_stock_dict[code].update({\"현재가\": current_price})", "0, \"출금가능금액\") self.output_deposit = int(output_deposit) self.logging.logger.debug(\"예수금 : %s\" % self.output_deposit) self.stop_screen_cancel(self.screen_my_info) self.detail_account_info_event_loop.exit() elif", "self.realType.REALTYPE['잔고']['(최우선)매수호가']) first_buy_price = abs(int(first_buy_price)) if sCode not in self.jango_dict.keys(): self.jango_dict.update({sCode:{}}) self.jango_dict[sCode].update({\"현재가\": current_price}) self.jango_dict[sCode].update({\"종목코드\":", "self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, 0, \"예수금\") self.deposit = int(deposit) use_money =", "len(self.calcul_data)) pass_success = False # 120일 이평선을 그릴만큼의 데이터가 있는지 체크 if self.calcul_data", "함수 모음 :return: ''' code_list = self.get_code_list_by_market(\"10\") self.logging.logger.debug(\"코스닥 갯수 %s \" % len(code_list))", "= self.realType.REALTYPE['주식체결']['체결시간'] self.dynamicCall(\"SetRealReg(QString, QString, QString, QString)\", screen_num, code, fids, \"1\") self.slack.notification( pretext=\"주식자동화 프로그램", "import * from PyQt5.QtCore import * from config.errorCode import * from PyQt5.QtTest import", "asd['매매가능수량'] > 0 and (meme_rate > 5 or meme_rate < -5): order_success 
=", "스크린 번호 self.screen_meme_stock = \"6000\" #종목별 할당할 주문용스크린 번호 self.screen_start_stop_real = \"1000\" #장", "sRQName, 0, \"총수익률(%)\") self.total_profit_loss_rate = float(total_profit_loss_rate) self.logging.logger.debug(\"계좌평가잔고내역요청 싱글데이터 : %s - %s -", ": +(-)2520 e = abs(int(e)) f = self.dynamicCall(\"GetCommRealData(QString, int)\", sCode, self.realType.REALTYPE[sRealType]['(최우선)매수호가']) # 출력", "self.realType.REALTYPE['주문체결']['주문가격']) # 출력: 21000 order_price = int(order_price) not_chegual_quan = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['주문체결']['미체결수량']) # 출력:", "장내주식, J는 ELW종목, Q는 ETN종목 code = code.strip()[1:] code_nm = self.dynamicCall(\"GetCommData(QString, QString, int,", "self.account_stock_dict: # dictionary 에 해당 종목이 있나 확인 pass else: self.account_stock_dict[code] = {}", "QString, int, QString)\", sTrCode, sRQName, i, \"현재가\") # 현재가 : 000000003450 total_chegual_price =", "'3': self.logging.logger.debug(\"장 시작\") elif value == \"2\": self.logging.logger.debug(\"장 종료, 동시호가로 넘어감\") elif value", "\"1\") self.dynamicCall(\"SetInputValue(QString, QString)\", \"매매구분\", \"0\") self.dynamicCall(\"CommRqData(QString, QString, int, QString)\", \"실시간미체결요청\", \"opt10075\", sPrevNext, self.screen_my_info)", "str(meme_screen) if code in self.portfolio_stock_dict.keys(): self.portfolio_stock_dict[code].update({\"스크린번호\": str(self.screen_real_stock)}) self.portfolio_stock_dict[code].update({\"주문용스크린번호\": str(self.screen_meme_stock)}) elif code not in", "self.portfolio_stock_dict.keys(): self.portfolio_stock_dict[code].update({\"스크린번호\": str(self.screen_real_stock)}) self.portfolio_stock_dict[code].update({\"주문용스크린번호\": str(self.screen_meme_stock)}) elif code not in self.portfolio_stock_dict.keys(): self.portfolio_stock_dict.update({code: {\"스크린번호\": str(self.screen_real_stock),", "= self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, 0, \"종목코드\") code = code.strip() #", ": 240124 h = abs(int(h)) i = 
self.dynamicCall(\"GetCommRealData(QString, int)\", sCode, self.realType.REALTYPE[sRealType]['고가']) # 출력", "order_gubun = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, i, \"주문구분\") # -매도, +매수,", "= 0 #출력가능 금액 self.total_profit_loss_money = 0 #총평가손익금액 self.total_profit_loss_rate = 0.0 #총수익률(%) ########################################", "뒤에 미체결 종목들 가져오기 실행 ######################################### QTest.qWait(10000) self.read_code() self.screen_number_setting() QTest.qWait(5000) #실시간 수신 관련", "Logging() # self.slack = Slack() #슬랙 동작 #print(\"kiwoom() class start. \") self.logging.logger.debug(\"Kiwoom() class", "order_gubun.strip().lstrip('+').lstrip('-') chegual_time_str = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['주문체결']['주문/체결시간']) # 출력: '151028' chegual_price = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['주문체결']['체결가']) #", "\"비밀번호입력매체구분\", \"00\") self.dynamicCall(\"SetInputValue(QString, QString)\", \"조회구분\", \"1\") self.dynamicCall(\"CommRqData(QString, QString, int, QString)\", \"계좌평가잔고내역요청\", \"opw00018\", sPrevNext,", "== \"주식일봉차트조회\": code = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, 0, \"종목코드\") code", "total_profit_loss_money = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, 0, \"총평가손익금액\") self.total_profit_loss_money = int(total_profit_loss_money)", "sTrCode, sRQName, 0, \"총수익률(%)\") self.total_profit_loss_rate = float(total_profit_loss_rate) self.logging.logger.debug(\"계좌평가잔고내역요청 싱글데이터 : %s - %s", "# 접수,확인,체결 order_quantity = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, i, \"주문수량\") order_price", "= \"2000\" #계좌 관련한 스크린 번호 self.screen_calculation_stock = \"4000\" #계산용 스크린 번호 self.screen_real_stock", "20: # 120일 이평선 위에 있는 구간 존재 self.logging.logger.debug(\"120일치 이평선 위에 있는 구간", "else: self.account_stock_dict[code] = {} code_nm = code_nm.strip() stock_quantity = 
int(stock_quantity.strip()) buy_price = int(buy_price.strip())", "1: #잔고 account_num = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['잔고']['계좌번호']) sCode = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['잔고']['종목코드'])[1:] stock_name = self.dynamicCall(\"GetChejanData(int)\",", "0 for value in self.calcul_data[idx:120+idx]: total_price += int(value[1]) moving_average_price_prev = total_price / 120", "self.realType.REALTYPE['장시작시간']['장운영구분'], \"0\") for code in self.portfolio_stock_dict.keys(): screen_num = self.portfolio_stock_dict[code]['스크린번호'] fids = self.realType.REALTYPE['주식체결']['체결시간'] self.dynamicCall(\"SetRealReg(QString,", "self.not_account_stock_dict[order_no] = {} self.not_account_stock_dict[order_no].update({'종목코드': code}) self.not_account_stock_dict[order_no].update({'종목명': code_nm}) self.not_account_stock_dict[order_no].update({'주문번호': order_no}) self.not_account_stock_dict[order_no].update({'주문상태': order_status}) self.not_account_stock_dict[order_no].update({'주문수량': order_quantity})", "‘고가’, ‘저가’, ‘’]. 
[…]] cnt = self.dynamicCall(\"GetRepeatCnt(QString, QString)\", sTrCode, sRQName) self.logging.logger.debug(\"남은 일자 수", "False: self.logging.logger.debug(\"조건부 통과 못함\") self.calcul_data.clear() self.calculator_event_loop.exit() def stop_screen_cancel(self, sScrNo=None): self.dynamicCall(\"DisconnectRealData(QString)\", sScrNo) # 스크린번호", ": 000070 high_price = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, i, \"고가\") #", "enumerate(code_list): self.dynamicCall(\"DisconnectRealData(QString)\", self.screen_calculation_stock) # 스크린 연결 끊기 self.logging.logger.debug(\"%s / %s : KOSDAQ Stock", "키움과 연결하기 위한 시그널 / 슬롯 모음 self.real_event_slot() # 실시간 이벤트 시그널 /", "출력 : 3 order_quan = int(order_quan) order_price = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['주문체결']['주문가격']) # 출력: 21000", "> 0 and (meme_rate > 5 or meme_rate < -5): order_success = self.dynamicCall(", "if sCode not in self.portfolio_stock_dict: self.portfolio_stock_dict.update({sCode:{}}) self.portfolio_stock_dict[sCode].update({\"체결시간\": a}) self.portfolio_stock_dict[sCode].update({\"현재가\": b}) self.portfolio_stock_dict[sCode].update({\"전일대비\": c}) self.portfolio_stock_dict[sCode].update({\"등락율\":", "self.dynamicCall(\"SetInputValue(QString, QString)\", \"조회구분\", \"1\") self.dynamicCall(\"CommRqData(QString, QString, int, QString)\", \"계좌평가잔고내역요청\", \"opw00018\", sPrevNext, self.screen_my_info) self.detail_account_info_event_loop.exec_()", "‘거래대금’, ‘날짜’, ‘시가’, ‘고가’, ‘저가’, ‘’]. 
[…]] cnt = self.dynamicCall(\"GetRepeatCnt(QString, QString)\", sTrCode, sRQName)", "self.portfolio_stock_dict[code].update({\"스크린번호\": str(self.screen_real_stock)}) self.portfolio_stock_dict[code].update({\"주문용스크린번호\": str(self.screen_meme_stock)}) elif code not in self.portfolio_stock_dict.keys(): self.portfolio_stock_dict.update({code: {\"스크린번호\": str(self.screen_real_stock), \"주문용스크린번호\":", "int, int, QString, QString)\", [\"신규매도\", self.portfolio_stock_dict[sCode][\"주문용스크린번호\"], self.account_num, 2, sCode, asd['매매가능수량'], 0, self.realType.SENDTYPE['거래구분']['시장가'], \"\"]", "int, QString, int, int, QString, QString)\", [\"신규매수\", self.portfolio_stock_dict[sCode][\"주문용스크린번호\"], self.account_num, 1, sCode, quantity, e,", "if chegual_quantity == '': chegual_quantity = 0 else: chegual_quantity = int(chegual_quantity) current_price =", "같거나 위에 있으면 조건 통과 못함\") price_top_moving = False break elif int(self.calcul_data[idx][7]) >", "self.dynamicCall(\"DisconnectRealData(QString)\", self.screen_calculation_stock) # 스크린 연결 끊기 self.logging.logger.debug(\"%s / %s : KOSDAQ Stock Code", "self.realType.REALTYPE['주문체결']['계좌번호']) sCode = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['주문체결']['종목코드'])[1:] stock_name = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['주문체결']['종목명']) stock_name = stock_name.strip() origin_order_number", "value = self.dynamicCall(\"GetCommRealData(QString, int)\", sCode, fid) if value == '0': self.logging.logger.debug(\"장 시작 전\")", "order_success == 0: self.logging.logger.debug(\"매도주문 전달 성공\") del self.account_stock_dict[sCode] else: self.logging.logger.debug(\"매도주문 전달 실패\") elif", "= int(buy_price.strip()) learn_rate = float(learn_rate.strip()) current_price = int(current_price.strip()) total_chegual_price = int(total_chegual_price.strip()) possible_quantity =", "#총평가손익금액 self.total_profit_loss_rate = 0.0 #총수익률(%) ######################################## ######## 종목 정보 가져오기 self.portfolio_stock_dict = {}", "* class 
Kiwoom(QAxWidget): def __init__(self): super().__init__() self.realType = RealType() self.logging = Logging() #", "self.realType.REALTYPE['주문체결']['체결가']) # 출력: 2110 default : '' if chegual_price == '': chegual_price =", "QString, int, QString)\", sTrCode, sRQName, i, \"주문번호\") order_status = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\",", "order_no = int(order_no.strip()) order_status = order_status.strip() order_quantity = int(order_quantity.strip()) order_price = int(order_price.strip()) order_gubun", "= self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['주문체결']['주문구분']) # 출력: -매도, +매수 order_gubun = order_gubun.strip().lstrip('+').lstrip('-') chegual_time_str = self.dynamicCall(\"GetChejanData(int)\",", "meme_gubun}) self.jango_dict[sCode].update({\"(최우선)매도호가\": first_sell_price}) self.jango_dict[sCode].update({\"(최우선)매수호가\": first_buy_price}) if stock_quan == 0: del self.jango_dict[sCode] #송수신 메세지", "int)\", sCode, self.realType.REALTYPE[sRealType]['거래량']) # 출력 : +240124 매수일때, -2034 매도일 때 g =", "sScrNo=None): self.dynamicCall(\"DisconnectRealData(QString)\", sScrNo) # 스크린번호 연결 끊기 def get_code_list_by_market(self, market_code): ''' 종목코드 리스트", "self.dynamicCall(\"GetCommRealData(QString, int)\", sCode, self.realType.REALTYPE[sRealType]['체결시간']) # 출력 HHMMSS b = self.dynamicCall(\"GetCommRealData(QString, int)\", sCode, self.realType.REALTYPE[sRealType]['현재가'])", "self.dynamicCall(\"GetCommRealData(QString, int)\", sCode, self.realType.REALTYPE[sRealType]['저가']) # 출력 : +(-)2530 k = abs(int(k)) if sCode", "sRQName, i, \"일자\") # 출력 : 000070 start_price = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\",", "def msg_slot(self, sScrNo, sRQName, sTrCode, msg): self.logging.logger.debug(\"스크린: %s, 요청이름: %s, tr코드: %s ---", "stock_quantity = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, i, \"보유수량\") # 보유수량 :", "e, self.realType.SENDTYPE['거래구분']['지정가'], \"\"] ) if order_success == 0: 
self.logging.logger.debug(\"매수주문 전달 성공\") else: self.logging.logger.debug(\"매수주문", "요청용 이벤트루프 self.detail_account_info_event_loop = QEventLoop() # 예수금 요청용 이벤트루프 self.calculator_event_loop = QEventLoop() #########################################", "if code not in screen_overwrite: screen_overwrite.append(code) #포트폴리로에 담겨있는 종목들 for code in self.portfolio_stock_dict.keys():", "and idx <= 20: self.logging.logger.debug(\"20일 동안 주가가 120일 이평선과 같거나 위에 있으면 조건", "구간 존재 self.logging.logger.debug(\"120일치 이평선 위에 있는 구간 확인됨\") price_top_moving = True prev_price =", "매입가 : 000000000054100 learn_rate = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, i, \"수익률(%)\")", "code_nm, stock_quantity, buy_price, learn_rate, current_price)) if code in self.account_stock_dict: # dictionary 에 해당", "for value in self.calcul_data[idx:120+idx]: total_price += int(value[1]) moving_average_price_prev = total_price / 120 if", "self.realType.REALTYPE[sRealType]['등락율']) # 출력 : +(-)12.98 d = float(d) e = self.dynamicCall(\"GetCommRealData(QString, int)\", sCode,", "= self.dynamicCall( \"SendOrder(QString, QString, QString, int, QString, int, int, QString, QString)\", [\"신규매도\", self.portfolio_stock_dict[sCode][\"주문용스크린번호\"],", "== \"실시간미체결요청\": rows = self.dynamicCall(\"GetRepeatCnt(QString, QString)\", sTrCode, sRQName) for i in range(rows): code", "번호 self.screen_meme_stock = \"6000\" #종목별 할당할 주문용스크린 번호 self.screen_start_stop_real = \"1000\" #장 시작/종료", "sys.exit() elif sRealType == \"주식체결\": a = self.dynamicCall(\"GetCommRealData(QString, int)\", sCode, self.realType.REALTYPE[sRealType]['체결시간']) # 출력", "self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, 0, \"총평가손익금액\") self.total_profit_loss_money = int(total_profit_loss_money) total_profit_loss_rate =", "and sCode not in self.jango_dict: self.logging.logger.debug(\"매수조건 통과 %s \" % sCode) result =", "self.detail_account_info() #예수금 요청 시그널 포함 self.detail_account_mystock() #계좌평가잔고내역 요청 
시그널 포함 QTimer.singleShot(5000, self.not_concluded_account) #5초", "\"조회구분\", \"1\") self.dynamicCall(\"CommRqData(QString, QString, int, QString)\", \"계좌평가잔고내역요청\", \"opw00018\", sPrevNext, self.screen_my_info) self.detail_account_info_event_loop.exec_() def not_concluded_account(self,", "self.logging.logger.debug(\"매도주문 전달 성공\") else: self.logging.logger.debug(\"매도주문 전달 실패\") elif d > 2.0 and sCode", "50) == 0: meme_screen += 1 self.screen_meme_stock = str(meme_screen) if code in self.portfolio_stock_dict.keys():", "self.detail_account_mystock() #계좌평가잔고내역 요청 시그널 포함 QTimer.singleShot(5000, self.not_concluded_account) #5초 뒤에 미체결 종목들 가져오기 실행", "접수, 확인, 체결 order_quan = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['주문체결']['주문수량']) # 출력 : 3 order_quan =", "QString, int, QString)\", \"계좌평가잔고내역요청\", \"opw00018\", sPrevNext, self.screen_my_info) self.detail_account_info_event_loop.exec_() def not_concluded_account(self, sPrevNext=\"0\"): self.dynamicCall(\"SetInputValue(QString, QString)\",", "QString, int, QString)\", \"실시간미체결요청\", \"opt10075\", sPrevNext, self.screen_my_info) self.detail_account_info_event_loop.exec_() def trdata_slot(self, sScrNo, sRQName, sTrCode,", "b = self.dynamicCall(\"GetCommRealData(QString, int)\", sCode, self.realType.REALTYPE[sRealType]['현재가']) # 출력 : +(-)2520 b = abs(int(b))", "= (b - asd['매입가']) / asd['매입가'] * 100 if asd['매매가능수량'] > 0 and", "f = open(\"files/condition_stock.txt\", \"r\", encoding=\"utf8\") # \"r\"을 인자로 던져주면 파일 내용을 읽어 오겠다는", "str(self.screen_meme_stock)}) elif code not in self.portfolio_stock_dict.keys(): self.portfolio_stock_dict.update({code: {\"스크린번호\": str(self.screen_real_stock), \"주문용스크린번호\": str(self.screen_meme_stock)}}) cnt +=", "저가가 오늘자 주가의 고가보다 낮은지 확인\") pass_success = True if pass_success == True:", "0: self.logging.logger.debug(\"매수취소 전달 성공\") else: self.logging.logger.debug(\"매수취소 전달 실패\") elif not_quantity == 0: del", "int)\", sCode, self.realType.REALTYPE[sRealType]['누적거래량']) # 출력 : 240124 h 
= abs(int(h)) i = self.dynamicCall(\"GetCommRealData(QString,", "self.detail_account_info_event_loop.exit() elif sRQName == \"주식일봉차트조회\": code = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName,", "이평선에 걸쳐있는지 확인 bottom_stock_price = False check_price = None if int(self.calcul_data[0][7]) <= moving_average_price", "출럭 : 한국기업평가 stock_quantity = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, i, \"보유수량\")", "= self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, 0, \"총매입금액\") self.total_buy_money = int(total_buy_money) total_profit_loss_money", "chegual_quantity}) self.not_account_stock_dict[order_number].update({\"현재가\": current_price}) self.not_account_stock_dict[order_number].update({\"(최우선)매도호가\": first_sell_price}) self.not_account_stock_dict[order_number].update({\"(최우선)매수호가\": first_buy_price}) elif int(sGubun) == 1: #잔고 account_num", "in lines: #줄바꿈된 내용들이 한줄 씩 읽어와진다. if line != \"\": ls =", "self.logging.logger.debug(\"매도주문 전달 성공\") del self.account_stock_dict[sCode] else: self.logging.logger.debug(\"매도주문 전달 실패\") elif sCode in self.jango_dict.keys():", "e = self.dynamicCall(\"GetCommRealData(QString, int)\", sCode, self.realType.REALTYPE[sRealType]['(최우선)매도호가']) # 출력 : +(-)2520 e = abs(int(e))", "/ 슬롯 모음 self.real_event_slot() # 실시간 이벤트 시그널 / 슬롯 연결 self.signal_login_commConnect() #로그인", "int, QString, QString)\", [\"신규매도\", self.portfolio_stock_dict[sCode][\"주문용스크린번호\"], self.account_num, 2, sCode, jd['주문가능수량'], 0, self.realType.SENDTYPE['거래구분']['시장가'], \"\"] )", "\"주문가격\") order_gubun = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, i, \"주문구분\") # -매도,", "처리가 완료됐으면 이벤트 루프를 종료한다. 
self.login_event_loop.exit() def get_account_info(self): account_list = self.dynamicCall(\"GetLoginInfo(QString)\", \"ACCNO\") #", "= False idx = 1 while True: if len(self.calcul_data[idx:]) < 120: # 120일치가", "prev_price: self.logging.logger.debug(\"포착된 이평선의 가격이 오늘자 이평선 가격보다 낮은 것 확인\") self.logging.logger.debug(\"포착된 부분의 저가가", "g = self.dynamicCall(\"GetCommRealData(QString, int)\", sCode, self.realType.REALTYPE[sRealType]['거래량']) # 출력 : +240124 매수일때, -2034 매도일", "-5): order_success = self.dynamicCall( \"SendOrder(QString, QString, QString, int, QString, int, int, QString, QString)\",", "########################################## ####### 요청 스크린 번호 self.screen_my_info = \"2000\" #계좌 관련한 스크린 번호 self.screen_calculation_stock", "code in self.account_stock_dict.keys(): if code not in screen_overwrite: screen_overwrite.append(code) #미체결에 있는 종목들 for", "in screen_overwrite: screen_overwrite.append(code) # 스크린번호 할당 cnt = 0 for code in screen_overwrite:", "order_price}) self.not_account_stock_dict[order_number].update({\"미체결수량\": not_chegual_quan}) self.not_account_stock_dict[order_number].update({\"원주문번호\": origin_order_number}) self.not_account_stock_dict[order_number].update({\"주문구분\": order_gubun}) self.not_account_stock_dict[order_number].update({\"주문/체결시간\": chegual_time_str}) self.not_account_stock_dict[order_number].update({\"체결가\": chegual_price}) self.not_account_stock_dict[order_number].update({\"체결량\": chegual_quantity})", "# 출력 : 000070 date = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, i,", "#계좌 관련한 스크린 번호 self.screen_calculation_stock = \"4000\" #계산용 스크린 번호 self.screen_real_stock = \"5000\"", "가지고 있는 종목은 %s \" % rows) if sPrevNext == \"2\": self.detail_account_mystock(sPrevNext=\"2\") else:", "= self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, i, \"주문번호\") order_status = self.dynamicCall(\"GetCommData(QString, QString,", "self.realType.REALTYPE['잔고']['계좌번호']) sCode = 
self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['잔고']['종목코드'])[1:] stock_name = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['잔고']['종목명']) stock_name = stock_name.strip() current_price", "QString)\", sTrCode, sRQName, i, \"일자\") # 출력 : 000070 start_price = self.dynamicCall(\"GetCommData(QString, QString,", "else: # 120일 이평선의 최근 가격 구함 total_price = 0 for value in", "고가보다 낮은지 확인\") pass_success = True if pass_success == True: self.logging.logger.debug(\"조건부 통과됨\") code_nm", "출력 HHMMSS b = self.dynamicCall(\"GetCommRealData(QString, int)\", sCode, self.realType.REALTYPE[sRealType]['현재가']) # 출력 : +(-)2520 b", "int, QString, int, int, QString, QString)\", [\"신규매도\", self.portfolio_stock_dict[sCode][\"주문용스크린번호\"], self.account_num, 2, sCode, asd['매매가능수량'], 0,", "금액 self.total_profit_loss_money = 0 #총평가손익금액 self.total_profit_loss_rate = 0.0 #총수익률(%) ######################################## ######## 종목 정보", "\"총평가손익금액\") self.total_profit_loss_money = int(total_profit_loss_money) total_profit_loss_rate = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, 0,", "= self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, i, \"거래량\") # 출력 : 000070", "pass_success = False # 120일 이평선을 그릴만큼의 데이터가 있는지 체크 if self.calcul_data ==", "= self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['주문체결']['주문번호']) # 출럭: 0115061 마지막 주문번호 order_status = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['주문체결']['주문상태']) #", "self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, i, \"종목번호\") # 출력 : A039423 //", "= self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['주문체결']['(최우선)매도호가']) # 출력: -6010 first_sell_price = abs(int(first_sell_price)) first_buy_price = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['주문체결']['(최우선)매수호가'])", "self.not_account_stock_dict[order_number].update({\"원주문번호\": origin_order_number}) 
self.not_account_stock_dict[order_number].update({\"주문구분\": order_gubun}) self.not_account_stock_dict[order_number].update({\"주문/체결시간\": chegual_time_str}) self.not_account_stock_dict[order_number].update({\"체결가\": chegual_price}) self.not_account_stock_dict[order_number].update({\"체결량\": chegual_quantity}) self.not_account_stock_dict[order_number].update({\"현재가\": current_price}) self.not_account_stock_dict[order_number].update({\"(최우선)매도호가\":", "0 else: chegual_price = int(chegual_price) chegual_quantity = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['주문체결']['체결량']) # 출력: 5 default", "sRQName, i, \"종목명\") order_no = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, i, \"주문번호\")", "= self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['주문체결']['주문/체결시간']) # 출력: '151028' chegual_price = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['주문체결']['체결가']) # 출력: 2110", "True check_price = int(self.calcul_data[0][6]) # 과거 일봉 데이터를 조회하면서 120일 이평선보다 주가가 계속", "import * from config.log_class import * # from config.slack import * class Kiwoom(QAxWidget):", "int(chegual_quantity) current_price = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['주문체결']['현재가']) # 출력: -6000 current_price = abs(int(current_price)) first_sell_price =", "self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, i, \"주문가격\") order_gubun = self.dynamicCall(\"GetCommData(QString, QString, int,", "self.login_event_loop = QEventLoop() #로그인 요청용 이벤트루프 self.detail_account_info_event_loop = QEventLoop() # 예수금 요청용 이벤트루프", "#포트폴리로에 담겨있는 종목들 for code in self.portfolio_stock_dict.keys(): if code not in screen_overwrite: screen_overwrite.append(code)", "= QEventLoop() #로그인 요청용 이벤트루프 self.detail_account_info_event_loop = QEventLoop() # 예수금 요청용 이벤트루프 self.calculator_event_loop", "self.screen_calculation_stock = \"4000\" #계산용 스크린 번호 self.screen_real_stock = \"5000\" #종목별 할당할 스크린 번호", "= 
int(total_profit_loss_money) total_profit_loss_rate = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, 0, \"총수익률(%)\") self.total_profit_loss_rate", "= abs(int(e)) f = self.dynamicCall(\"GetCommRealData(QString, int)\", sCode, self.realType.REALTYPE[sRealType]['(최우선)매수호가']) # 출력 : +(-)2515 f", "sTrCode, sRQName, i, \"체결량\") code = code.strip() code_nm = code_nm.strip() order_no = int(order_no.strip())", "= self.dynamicCall(\"GetCommDataEx(QString, QString)\", sTrCode, sRQName) # [[‘’, ‘현재가’, ‘거래량’, ‘거래대금’, ‘날짜’, ‘시가’, ‘고가’,", "sCode, fid) if value == '0': self.logging.logger.debug(\"장 시작 전\") elif value == '3':", "= self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, i, \"주문구분\") # -매도, +매수, -매도정정,", "chegual_price = int(chegual_price) chegual_quantity = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['주문체결']['체결량']) # 출력: 5 default : ''", "실패\") elif not_quantity == 0: del self.not_account_stock_dict[order_num] # 실시간 체결 정보 def chejan_slot(self,", "self.portfolio_stock_dict[sCode].update({\"고가\": i}) self.portfolio_stock_dict[sCode].update({\"시가\": j}) self.portfolio_stock_dict[sCode].update({\"저가\": k}) if sCode in self.account_stock_dict.keys() and sCode not", "api 모듈 불러오기 def event_slots(self): self.OnEventConnect.connect(self.login_slot) # 로그인 관련 이벤트 self.OnReceiveTrData.connect(self.trdata_slot) # 트랜잭션", "\") self.logging.logger.debug(\"Kiwoom() class start.\") ####### event loop를 실행하기 위한 변수모음 self.login_event_loop = QEventLoop()", "self.calcul_data.append(data.copy()) if sPrevNext == \"2\": self.day_kiwoom_db(code=code, sPrevNext=sPrevNext) else: self.logging.logger.debug(\"총 일수 %s\" % len(self.calcul_data))", "int(value[1]) moving_average_price = total_price / 120 # 오늘자 주가가 120일 이평선에 걸쳐있는지 확인", "i}) self.portfolio_stock_dict[sCode].update({\"시가\": j}) self.portfolio_stock_dict[sCode].update({\"저가\": k}) if sCode in self.account_stock_dict.keys() and sCode not in", "넘어감\") elif 
value == \"4\": self.logging.logger.debug(\"3시30분 장 종료\") for code in self.portfolio_stock_dict.keys(): self.dynamicCall(\"SetRealRemove(QString,", "total_profit_loss_rate)) rows = self.dynamicCall(\"GetRepeatCnt(QString, QString)\", sTrCode, sRQName) for i in range(rows): code =", "가격 구함 total_price = 0 for value in self.calcul_data[:120]: total_price += int(value[1]) moving_average_price", "buy_price = int(buy_price.strip()) learn_rate = float(learn_rate.strip()) current_price = int(current_price.strip()) total_chegual_price = int(total_chegual_price.strip()) possible_quantity", "# 실시간 이벤트 연결 self.OnReceiveChejanData.connect(self.chejan_slot) #종목 주문체결 관련한 이벤트 def signal_login_commConnect(self): self.dynamicCall(\"CommConnect()\") #", "\"주식일봉차트조회\": code = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, 0, \"종목코드\") code =", "주문이면 주문번호 할당 if order_number not in self.not_account_stock_dict.keys(): self.not_account_stock_dict.update({order_number: {}}) self.not_account_stock_dict[order_number].update({\"종목코드\": sCode}) self.not_account_stock_dict[order_number].update({\"주문번호\":", "bottom_stock_price = True check_price = int(self.calcul_data[0][6]) # 과거 일봉 데이터를 조회하면서 120일 이평선보다", "QString, int, QString)\", sTrCode, sRQName, i, \"주문가격\") order_gubun = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\",", "code) f = open(\"files/condition_stock.txt\", \"a\", encoding=\"utf8\") f.write(\"%s\\t%s\\t%s\\n\" % (code, code_nm, str(self.calcul_data[0][1]))) f.close() elif", "출력 : +(-)2515 f = abs(int(f)) g = self.dynamicCall(\"GetCommRealData(QString, int)\", sCode, self.realType.REALTYPE[sRealType]['거래량']) #", "stock_name = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['주문체결']['종목명']) stock_name = stock_name.strip() origin_order_number = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['주문체결']['원주문번호']) # 출력", "abs(int(c)) d = self.dynamicCall(\"GetCommRealData(QString, int)\", sCode, 
self.realType.REALTYPE[sRealType]['등락율']) # 출력 : +(-)12.98 d =", "moving_average_price_prev = total_price / 120 if moving_average_price_prev <= int(self.calcul_data[idx][6]) and idx <= 20:", "출력: 15, default: 0 not_chegual_quan = int(not_chegual_quan) order_gubun = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['주문체결']['주문구분']) # 출력:", "self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['잔고']['총매입가']) # 계좌에 있는 종목의 총매입가 total_buy_price = int(total_buy_price) meme_gubun = self.dynamicCall(\"GetChejanData(int)\",", "%s \" % len(code_list)) for idx, code in enumerate(code_list): self.dynamicCall(\"DisconnectRealData(QString)\", self.screen_calculation_stock) # 스크린", "= self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['잔고']['종목코드'])[1:] stock_name = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['잔고']['종목명']) stock_name = stock_name.strip() current_price = self.dynamicCall(\"GetChejanData(int)\",", "if order_success == 0: self.logging.logger.debug(\"매도주문 전달 성공\") del self.account_stock_dict[sCode] else: self.logging.logger.debug(\"매도주문 전달 실패\")", "sTrCode, sRQName, i, \"거래량\") # 출력 : 000070 trading_value = self.dynamicCall(\"GetCommData(QString, QString, int,", "if code not in screen_overwrite: screen_overwrite.append(code) #미체결에 있는 종목들 for order_number in self.not_account_stock_dict.keys():", "요청 시그널 포함 self.detail_account_mystock() #계좌평가잔고내역 요청 시그널 포함 QTimer.singleShot(5000, self.not_concluded_account) #5초 뒤에 미체결", "total_profit_loss_money, total_profit_loss_rate)) rows = self.dynamicCall(\"GetRepeatCnt(QString, QString)\", sTrCode, sRQName) for i in range(rows): code", "int, QString)\", \"실시간미체결요청\", \"opt10075\", sPrevNext, self.screen_my_info) self.detail_account_info_event_loop.exec_() def trdata_slot(self, sScrNo, sRQName, sTrCode, sRecordName,", "QString)\", \"비밀번호입력매체구분\", \"00\") self.dynamicCall(\"SetInputValue(QString, QString)\", \"조회구분\", \"1\") self.dynamicCall(\"CommRqData(QString, QString, 
int, QString)\", \"계좌평가잔고내역요청\", \"opw00018\",", "출력 : +(-)12.98 d = float(d) e = self.dynamicCall(\"GetCommRealData(QString, int)\", sCode, self.realType.REALTYPE[sRealType]['(최우선)매도호가']) #", "self.not_account_stock_dict[order_num]['주문가격'] not_quantity = self.not_account_stock_dict[order_num]['미체결수량'] order_gubun = self.not_account_stock_dict[order_num]['주문구분'] if order_gubun == \"매수\" and not_quantity", "sTrCode, sRQName, i, \"저가\") # 출력 : 000070 data.append(\"\") data.append(current_price.strip()) data.append(value.strip()) data.append(trading_value.strip()) data.append(date.strip())", "# 로그인 관련 이벤트 self.OnReceiveTrData.connect(self.trdata_slot) # 트랜잭션 요청 관련 이벤트 self.OnReceiveMsg.connect(self.msg_slot) def real_event_slot(self):", "0: #주문체결 account_num = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['주문체결']['계좌번호']) sCode = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['주문체결']['종목코드'])[1:] stock_name = self.dynamicCall(\"GetChejanData(int)\",", "f.close() elif pass_success == False: self.logging.logger.debug(\"조건부 통과 못함\") self.calcul_data.clear() self.calculator_event_loop.exit() def stop_screen_cancel(self, sScrNo=None):", "int(possible_quantity.strip()) self.account_stock_dict[code].update({\"종목명\": code_nm}) self.account_stock_dict[code].update({\"보유수량\": stock_quantity}) self.account_stock_dict[code].update({\"매입가\": buy_price}) self.account_stock_dict[code].update({\"수익률(%)\": learn_rate}) self.account_stock_dict[code].update({\"현재가\": current_price}) self.account_stock_dict[code].update({\"매입금액\": total_chegual_price})", "self.day_kiwoom_db(code=code, sPrevNext=sPrevNext) else: self.logging.logger.debug(\"총 일수 %s\" % len(self.calcul_data)) pass_success = False # 120일", "self.not_account_stock_dict[order_num]['주문구분'] if order_gubun == \"매수\" and not_quantity > 0 and e > meme_price:", "\"계좌번호\", self.account_num) self.dynamicCall(\"SetInputValue(QString, QString)\", \"비밀번호\", \"0000\") 
self.dynamicCall(\"SetInputValue(QString, QString)\", \"비밀번호입력매체구분\", \"00\") self.dynamicCall(\"SetInputValue(QString, QString)\", \"조회구분\",", "self.setControl(\"KHOPENAPI.KHOpenAPICtrl.1\") # 레지스트리에 저장된 api 모듈 불러오기 def event_slots(self): self.OnEventConnect.connect(self.login_slot) # 로그인 관련", "self.deposit = 0 #예수금 self.use_money = 0 #실제 투자에 사용할 금액 self.use_money_percent =", "#계산용 스크린 번호 self.screen_real_stock = \"5000\" #종목별 할당할 스크린 번호 self.screen_meme_stock = \"6000\"", "번호 self.screen_start_stop_real = \"1000\" #장 시작/종료 실시간 스크린번호 ######################################## ######### 초기 셋팅 함수들", "#예수금 요청 시그널 포함 self.detail_account_mystock() #계좌평가잔고내역 요청 시그널 포함 QTimer.singleShot(5000, self.not_concluded_account) #5초 뒤에", "def not_concluded_account(self, sPrevNext=\"0\"): self.dynamicCall(\"SetInputValue(QString, QString)\", \"계좌번호\", self.account_num) self.dynamicCall(\"SetInputValue(QString, QString)\", \"체결구분\", \"1\") self.dynamicCall(\"SetInputValue(QString, QString)\",", "# 출력 : +(-)2530 i = abs(int(i)) j = self.dynamicCall(\"GetCommRealData(QString, int)\", sCode, self.realType.REALTYPE[sRealType]['시가'])", "‘고가’, ‘저가’. ‘’], [‘’, ‘현재가’, ’거래량’, ‘거래대금’, ‘날짜’, ‘시가’, ‘고가’, ‘저가’, ‘’]. […]]", "\"매입금액\") possible_quantity = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, i, \"매매가능수량\") self.logging.logger.debug(\"종목코드: %s", "QString)\", sTrCode, sRQName) for i in range(rows): code = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\",", "\"매매가능수량\") self.logging.logger.debug(\"종목코드: %s - 종목명: %s - 보유수량: %s - 매입가:%s - 수익률:", "# 출력 HHMMSS b = self.dynamicCall(\"GetCommRealData(QString, int)\", sCode, self.realType.REALTYPE[sRealType]['현재가']) # 출력 : +(-)2520", "# 해당 경로에 파일이 있는지 체크한다. 
f = open(\"files/condition_stock.txt\", \"r\", encoding=\"utf8\") # \"r\"을", "[] #계좌평가잔고내역에 있는 종목들 for code in self.account_stock_dict.keys(): if code not in screen_overwrite:", "start_price = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, i, \"시가\") # 출력 :", "전체 종목 관리 self.all_stock_dict = {} ########################### ####### 계좌 관련된 변수 self.account_stock_dict =", "data.append(start_price.strip()) data.append(high_price.strip()) data.append(low_price.strip()) data.append(\"\") self.calcul_data.append(data.copy()) if sPrevNext == \"2\": self.day_kiwoom_db(code=code, sPrevNext=sPrevNext) else: self.logging.logger.debug(\"총", "check_price = None if int(self.calcul_data[0][7]) <= moving_average_price and moving_average_price <= int(self.calcul_data[0][6]): self.logging.logger.debug(\"오늘 주가", "할당 if order_number not in self.not_account_stock_dict.keys(): self.not_account_stock_dict.update({order_number: {}}) self.not_account_stock_dict[order_number].update({\"종목코드\": sCode}) self.not_account_stock_dict[order_number].update({\"주문번호\": order_number}) self.not_account_stock_dict[order_number].update({\"종목명\":", "% sPrevNext) print(\"계좌에 가지고 있는 종목은 %s \" % rows) if sPrevNext ==", "from config.errorCode import * from PyQt5.QtTest import * from config.kiwoomType import * from", "120일 이평선보다 주가가 계속 밑에 존재하는지 확인 prev_price = None if bottom_stock_price ==", "if moving_average_price > moving_average_price_prev and check_price > prev_price: self.logging.logger.debug(\"포착된 이평선의 가격이 오늘자 이평선", "\"예수금\") self.deposit = int(deposit) use_money = float(self.deposit) * self.use_money_percent self.use_money = int(use_money) self.use_money", "통과됨\") code_nm = self.dynamicCall(\"GetMasterCodeName(QString)\", code) f = open(\"files/condition_stock.txt\", \"a\", encoding=\"utf8\") f.write(\"%s\\t%s\\t%s\\n\" % (code,", "int, QString)\", sTrCode, sRQName, i, \"종목명\") # 출럭 : 한국기업평가 stock_quantity = self.dynamicCall(\"GetCommData(QString,", "읽어와 진다. 
for line in lines: #줄바꿈된 내용들이 한줄 씩 읽어와진다. if line", "= abs(int(first_buy_price)) ######## 새로 들어온 주문이면 주문번호 할당 if order_number not in self.not_account_stock_dict.keys():", "range(rows): code = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, i, \"종목번호\") # 출력", "- %s - %s\" % (total_buy_money, total_profit_loss_money, total_profit_loss_rate)) rows = self.dynamicCall(\"GetRepeatCnt(QString, QString)\", sTrCode,", "elif value == '3': self.logging.logger.debug(\"장 시작\") elif value == \"2\": self.logging.logger.debug(\"장 종료, 동시호가로", "h = self.dynamicCall(\"GetCommRealData(QString, int)\", sCode, self.realType.REALTYPE[sRealType]['누적거래량']) # 출력 : 240124 h = abs(int(h))", "sCode, quantity, e, self.realType.SENDTYPE['거래구분']['지정가'], \"\"] ) if order_success == 0: self.logging.logger.debug(\"매수주문 전달 성공\")", "#예수금에서 실제 사용할 비율 self.output_deposit = 0 #출력가능 금액 self.total_profit_loss_money = 0 #총평가손익금액", "위에 있는 구간 확인됨\") price_top_moving = True prev_price = int(self.calcul_data[idx][7]) break idx +=", "0 #총평가손익금액 self.total_profit_loss_rate = 0.0 #총수익률(%) ######################################## ######## 종목 정보 가져오기 self.portfolio_stock_dict =", "QString)\", \"계좌번호\", self.account_num) self.dynamicCall(\"SetInputValue(QString, QString)\", \"비밀번호\", \"0000\") self.dynamicCall(\"SetInputValue(QString, QString)\", \"비밀번호입력매체구분\", \"00\") self.dynamicCall(\"SetInputValue(QString, QString)\",", "1, sCode, quantity, e, self.realType.SENDTYPE['거래구분']['지정가'], \"\"] ) if order_success == 0: self.logging.logger.debug(\"매수주문 전달", "완료됐으면 이벤트 루프를 종료한다. 
self.login_event_loop.exit() def get_account_info(self): account_list = self.dynamicCall(\"GetLoginInfo(QString)\", \"ACCNO\") # 계좌번호", "stock_name}) self.jango_dict[sCode].update({\"보유수량\": stock_quan}) self.jango_dict[sCode].update({\"주문가능수량\": like_quan}) self.jango_dict[sCode].update({\"매입단가\": buy_price}) self.jango_dict[sCode].update({\"총매입가\": total_buy_price}) self.jango_dict[sCode].update({\"매도매수구분\": meme_gubun}) self.jango_dict[sCode].update({\"(최우선)매도호가\": first_sell_price})", "check_price > prev_price: self.logging.logger.debug(\"포착된 이평선의 가격이 오늘자 이평선 가격보다 낮은 것 확인\") self.logging.logger.debug(\"포착된", "self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['주문체결']['주문구분']) # 출력: -매도, +매수 order_gubun = order_gubun.strip().lstrip('+').lstrip('-') chegual_time_str = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['주문체결']['주문/체결시간'])", "if order_success == 0: self.logging.logger.debug(\"매도주문 전달 성공\") else: self.logging.logger.debug(\"매도주문 전달 실패\") elif d", "int(ls[2].split(\"\\n\")[0]) stock_price = abs(stock_price) self.portfolio_stock_dict.update({stock_code:{\"종목명\":stock_name, \"현재가\":stock_price}}) f.close() def merge_dict(self): self.all_stock_dict.update({\"계좌평가잔고내역\": self.account_stock_dict}) self.all_stock_dict.update({'미체결종목': self.not_account_stock_dict})", "for i in range(cnt): data = [] current_price = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\",", "> moving_average_price_prev and idx > 20: # 120일 이평선 위에 있는 구간 존재", "sRQName, i, \"미체결수량\") ok_quantity = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, i, \"체결량\")", ": +(-)2530 i = abs(int(i)) j = self.dynamicCall(\"GetCommRealData(QString, int)\", sCode, self.realType.REALTYPE[sRealType]['시가']) # 출력", "낮은지 확인 if price_top_moving == True: if moving_average_price > moving_average_price_prev and check_price >", "주는 함수 self.event_slots() # 키움과 연결하기 위한 시그널 / 슬롯 모음 self.real_event_slot() #", "int, QString)\", sTrCode, 
sRQName, i, \"매입가\") # 매입가 : 000000000054100 learn_rate = self.dynamicCall(\"GetCommData(QString,", "\"일자\") # 출력 : 000070 start_price = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName,", "sTrCode, sRQName, i, \"주문구분\") # -매도, +매수, -매도정정, +매수정정 not_quantity = self.dynamicCall(\"GetCommData(QString, QString,", "가져오기 실행 ######################################### QTest.qWait(10000) self.read_code() self.screen_number_setting() QTest.qWait(5000) #실시간 수신 관련 함수 self.dynamicCall(\"SetRealReg(QString, QString,", "screen_overwrite: temp_screen = int(self.screen_real_stock) meme_screen = int(self.screen_meme_stock) if (cnt % 50) == 0:", "sTrCode, sRQName, i, \"현재가\") # 현재가 : 000000003450 total_chegual_price = self.dynamicCall(\"GetCommData(QString, QString, int,", "self.file_delete() self.calculator_fnc() sys.exit() elif sRealType == \"주식체결\": a = self.dynamicCall(\"GetCommRealData(QString, int)\", sCode, self.realType.REALTYPE[sRealType]['체결시간'])", "보유수량 : 000000000000010 buy_price = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, i, \"매입가\")", "sCode, self.realType.REALTYPE[sRealType]['등락율']) # 출력 : +(-)12.98 d = float(d) e = self.dynamicCall(\"GetCommRealData(QString, int)\",", "= int(stock_quan) like_quan = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['잔고']['주문가능수량']) like_quan = int(like_quan) buy_price = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['잔고']['매입단가'])", "출력 : 000070 trading_value = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, i, \"거래대금\")", "% rows) if sPrevNext == \"2\": self.detail_account_mystock(sPrevNext=\"2\") else: self.detail_account_info_event_loop.exit() elif sRQName == \"실시간미체결요청\":", "self.not_account_stock_dict[order_number].update({\"주문수량\": order_quan}) self.not_account_stock_dict[order_number].update({\"주문가격\": order_price}) self.not_account_stock_dict[order_number].update({\"미체결수량\": 
not_chegual_quan}) self.not_account_stock_dict[order_number].update({\"원주문번호\": origin_order_number}) self.not_account_stock_dict[order_number].update({\"주문구분\": order_gubun}) self.not_account_stock_dict[order_number].update({\"주문/체결시간\": chegual_time_str}) self.not_account_stock_dict[order_number].update({\"체결가\":", "읽어 오겠다는 뜻이다. lines = f.readlines() #파일에 있는 내용들이 모두 읽어와 진다. for", "예수금 요청용 이벤트루프 self.calculator_event_loop = QEventLoop() ######################################### ########### 전체 종목 관리 self.all_stock_dict =", "self.logging.logger.debug(\"120일치가 없음\") break total_price = 0 for value in self.calcul_data[idx:120+idx]: total_price += int(value[1])", "QString, int, QString)\", sTrCode, sRQName, i, \"고가\") # 출력 : 000070 low_price =", "QString)\", sTrCode, sRQName) self.logging.logger.debug(\"남은 일자 수 %s\" % cnt) for i in range(cnt):", "Slack() #슬랙 동작 #print(\"kiwoom() class start. \") self.logging.logger.debug(\"Kiwoom() class start.\") ####### event loop를", "== \"2\": self.day_kiwoom_db(code=code, sPrevNext=sPrevNext) else: self.logging.logger.debug(\"총 일수 %s\" % len(self.calcul_data)) pass_success = False", "######################################## ######## 종목 정보 가져오기 self.portfolio_stock_dict = {} self.jango_dict = {} ######################## ###########", "self.logging.logger.debug(\"%s / %s : KOSDAQ Stock Code : %s is updating... 
\" %", "self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, 0, \"출금가능금액\") self.output_deposit = int(output_deposit) self.logging.logger.debug(\"예수금 :", "chegual_price == '': chegual_price = 0 else: chegual_price = int(chegual_price) chegual_quantity = self.dynamicCall(\"GetChejanData(int)\",", "= total_price / 120 if moving_average_price_prev <= int(self.calcul_data[idx][6]) and idx <= 20: self.logging.logger.debug(\"20일", "value == '0': self.logging.logger.debug(\"장 시작 전\") elif value == '3': self.logging.logger.debug(\"장 시작\") elif", "\"실시간미체결요청\": rows = self.dynamicCall(\"GetRepeatCnt(QString, QString)\", sTrCode, sRQName) for i in range(rows): code =", "order_quan = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['주문체결']['주문수량']) # 출력 : 3 order_quan = int(order_quan) order_price =", "== \"계좌평가잔고내역요청\": total_buy_money = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, 0, \"총매입금액\") self.total_buy_money", "sRQName, i, \"체결량\") code = code.strip() code_nm = code_nm.strip() order_no = int(order_no.strip()) order_status", "#계좌평가잔고내역 요청 시그널 포함 QTimer.singleShot(5000, self.not_concluded_account) #5초 뒤에 미체결 종목들 가져오기 실행 #########################################", "open(\"files/condition_stock.txt\", \"r\", encoding=\"utf8\") # \"r\"을 인자로 던져주면 파일 내용을 읽어 오겠다는 뜻이다. 
lines", "5 or meme_rate < -5): order_success = self.dynamicCall( \"SendOrder(QString, QString, QString, int, QString,", "Q는 ETN종목 code = code.strip()[1:] code_nm = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName,", "event_slots(self): self.OnEventConnect.connect(self.login_slot) # 로그인 관련 이벤트 self.OnReceiveTrData.connect(self.trdata_slot) # 트랜잭션 요청 관련 이벤트 self.OnReceiveMsg.connect(self.msg_slot)", "1 while True: if len(self.calcul_data[idx:]) < 120: # 120일치가 있는지 계속 확인 self.logging.logger.debug(\"120일치가", "QString, QString, int, QString, int, int, QString, QString)\", [\"신규매도\", self.portfolio_stock_dict[sCode][\"주문용스크린번호\"], self.account_num, 2, sCode,", "code = code.strip() # data = self.dynamicCall(\"GetCommDataEx(QString, QString)\", sTrCode, sRQName) # [[‘’, ‘현재가’,", "code in self.portfolio_stock_dict.keys(): screen_num = self.portfolio_stock_dict[code]['스크린번호'] fids = self.realType.REALTYPE['주식체결']['체결시간'] self.dynamicCall(\"SetRealReg(QString, QString, QString, QString)\",", "요청 스크린 번호 self.screen_my_info = \"2000\" #계좌 관련한 스크린 번호 self.screen_calculation_stock = \"4000\"", "self.dynamicCall(\"GetLoginInfo(QString)\", \"ACCNO\") # 계좌번호 반환 account_num = account_list.split(';')[0] self.account_num = account_num self.logging.logger.debug(\"계좌번호 :", "관련한 스크린 번호 self.screen_calculation_stock = \"4000\" #계산용 스크린 번호 self.screen_real_stock = \"5000\" #종목별", "bottom_stock_price == True: moving_average_price_prev = 0 price_top_moving = False idx = 1 while", "* 0.1) / e quantity = int(result) order_success = self.dynamicCall( \"SendOrder(QString, QString, QString,", "= self.dynamicCall(\"GetCommRealData(QString, int)\", sCode, self.realType.REALTYPE[sRealType]['(최우선)매도호가']) # 출력 : +(-)2520 e = abs(int(e)) f", "screen_num, code, fids, \"1\") self.slack.notification( pretext=\"주식자동화 프로그램 동작\", title=\"주식 자동화 프로그램 동작\", fallback=\"주식", "not in self.portfolio_stock_dict: self.portfolio_stock_dict.update({sCode:{}}) 
self.portfolio_stock_dict[sCode].update({\"체결시간\": a}) self.portfolio_stock_dict[sCode].update({\"현재가\": b}) self.portfolio_stock_dict[sCode].update({\"전일대비\": c}) self.portfolio_stock_dict[sCode].update({\"등락율\": d}) self.portfolio_stock_dict[sCode].update({\"(최우선)매도호가\":", ":return: ''' code_list = self.dynamicCall(\"GetCodeListByMarket(QString)\", market_code) code_list = code_list.split(';')[:-1] return code_list def calculator_fnc(self):", "self.portfolio_stock_dict = {} self.jango_dict = {} ######################## ########### 종목 분석 용 self.calcul_data =", "self.screen_start_stop_real = \"1000\" #장 시작/종료 실시간 스크린번호 ######################################## ######### 초기 셋팅 함수들 바로", "출력 : 000070 start_price = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, i, \"시가\")", "\"1\") if date != None: self.dynamicCall(\"SetInputValue(QString, QString)\", \"기준일자\", date) self.dynamicCall(\"CommRqData(QString, QString, int, QString)\",", ": 000000003450 total_chegual_price = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, i, \"매입금액\") possible_quantity", "출력 : +(-)2530 k = abs(int(k)) if sCode not in self.portfolio_stock_dict: self.portfolio_stock_dict.update({sCode:{}}) self.portfolio_stock_dict[sCode].update({\"체결시간\":", "관리 self.all_stock_dict = {} ########################### ####### 계좌 관련된 변수 self.account_stock_dict = {} self.not_account_stock_dict", "<filename>py_kiwoom/kiwoom_youtube.py import os import sys from PyQt5.QAxContainer import * from PyQt5.QtCore import *", "부분의 저가가 오늘자 주가의 고가보다 낮은지 확인\") pass_success = True if pass_success ==", "# from config.slack import * class Kiwoom(QAxWidget): def __init__(self): super().__init__() self.realType = RealType()", "meme_gubun = self.realType.REALTYPE['매도수구분'][meme_gubun] first_sell_price = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['잔고']['(최우선)매도호가']) first_sell_price = abs(int(first_sell_price)) first_buy_price = 
self.dynamicCall(\"GetChejanData(int)\",", "000070 trading_value = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, i, \"거래대금\") # 출력", "False idx = 1 while True: if len(self.calcul_data[idx:]) < 120: # 120일치가 있는지", "date=None, sPrevNext=\"0\"): QTest.qWait(3600) #3.6초마다 딜레이를 준다. self.dynamicCall(\"SetInputValue(QString, QString)\", \"종목코드\", code) self.dynamicCall(\"SetInputValue(QString, QString)\", \"수정주가구분\",", "QString, int, QString)\", sTrCode, sRQName, i, \"저가\") # 출력 : 000070 data.append(\"\") data.append(current_price.strip())", "15, default: 0 not_chegual_quan = int(not_chegual_quan) order_gubun = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['주문체결']['주문구분']) # 출력: -매도,", "\"장시작시간\": fid = self.realType.REALTYPE[sRealType]['장운영구분'] # (0:장시작전, 2:장종료전(20분), 3:장시작, 4,8:장종료(30분), 9:장마감) value = self.dynamicCall(\"GetCommRealData(QString,", "self.realType.REALTYPE[sRealType]['시가']) # 출력 : +(-)2530 j = abs(int(j)) k = self.dynamicCall(\"GetCommRealData(QString, int)\", sCode,", "최근의 이평선 가격보다 낮은지 확인 if price_top_moving == True: if moving_average_price > moving_average_price_prev", "self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, i, \"현재가\") # 현재가 : 000000003450 total_chegual_price", "# 출력 : +(-)12.98 d = float(d) e = self.dynamicCall(\"GetCommRealData(QString, int)\", sCode, self.realType.REALTYPE[sRealType]['(최우선)매도호가'])", "# 출력: 21000 order_price = int(order_price) not_chegual_quan = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['주문체결']['미체결수량']) # 출력: 15,", "self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['주문체결']['미체결수량']) # 출력: 15, default: 0 not_chegual_quan = int(not_chegual_quan) order_gubun = self.dynamicCall(\"GetChejanData(int)\",", "# -매도, +매수, -매도정정, +매수정정 not_quantity = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName,", "fids = self.realType.REALTYPE['주식체결']['체결시간'] 
self.dynamicCall(\"SetRealReg(QString, QString, QString, QString)\", screen_num, code, fids, \"1\") self.slack.notification( pretext=\"주식자동화", "레지스트리에 저장된 api 모듈 불러오기 def event_slots(self): self.OnEventConnect.connect(self.login_slot) # 로그인 관련 이벤트 self.OnReceiveTrData.connect(self.trdata_slot)", "code.strip() # data = self.dynamicCall(\"GetCommDataEx(QString, QString)\", sTrCode, sRQName) # [[‘’, ‘현재가’, ‘거래량’, ‘거래대금’,", "sRQName == \"주식일봉차트조회\": code = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, 0, \"종목코드\")", "PyQt5.QAxContainer import * from PyQt5.QtCore import * from config.errorCode import * from PyQt5.QtTest", "quantity, e, self.realType.SENDTYPE['거래구분']['지정가'], \"\"] ) if order_success == 0: self.logging.logger.debug(\"매수주문 전달 성공\") else:", "self.account_stock_dict[code].update({\"매입가\": buy_price}) self.account_stock_dict[code].update({\"수익률(%)\": learn_rate}) self.account_stock_dict[code].update({\"현재가\": current_price}) self.account_stock_dict[code].update({\"매입금액\": total_chegual_price}) self.account_stock_dict[code].update({'매매가능수량' : possible_quantity}) self.logging.logger.debug(\"sPreNext :", "stock_quan == 0: del self.jango_dict[sCode] #송수신 메세지 get def msg_slot(self, sScrNo, sRQName, sTrCode,", "self.all_stock_dict = {} ########################### ####### 계좌 관련된 변수 self.account_stock_dict = {} self.not_account_stock_dict =", "self.jango_dict[sCode] meme_rate = (b - jd['매입단가']) / jd['매입단가'] * 100 if jd['주문가능수량'] >", "성공\") else: self.logging.logger.debug(\"매도주문 전달 실패\") elif d > 2.0 and sCode not in", "[‘’, ‘현재가’, ’거래량’, ‘거래대금’, ‘날짜’, ‘시가’, ‘고가’, ‘저가’, ‘’]. 
[…]] cnt = self.dynamicCall(\"GetRepeatCnt(QString,", "code) self.dynamicCall(\"SetInputValue(QString, QString)\", \"수정주가구분\", \"1\") if date != None: self.dynamicCall(\"SetInputValue(QString, QString)\", \"기준일자\", date)", "-2034 매도일 때 g = abs(int(g)) h = self.dynamicCall(\"GetCommRealData(QString, int)\", sCode, self.realType.REALTYPE[sRealType]['누적거래량']) #", "해당부분 이평선이 가장 최근의 이평선 가격보다 낮은지 확인 if price_top_moving == True: if", "2, sCode, jd['주문가능수량'], 0, self.realType.SENDTYPE['거래구분']['시장가'], \"\"] ) if order_success == 0: self.logging.logger.debug(\"매도주문 전달", "종료, 동시호가로 넘어감\") elif value == \"4\": self.logging.logger.debug(\"3시30분 장 종료\") for code in", "not_chegual_quan = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['주문체결']['미체결수량']) # 출력: 15, default: 0 not_chegual_quan = int(not_chegual_quan) order_gubun", "sRQName, i, \"거래대금\") # 출력 : 000070 date = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\",", "새로 들어온 주문이면 주문번호 할당 if order_number not in self.not_account_stock_dict.keys(): self.not_account_stock_dict.update({order_number: {}}) self.not_account_stock_dict[order_number].update({\"종목코드\":", "int(result) order_success = self.dynamicCall( \"SendOrder(QString, QString, QString, int, QString, int, int, QString, QString)\",", "self.not_concluded_account) #5초 뒤에 미체결 종목들 가져오기 실행 ######################################### QTest.qWait(10000) self.read_code() self.screen_number_setting() QTest.qWait(5000) #실시간", "########### 종목 분석 용 self.calcul_data = [] ########################################## ####### 요청 스크린 번호 self.screen_my_info", "있는 구간 확인됨\") price_top_moving = True prev_price = int(self.calcul_data[idx][7]) break idx += 1", "= code.strip()[1:] code_nm = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, i, \"종목명\") #", "self.not_account_stock_dict[order_no].update({'주문번호': order_no}) self.not_account_stock_dict[order_no].update({'주문상태': order_status}) 
self.not_account_stock_dict[order_no].update({'주문수량': order_quantity}) self.not_account_stock_dict[order_no].update({'주문가격': order_price}) self.not_account_stock_dict[order_no].update({'주문구분': order_gubun}) self.not_account_stock_dict[order_no].update({'미체결수량': not_quantity}) self.not_account_stock_dict[order_no].update({'체결량':", "self.realType.REALTYPE['잔고']['종목코드'])[1:] stock_name = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['잔고']['종목명']) stock_name = stock_name.strip() current_price = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['잔고']['현재가']) current_price", "\"000000\" order_number = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['주문체결']['주문번호']) # 출럭: 0115061 마지막 주문번호 order_status = self.dynamicCall(\"GetChejanData(int)\",", "self.use_money_percent = 0.5 #예수금에서 실제 사용할 비율 self.output_deposit = 0 #출력가능 금액 self.total_profit_loss_money", "self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, i, \"주문구분\") # -매도, +매수, -매도정정, +매수정정", "# 매입가 : 000000000054100 learn_rate = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, i,", "stock_quantity}) self.account_stock_dict[code].update({\"매입가\": buy_price}) self.account_stock_dict[code].update({\"수익률(%)\": learn_rate}) self.account_stock_dict[code].update({\"현재가\": current_price}) self.account_stock_dict[code].update({\"매입금액\": total_chegual_price}) self.account_stock_dict[code].update({'매매가능수량' : possible_quantity}) self.logging.logger.debug(\"sPreNext", "= self.not_account_stock_dict[order_num][\"종목코드\"] meme_price = self.not_account_stock_dict[order_num]['주문가격'] not_quantity = self.not_account_stock_dict[order_num]['미체결수량'] order_gubun = self.not_account_stock_dict[order_num]['주문구분'] if order_gubun", "order_quantity = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, i, \"주문수량\") order_price = self.dynamicCall(\"GetCommData(QString,", "= 
self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, i, \"종목코드\") code_nm = self.dynamicCall(\"GetCommData(QString, QString,", "self.account_stock_dict[code].update({'매매가능수량' : possible_quantity}) self.logging.logger.debug(\"sPreNext : %s\" % sPrevNext) print(\"계좌에 가지고 있는 종목은 %s", "order_quan}) self.not_account_stock_dict[order_number].update({\"주문가격\": order_price}) self.not_account_stock_dict[order_number].update({\"미체결수량\": not_chegual_quan}) self.not_account_stock_dict[order_number].update({\"원주문번호\": origin_order_number}) self.not_account_stock_dict[order_number].update({\"주문구분\": order_gubun}) self.not_account_stock_dict[order_number].update({\"주문/체결시간\": chegual_time_str}) self.not_account_stock_dict[order_number].update({\"체결가\": chegual_price})", "int(order_no.strip()) order_status = order_status.strip() order_quantity = int(order_quantity.strip()) order_price = int(order_price.strip()) order_gubun = order_gubun.strip().lstrip('+').lstrip('-')", "실행 ######################################### QTest.qWait(10000) self.read_code() self.screen_number_setting() QTest.qWait(5000) #실시간 수신 관련 함수 self.dynamicCall(\"SetRealReg(QString, QString, QString,", "order_gubun = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['주문체결']['주문구분']) # 출력: -매도, +매수 order_gubun = order_gubun.strip().lstrip('+').lstrip('-') chegual_time_str =", "code not in self.portfolio_stock_dict.keys(): self.portfolio_stock_dict.update({code: {\"스크린번호\": str(self.screen_real_stock), \"주문용스크린번호\": str(self.screen_meme_stock)}}) cnt += 1 #", "code not in screen_overwrite: screen_overwrite.append(code) # 스크린번호 할당 cnt = 0 for code", "+240124 매수일때, -2034 매도일 때 g = abs(int(g)) h = self.dynamicCall(\"GetCommRealData(QString, int)\", sCode,", "value == '3': self.logging.logger.debug(\"장 시작\") elif value == \"2\": self.logging.logger.debug(\"장 종료, 동시호가로 넘어감\")", "<= int(self.calcul_data[idx][6]) and idx <= 20: self.logging.logger.debug(\"20일 동안 주가가 120일 이평선과 같거나 
위에", "\"0000\") self.dynamicCall(\"SetInputValue(QString, QString)\", \"비밀번호입력매체구분\", \"00\") self.dynamicCall(\"SetInputValue(QString, QString)\", \"조회구분\", \"1\") self.dynamicCall(\"CommRqData(QString, QString, int, QString)\",", "QString)\", \"비밀번호입력매체구분\", \"00\") self.dynamicCall(\"SetInputValue(QString, QString)\", \"조회구분\", \"1\") self.dynamicCall(\"CommRqData(QString, QString, int, QString)\", \"예수금상세현황요청\", \"opw00001\",", "QString)\", sTrCode, sRQName, 0, \"총매입금액\") self.total_buy_money = int(total_buy_money) total_profit_loss_money = self.dynamicCall(\"GetCommData(QString, QString, int,", "# 트랜잭션 요청 관련 이벤트 self.OnReceiveMsg.connect(self.msg_slot) def real_event_slot(self): self.OnReceiveRealData.connect(self.realdata_slot) # 실시간 이벤트 연결", "# 로그인 요청 시그널 self.login_event_loop.exec_() # 이벤트루프 실행 def login_slot(self, err_code): self.logging.logger.debug(errors(err_code)[1]) #로그인", "존재하는지 확인 prev_price = None if bottom_stock_price == True: moving_average_price_prev = 0 price_top_moving", "ok_quantity = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, i, \"체결량\") code = code.strip()", "stock_name.strip() current_price = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['잔고']['현재가']) current_price = abs(int(current_price)) stock_quan = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['잔고']['보유수량']) stock_quan", "= open(\"files/condition_stock.txt\", \"r\", encoding=\"utf8\") # \"r\"을 인자로 던져주면 파일 내용을 읽어 오겠다는 뜻이다.", "% (idx + 1, len(code_list), code)) self.day_kiwoom_db(code=code) def day_kiwoom_db(self, code=None, date=None, sPrevNext=\"0\"): QTest.qWait(3600)", "self.realType.REALTYPE['잔고']['주문가능수량']) like_quan = int(like_quan) buy_price = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['잔고']['매입단가']) buy_price = abs(int(buy_price)) total_buy_price =", "QString)\", sTrCode, sRQName, i, \"주문구분\") # -매도, +매수, -매도정정, +매수정정 not_quantity = self.dynamicCall(\"GetCommData(QString,", "= 
QEventLoop() ######################################### ########### 전체 종목 관리 self.all_stock_dict = {} ########################### ####### 계좌", "출력: '151028' chegual_price = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['주문체결']['체결가']) # 출력: 2110 default : '' if", "b = abs(int(b)) c = self.dynamicCall(\"GetCommRealData(QString, int)\", sCode, self.realType.REALTYPE[sRealType]['전일대비']) # 출력 : +(-)2520", "000000003450 total_chegual_price = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, i, \"매입금액\") possible_quantity =", "len(code_list), code)) self.day_kiwoom_db(code=code) def day_kiwoom_db(self, code=None, date=None, sPrevNext=\"0\"): QTest.qWait(3600) #3.6초마다 딜레이를 준다. self.dynamicCall(\"SetInputValue(QString,", "#장 시작/종료 실시간 스크린번호 ######################################## ######### 초기 셋팅 함수들 바로 실행 self.get_ocx_instance() #OCX", "시그널 포함 QTimer.singleShot(5000, self.not_concluded_account) #5초 뒤에 미체결 종목들 가져오기 실행 ######################################### QTest.qWait(10000) self.read_code()", "% len(code_list)) for idx, code in enumerate(code_list): self.dynamicCall(\"DisconnectRealData(QString)\", self.screen_calculation_stock) # 스크린 연결 끊기", "True prev_price = int(self.calcul_data[idx][7]) break idx += 1 # 해당부분 이평선이 가장 최근의", "self.jango_dict.keys(): self.jango_dict.update({sCode:{}}) self.jango_dict[sCode].update({\"현재가\": current_price}) self.jango_dict[sCode].update({\"종목코드\": sCode}) self.jango_dict[sCode].update({\"종목명\": stock_name}) self.jango_dict[sCode].update({\"보유수량\": stock_quan}) self.jango_dict[sCode].update({\"주문가능수량\": like_quan}) self.jango_dict[sCode].update({\"매입단가\":", "\"a\", encoding=\"utf8\") f.write(\"%s\\t%s\\t%s\\n\" % (code, code_nm, str(self.calcul_data[0][1]))) f.close() elif pass_success == False: self.logging.logger.debug(\"조건부", "\"1000\" #장 시작/종료 실시간 스크린번호 ######################################## ######### 초기 셋팅 함수들 바로 실행 self.get_ocx_instance()", "= Logging() # self.slack = Slack() #슬랙 
동작 #print(\"kiwoom() class start. \") self.logging.logger.debug(\"Kiwoom()", "if order_success == 0: self.logging.logger.debug(\"매수주문 전달 성공\") else: self.logging.logger.debug(\"매수주문 전달 실패\") not_meme_list =", "if os.path.exists(\"files/condition_stock.txt\"): # 해당 경로에 파일이 있는지 체크한다. f = open(\"files/condition_stock.txt\", \"r\", encoding=\"utf8\")", "self.realType.REALTYPE['주문체결']['(최우선)매수호가']) # 출력: -6000 first_buy_price = abs(int(first_buy_price)) ######## 새로 들어온 주문이면 주문번호 할당", "= abs(stock_price) self.portfolio_stock_dict.update({stock_code:{\"종목명\":stock_name, \"현재가\":stock_price}}) f.close() def merge_dict(self): self.all_stock_dict.update({\"계좌평가잔고내역\": self.account_stock_dict}) self.all_stock_dict.update({'미체결종목': self.not_account_stock_dict}) self.all_stock_dict.update({'포트폴리오종목': self.portfolio_stock_dict})", "3, code, 0, 0, self.realType.SENDTYPE['거래구분']['지정가'], order_num] ) if order_success == 0: self.logging.logger.debug(\"매수취소 전달", "\"주문용스크린번호\": str(self.screen_meme_stock)}}) cnt += 1 # 실시간 데이터 얻어오기 def realdata_slot(self, sCode, sRealType,", "QString, int, QString, int, int, QString, QString)\", [\"매수취소\", self.portfolio_stock_dict[sCode][\"주문용스크린번호\"], self.account_num, 3, code, 0,", "(code, code_nm, str(self.calcul_data[0][1]))) f.close() elif pass_success == False: self.logging.logger.debug(\"조건부 통과 못함\") self.calcul_data.clear() self.calculator_event_loop.exit()", "#print(\"kiwoom() class start. 
\") self.logging.logger.debug(\"Kiwoom() class start.\") ####### event loop를 실행하기 위한 변수모음", "self.dynamicCall(\"GetCommRealData(QString, int)\", sCode, self.realType.REALTYPE[sRealType]['전일대비']) # 출력 : +(-)2520 c = abs(int(c)) d =", "int(total_chegual_price.strip()) possible_quantity = int(possible_quantity.strip()) self.account_stock_dict[code].update({\"종목명\": code_nm}) self.account_stock_dict[code].update({\"보유수량\": stock_quantity}) self.account_stock_dict[code].update({\"매입가\": buy_price}) self.account_stock_dict[code].update({\"수익률(%)\": learn_rate}) self.account_stock_dict[code].update({\"현재가\":", "QString, int, QString)\", sTrCode, sRQName, i, \"종목명\") order_no = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\",", "sRQName) # [[‘’, ‘현재가’, ‘거래량’, ‘거래대금’, ‘날짜’, ‘시가’, ‘고가’, ‘저가’. ‘’], [‘’, ‘현재가’,", "e quantity = int(result) order_success = self.dynamicCall( \"SendOrder(QString, QString, QString, int, QString, int,", "# 출력 : defaluse : \"000000\" order_number = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['주문체결']['주문번호']) # 출럭: 0115061", "실제 사용할 비율 self.output_deposit = 0 #출력가능 금액 self.total_profit_loss_money = 0 #총평가손익금액 self.total_profit_loss_rate", "default : '' if chegual_price == '': chegual_price = 0 else: chegual_price =", "self.screen_calculation_stock) # 스크린 연결 끊기 self.logging.logger.debug(\"%s / %s : KOSDAQ Stock Code :", "확인 if price_top_moving == True: if moving_average_price > moving_average_price_prev and check_price > prev_price:", "cnt = 0 for code in screen_overwrite: temp_screen = int(self.screen_real_stock) meme_screen = int(self.screen_meme_stock)", "code in screen_overwrite: temp_screen = int(self.screen_real_stock) meme_screen = int(self.screen_meme_stock) if (cnt % 50)", "int, int, QString, QString)\", [\"신규매수\", self.portfolio_stock_dict[sCode][\"주문용스크린번호\"], self.account_num, 1, sCode, quantity, e, self.realType.SENDTYPE['거래구분']['지정가'], \"\"]", "self.screen_calculation_stock) # Tr서버로 전송 -Transaction 
self.calculator_event_loop.exec_() def read_code(self): if os.path.exists(\"files/condition_stock.txt\"): # 해당 경로에", "[…]] cnt = self.dynamicCall(\"GetRepeatCnt(QString, QString)\", sTrCode, sRQName) self.logging.logger.debug(\"남은 일자 수 %s\" % cnt)", "출력: -6000 current_price = abs(int(current_price)) first_sell_price = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['주문체결']['(최우선)매도호가']) # 출력: -6010 first_sell_price", "#계좌평가잔고내역에 있는 종목들 for code in self.account_stock_dict.keys(): if code not in screen_overwrite: screen_overwrite.append(code)", "0, \"예수금\") self.deposit = int(deposit) use_money = float(self.deposit) * self.use_money_percent self.use_money = int(use_money)", "== '0': self.logging.logger.debug(\"장 시작 전\") elif value == '3': self.logging.logger.debug(\"장 시작\") elif value", "order_gubun == \"매수\" and not_quantity > 0 and e > meme_price: order_success =", "== 0: self.logging.logger.debug(\"매수취소 전달 성공\") else: self.logging.logger.debug(\"매수취소 전달 실패\") elif not_quantity == 0:", "current_price = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, i, \"현재가\") # 출력 :", "get_ocx_instance(self): self.setControl(\"KHOPENAPI.KHOpenAPICtrl.1\") # 레지스트리에 저장된 api 모듈 불러오기 def event_slots(self): self.OnEventConnect.connect(self.login_slot) # 로그인", "\"보유수량\") # 보유수량 : 000000000000010 buy_price = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName,", "sTrCode, sRQName, i, \"매입가\") # 매입가 : 000000000054100 learn_rate = self.dynamicCall(\"GetCommData(QString, QString, int,", "0: self.logging.logger.debug(\"매수주문 전달 성공\") else: self.logging.logger.debug(\"매수주문 전달 실패\") not_meme_list = list(self.not_account_stock_dict) for order_num", "# 120일 이평선의 최근 가격 구함 total_price = 0 for value in self.calcul_data[:120]:", "self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['잔고']['현재가']) current_price = abs(int(current_price)) stock_quan = self.dynamicCall(\"GetChejanData(int)\", 
self.realType.REALTYPE['잔고']['보유수량']) stock_quan = int(stock_quan) like_quan", "def stop_screen_cancel(self, sScrNo=None): self.dynamicCall(\"DisconnectRealData(QString)\", sScrNo) # 스크린번호 연결 끊기 def get_code_list_by_market(self, market_code): '''", "self.account_stock_dict[sCode] meme_rate = (b - asd['매입가']) / asd['매입가'] * 100 if asd['매매가능수량'] >", "meme_rate = (b - jd['매입단가']) / jd['매입단가'] * 100 if jd['주문가능수량'] > 0", "def merge_dict(self): self.all_stock_dict.update({\"계좌평가잔고내역\": self.account_stock_dict}) self.all_stock_dict.update({'미체결종목': self.not_account_stock_dict}) self.all_stock_dict.update({'포트폴리오종목': self.portfolio_stock_dict}) def screen_number_setting(self): screen_overwrite = []", "self.portfolio_stock_dict[sCode].update({\"거래량\": g}) self.portfolio_stock_dict[sCode].update({\"누적거래량\": h}) self.portfolio_stock_dict[sCode].update({\"고가\": i}) self.portfolio_stock_dict[sCode].update({\"시가\": j}) self.portfolio_stock_dict[sCode].update({\"저가\": k}) if sCode in", "nItemCnt, sFidList): if int(sGubun) == 0: #주문체결 account_num = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['주문체결']['계좌번호']) sCode =", "sRealData): if sRealType == \"장시작시간\": fid = self.realType.REALTYPE[sRealType]['장운영구분'] # (0:장시작전, 2:장종료전(20분), 3:장시작, 4,8:장종료(30분),", "chegual_price = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['주문체결']['체결가']) # 출력: 2110 default : '' if chegual_price ==", "int, int, QString, QString)\", [\"신규매도\", self.portfolio_stock_dict[sCode][\"주문용스크린번호\"], self.account_num, 2, sCode, jd['주문가능수량'], 0, self.realType.SENDTYPE['거래구분']['시장가'], \"\"]", "int)\", sCode, self.realType.REALTYPE[sRealType]['전일대비']) # 출력 : +(-)2520 c = abs(int(c)) d = self.dynamicCall(\"GetCommRealData(QString,", "not in self.not_account_stock_dict.keys(): self.not_account_stock_dict.update({order_number: {}}) self.not_account_stock_dict[order_number].update({\"종목코드\": sCode}) self.not_account_stock_dict[order_number].update({\"주문번호\": order_number}) 
self.not_account_stock_dict[order_number].update({\"종목명\": stock_name}) self.not_account_stock_dict[order_number].update({\"주문상태\": order_status})", "[] current_price = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, i, \"현재가\") # 출력", "self.portfolio_stock_dict.update({stock_code:{\"종목명\":stock_name, \"현재가\":stock_price}}) f.close() def merge_dict(self): self.all_stock_dict.update({\"계좌평가잔고내역\": self.account_stock_dict}) self.all_stock_dict.update({'미체결종목': self.not_account_stock_dict}) self.all_stock_dict.update({'포트폴리오종목': self.portfolio_stock_dict}) def screen_number_setting(self):", "= self.dynamicCall(\"GetCodeListByMarket(QString)\", market_code) code_list = code_list.split(';')[:-1] return code_list def calculator_fnc(self): ''' 종목 분석관련", "sRQName, i, \"현재가\") # 출력 : 000070 value = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\",", "실시간 체결 정보 def chejan_slot(self, sGubun, nItemCnt, sFidList): if int(sGubun) == 0: #주문체결", "#0:장내, 10:코스닥 :param market_code: 시장코드 입력 :return: ''' code_list = self.dynamicCall(\"GetCodeListByMarket(QString)\", market_code) code_list", "종목들 가져오기 실행 ######################################### QTest.qWait(10000) self.read_code() self.screen_number_setting() QTest.qWait(5000) #실시간 수신 관련 함수 self.dynamicCall(\"SetRealReg(QString,", "low_price = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, i, \"저가\") # 출력 :", "= int(output_deposit) self.logging.logger.debug(\"예수금 : %s\" % self.output_deposit) self.stop_screen_cancel(self.screen_my_info) self.detail_account_info_event_loop.exit() elif sRQName == \"계좌평가잔고내역요청\":", "self.not_account_stock_dict: pass else: self.not_account_stock_dict[order_no] = {} self.not_account_stock_dict[order_no].update({'종목코드': code}) self.not_account_stock_dict[order_no].update({'종목명': code_nm}) self.not_account_stock_dict[order_no].update({'주문번호': order_no}) self.not_account_stock_dict[order_no].update({'주문상태':", "주문체결 관련한 이벤트 
def signal_login_commConnect(self): self.dynamicCall(\"CommConnect()\") # 로그인 요청 시그널 self.login_event_loop.exec_() # 이벤트루프", "data.append(trading_value.strip()) data.append(date.strip()) data.append(start_price.strip()) data.append(high_price.strip()) data.append(low_price.strip()) data.append(\"\") self.calcul_data.append(data.copy()) if sPrevNext == \"2\": self.day_kiwoom_db(code=code, sPrevNext=sPrevNext)", "= int(not_chegual_quan) order_gubun = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['주문체결']['주문구분']) # 출력: -매도, +매수 order_gubun = order_gubun.strip().lstrip('+').lstrip('-')", "if sCode not in self.jango_dict.keys(): self.jango_dict.update({sCode:{}}) self.jango_dict[sCode].update({\"현재가\": current_price}) self.jango_dict[sCode].update({\"종목코드\": sCode}) self.jango_dict[sCode].update({\"종목명\": stock_name}) self.jango_dict[sCode].update({\"보유수량\":", "int, QString)\", sTrCode, sRQName, i, \"체결량\") code = code.strip() code_nm = code_nm.strip() order_no", "이평선의 최근 가격 구함 total_price = 0 for value in self.calcul_data[:120]: total_price +=", "in screen_overwrite: temp_screen = int(self.screen_real_stock) meme_screen = int(self.screen_meme_stock) if (cnt % 50) ==", "# 키움과 연결하기 위한 시그널 / 슬롯 모음 self.real_event_slot() # 실시간 이벤트 시그널", "\"주문구분\") # -매도, +매수, -매도정정, +매수정정 not_quantity = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode,", "data = [] current_price = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, i, \"현재가\")", "int, QString, QString)\", [\"신규매도\", self.portfolio_stock_dict[sCode][\"주문용스크린번호\"], self.account_num, 2, sCode, asd['매매가능수량'], 0, self.realType.SENDTYPE['거래구분']['시장가'], \"\"] )", "비율 self.output_deposit = 0 #출력가능 금액 self.total_profit_loss_money = 0 #총평가손익금액 self.total_profit_loss_rate = 0.0", "in self.calcul_data[idx:120+idx]: total_price += int(value[1]) moving_average_price_prev = total_price / 120 if moving_average_price_prev <=", "not_chegual_quan}) 
self.not_account_stock_dict[order_number].update({\"원주문번호\": origin_order_number}) self.not_account_stock_dict[order_number].update({\"주문구분\": order_gubun}) self.not_account_stock_dict[order_number].update({\"주문/체결시간\": chegual_time_str}) self.not_account_stock_dict[order_number].update({\"체결가\": chegual_price}) self.not_account_stock_dict[order_number].update({\"체결량\": chegual_quantity}) self.not_account_stock_dict[order_number].update({\"현재가\": current_price})", "account_list.split(';')[0] self.account_num = account_num self.logging.logger.debug(\"계좌번호 : %s\" % account_num) def detail_account_info(self, sPrevNext=\"0\"): self.dynamicCall(\"SetInputValue(QString,", "self.realType.SENDTYPE['거래구분']['지정가'], order_num] ) if order_success == 0: self.logging.logger.debug(\"매수취소 전달 성공\") else: self.logging.logger.debug(\"매수취소 전달", "self.logging.logger.debug(\"조건부 통과됨\") code_nm = self.dynamicCall(\"GetMasterCodeName(QString)\", code) f = open(\"files/condition_stock.txt\", \"a\", encoding=\"utf8\") f.write(\"%s\\t%s\\t%s\\n\" %", "self.not_account_stock_dict = {} self.deposit = 0 #예수금 self.use_money = 0 #실제 투자에 사용할", "가져오기 self.detail_account_info() #예수금 요청 시그널 포함 self.detail_account_mystock() #계좌평가잔고내역 요청 시그널 포함 QTimer.singleShot(5000, self.not_concluded_account)", "####### 계좌 관련된 변수 self.account_stock_dict = {} self.not_account_stock_dict = {} self.deposit = 0", "self.login_event_loop.exec_() # 이벤트루프 실행 def login_slot(self, err_code): self.logging.logger.debug(errors(err_code)[1]) #로그인 처리가 완료됐으면 이벤트 루프를", "str(self.calcul_data[0][1]))) f.close() elif pass_success == False: self.logging.logger.debug(\"조건부 통과 못함\") self.calcul_data.clear() self.calculator_event_loop.exit() def stop_screen_cancel(self,", "수신 관련 함수 self.dynamicCall(\"SetRealReg(QString, QString, QString, QString)\", self.screen_start_stop_real, '', self.realType.REALTYPE['장시작시간']['장운영구분'], \"0\") for code", "sRealType == \"주식체결\": a = self.dynamicCall(\"GetCommRealData(QString, int)\", sCode, 
self.realType.REALTYPE[sRealType]['체결시간']) # 출력 HHMMSS b", "self.portfolio_stock_dict[sCode].update({\"전일대비\": c}) self.portfolio_stock_dict[sCode].update({\"등락율\": d}) self.portfolio_stock_dict[sCode].update({\"(최우선)매도호가\": e}) self.portfolio_stock_dict[sCode].update({\"(최우선)매수호가\": f}) self.portfolio_stock_dict[sCode].update({\"거래량\": g}) self.portfolio_stock_dict[sCode].update({\"누적거래량\": h}) self.portfolio_stock_dict[sCode].update({\"고가\":", "self.screen_my_info) self.detail_account_info_event_loop.exec_() def not_concluded_account(self, sPrevNext=\"0\"): self.dynamicCall(\"SetInputValue(QString, QString)\", \"계좌번호\", self.account_num) self.dynamicCall(\"SetInputValue(QString, QString)\", \"체결구분\", \"1\")", "120이평선 아래에 걸쳐있는 것 확인\") bottom_stock_price = True check_price = int(self.calcul_data[0][6]) # 과거", "QString, QString, int, QString, int, int, QString, QString)\", [\"신규매수\", self.portfolio_stock_dict[sCode][\"주문용스크린번호\"], self.account_num, 1, sCode,", "함수들 바로 실행 self.get_ocx_instance() #OCX 방식을 파이썬에 사용할 수 있게 변환해 주는 함수", "# 출력: 15, default: 0 not_chegual_quan = int(not_chegual_quan) order_gubun = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['주문체결']['주문구분']) #", "order_num in not_meme_list: code = self.not_account_stock_dict[order_num][\"종목코드\"] meme_price = self.not_account_stock_dict[order_num]['주문가격'] not_quantity = self.not_account_stock_dict[order_num]['미체결수량'] order_gubun", "= self.dynamicCall(\"GetCommRealData(QString, int)\", sCode, self.realType.REALTYPE[sRealType]['(최우선)매수호가']) # 출력 : +(-)2515 f = abs(int(f)) g", "sPrevNext=\"0\"): self.dynamicCall(\"SetInputValue(QString, QString)\", \"계좌번호\", self.account_num) self.dynamicCall(\"SetInputValue(QString, QString)\", \"비밀번호\", \"0000\") self.dynamicCall(\"SetInputValue(QString, QString)\", \"비밀번호입력매체구분\", \"00\")", "종목들 for code in self.portfolio_stock_dict.keys(): if code not in screen_overwrite: screen_overwrite.append(code) # 스크린번호", 
"self.not_account_stock_dict[order_number].update({\"주문번호\": order_number}) self.not_account_stock_dict[order_number].update({\"종목명\": stock_name}) self.not_account_stock_dict[order_number].update({\"주문상태\": order_status}) self.not_account_stock_dict[order_number].update({\"주문수량\": order_quan}) self.not_account_stock_dict[order_number].update({\"주문가격\": order_price}) self.not_account_stock_dict[order_number].update({\"미체결수량\": not_chegual_quan}) self.not_account_stock_dict[order_number].update({\"원주문번호\":", "QString, int, QString)\", sTrCode, sRQName, 0, \"총수익률(%)\") self.total_profit_loss_rate = float(total_profit_loss_rate) self.logging.logger.debug(\"계좌평가잔고내역요청 싱글데이터 :", "= None if bottom_stock_price == True: moving_average_price_prev = 0 price_top_moving = False idx", "get def msg_slot(self, sScrNo, sRQName, sTrCode, msg): self.logging.logger.debug(\"스크린: %s, 요청이름: %s, tr코드: %s", "2110 default : '' if chegual_price == '': chegual_price = 0 else: chegual_price", "출력 : +(-)2520 c = abs(int(c)) d = self.dynamicCall(\"GetCommRealData(QString, int)\", sCode, self.realType.REALTYPE[sRealType]['등락율']) #", "= int(result) order_success = self.dynamicCall( \"SendOrder(QString, QString, QString, int, QString, int, int, QString,", "def chejan_slot(self, sGubun, nItemCnt, sFidList): if int(sGubun) == 0: #주문체결 account_num = self.dynamicCall(\"GetChejanData(int)\",", "float(d) e = self.dynamicCall(\"GetCommRealData(QString, int)\", sCode, self.realType.REALTYPE[sRealType]['(최우선)매도호가']) # 출력 : +(-)2520 e =", "sCode, sRealType, sRealData): if sRealType == \"장시작시간\": fid = self.realType.REALTYPE[sRealType]['장운영구분'] # (0:장시작전, 2:장종료전(20분),", "not_quantity > 0 and e > meme_price: order_success = self.dynamicCall( \"SendOrder(QString, QString, QString,", "매수일때, -2034 매도일 때 g = abs(int(g)) h = self.dynamicCall(\"GetCommRealData(QString, int)\", sCode, self.realType.REALTYPE[sRealType]['누적거래량'])", "open(\"files/condition_stock.txt\", \"a\", encoding=\"utf8\") 
f.write(\"%s\\t%s\\t%s\\n\" % (code, code_nm, str(self.calcul_data[0][1]))) f.close() elif pass_success == False:", "self.jango_dict[sCode].update({\"종목명\": stock_name}) self.jango_dict[sCode].update({\"보유수량\": stock_quan}) self.jango_dict[sCode].update({\"주문가능수량\": like_quan}) self.jango_dict[sCode].update({\"매입단가\": buy_price}) self.jango_dict[sCode].update({\"총매입가\": total_buy_price}) self.jango_dict[sCode].update({\"매도매수구분\": meme_gubun}) self.jango_dict[sCode].update({\"(최우선)매도호가\":", "= self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, i, \"종목명\") order_no = self.dynamicCall(\"GetCommData(QString, QString,", "buy_price = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['잔고']['매입단가']) buy_price = abs(int(buy_price)) total_buy_price = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['잔고']['총매입가']) # 계좌에", "= self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['주문체결']['주문수량']) # 출력 : 3 order_quan = int(order_quan) order_price = self.dynamicCall(\"GetChejanData(int)\",", "== \"예수금상세현황요청\": deposit = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, 0, \"예수금\") self.deposit", "sPrevNext, self.screen_calculation_stock) # Tr서버로 전송 -Transaction self.calculator_event_loop.exec_() def read_code(self): if os.path.exists(\"files/condition_stock.txt\"): # 해당", ": +(-)2515 f = abs(int(f)) g = self.dynamicCall(\"GetCommRealData(QString, int)\", sCode, self.realType.REALTYPE[sRealType]['거래량']) # 출력", "str(self.screen_real_stock), \"주문용스크린번호\": str(self.screen_meme_stock)}}) cnt += 1 # 실시간 데이터 얻어오기 def realdata_slot(self, sCode,", "sRecordName, sPrevNext): if sRQName == \"예수금상세현황요청\": deposit = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode,", "self.account_stock_dict}) self.all_stock_dict.update({'미체결종목': self.not_account_stock_dict}) self.all_stock_dict.update({'포트폴리오종목': self.portfolio_stock_dict}) def screen_number_setting(self): 
screen_overwrite = [] #계좌평가잔고내역에 있는 종목들", "1 self.screen_meme_stock = str(meme_screen) if code in self.portfolio_stock_dict.keys(): self.portfolio_stock_dict[code].update({\"스크린번호\": str(self.screen_real_stock)}) self.portfolio_stock_dict[code].update({\"주문용스크린번호\": str(self.screen_meme_stock)}) elif", "스크린번호 ######################################## ######### 초기 셋팅 함수들 바로 실행 self.get_ocx_instance() #OCX 방식을 파이썬에 사용할", "buy_price = abs(int(buy_price)) total_buy_price = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['잔고']['총매입가']) # 계좌에 있는 종목의 총매입가 total_buy_price", "buy_price}) self.jango_dict[sCode].update({\"총매입가\": total_buy_price}) self.jango_dict[sCode].update({\"매도매수구분\": meme_gubun}) self.jango_dict[sCode].update({\"(최우선)매도호가\": first_sell_price}) self.jango_dict[sCode].update({\"(최우선)매수호가\": first_buy_price}) if stock_quan == 0:", "int(like_quan) buy_price = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['잔고']['매입단가']) buy_price = abs(int(buy_price)) total_buy_price = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['잔고']['총매입가']) #", "\"1\") self.dynamicCall(\"CommRqData(QString, QString, int, QString)\", \"예수금상세현황요청\", \"opw00001\", sPrevNext, self.screen_my_info) self.detail_account_info_event_loop.exec_() def detail_account_mystock(self, sPrevNext=\"0\"):", "QString)\", sTrCode, sRQName, i, \"종목명\") # 출럭 : 한국기업평가 stock_quantity = self.dynamicCall(\"GetCommData(QString, QString,", "value in self.calcul_data[idx:120+idx]: total_price += int(value[1]) moving_average_price_prev = total_price / 120 if moving_average_price_prev", "종목의 총매입가 total_buy_price = int(total_buy_price) meme_gubun = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['잔고']['매도매수구분']) meme_gubun = self.realType.REALTYPE['매도수구분'][meme_gubun] first_sell_price", "= (self.use_money * 0.1) / e quantity = int(result) order_success = self.dynamicCall( \"SendOrder(QString,", "self.not_account_stock_dict[order_number].update({\"주문가격\": 
order_price}) self.not_account_stock_dict[order_number].update({\"미체결수량\": not_chegual_quan}) self.not_account_stock_dict[order_number].update({\"원주문번호\": origin_order_number}) self.not_account_stock_dict[order_number].update({\"주문구분\": order_gubun}) self.not_account_stock_dict[order_number].update({\"주문/체결시간\": chegual_time_str}) self.not_account_stock_dict[order_number].update({\"체결가\": chegual_price}) self.not_account_stock_dict[order_number].update({\"체결량\":", "= order_gubun.strip().lstrip('+').lstrip('-') not_quantity = int(not_quantity.strip()) ok_quantity = int(ok_quantity.strip()) if order_no in self.not_account_stock_dict: pass", "jd['주문가능수량'] > 0 and (meme_rate > 5 or meme_rate < -5): order_success =", "있는 내용들이 모두 읽어와 진다. for line in lines: #줄바꿈된 내용들이 한줄 씩", "asd['매입가']) / asd['매입가'] * 100 if asd['매매가능수량'] > 0 and (meme_rate > 5", "sTrCode, sRQName, i, \"종목번호\") # 출력 : A039423 // 알파벳 A는 장내주식, J는", "self.logging.logger.debug(\"조건부 통과 못함\") self.calcul_data.clear() self.calculator_event_loop.exit() def stop_screen_cancel(self, sScrNo=None): self.dynamicCall(\"DisconnectRealData(QString)\", sScrNo) # 스크린번호 연결", "QString, int, QString)\", sTrCode, sRQName, i, \"주문수량\") order_price = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\",", "int, QString)\", \"계좌평가잔고내역요청\", \"opw00018\", sPrevNext, self.screen_my_info) self.detail_account_info_event_loop.exec_() def not_concluded_account(self, sPrevNext=\"0\"): self.dynamicCall(\"SetInputValue(QString, QString)\", \"계좌번호\",", "######################################## ######### 초기 셋팅 함수들 바로 실행 self.get_ocx_instance() #OCX 방식을 파이썬에 사용할 수", "self.not_account_stock_dict[order_no]) self.detail_account_info_event_loop.exit() elif sRQName == \"주식일봉차트조회\": code = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode,", "buy_price = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, i, \"매입가\") # 매입가 :", "할당할 스크린 번호 self.screen_meme_stock = \"6000\" #종목별 
할당할 주문용스크린 번호 self.screen_start_stop_real = \"1000\"", "self.realType.REALTYPE['잔고']['(최우선)매도호가']) first_sell_price = abs(int(first_sell_price)) first_buy_price = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['잔고']['(최우선)매수호가']) first_buy_price = abs(int(first_buy_price)) if sCode", "self.slack.notification( pretext=\"주식자동화 프로그램 동작\", title=\"주식 자동화 프로그램 동작\", fallback=\"주식 자동화 프로그램 동작\", text=\"주식", "sTrCode, sRQName, i, \"매입금액\") possible_quantity = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, i,", "int, QString)\", sTrCode, sRQName, 0, \"예수금\") self.deposit = int(deposit) use_money = float(self.deposit) *", "self.account_num) self.dynamicCall(\"SetInputValue(QString, QString)\", \"비밀번호\", \"0000\") self.dynamicCall(\"SetInputValue(QString, QString)\", \"비밀번호입력매체구분\", \"00\") self.dynamicCall(\"SetInputValue(QString, QString)\", \"조회구분\", \"1\")", "sTrCode, sRQName, i, \"주문가격\") order_gubun = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, i,", "if int(self.calcul_data[0][7]) <= moving_average_price and moving_average_price <= int(self.calcul_data[0][6]): self.logging.logger.debug(\"오늘 주가 120이평선 아래에 걸쳐있는", "QString, QString)\", [\"신규매도\", self.portfolio_stock_dict[sCode][\"주문용스크린번호\"], self.account_num, 2, sCode, asd['매매가능수량'], 0, self.realType.SENDTYPE['거래구분']['시장가'], \"\"] ) if", "idx <= 20: self.logging.logger.debug(\"20일 동안 주가가 120일 이평선과 같거나 위에 있으면 조건 통과", "+매수정정 not_quantity = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, i, \"미체결수량\") ok_quantity =", "in self.calcul_data[:120]: total_price += int(value[1]) moving_average_price = total_price / 120 # 오늘자 주가가", "self.screen_my_info) self.detail_account_info_event_loop.exec_() def detail_account_mystock(self, sPrevNext=\"0\"): self.dynamicCall(\"SetInputValue(QString, QString)\", \"계좌번호\", self.account_num) self.dynamicCall(\"SetInputValue(QString, QString)\", \"비밀번호\", \"0000\")", 
"#송수신 메세지 get def msg_slot(self, sScrNo, sRQName, sTrCode, msg): self.logging.logger.debug(\"스크린: %s, 요청이름: %s,", "code = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, i, \"종목코드\") code_nm = self.dynamicCall(\"GetCommData(QString,", "else: self.logging.logger.debug(\"매수취소 전달 실패\") elif not_quantity == 0: del self.not_account_stock_dict[order_num] # 실시간 체결", "걸쳐있는지 확인 bottom_stock_price = False check_price = None if int(self.calcul_data[0][7]) <= moving_average_price and", "= float(total_profit_loss_rate) self.logging.logger.debug(\"계좌평가잔고내역요청 싱글데이터 : %s - %s - %s\" % (total_buy_money, total_profit_loss_money,", "class start. \") self.logging.logger.debug(\"Kiwoom() class start.\") ####### event loop를 실행하기 위한 변수모음 self.login_event_loop", "== 0: del self.jango_dict[sCode] #송수신 메세지 get def msg_slot(self, sScrNo, sRQName, sTrCode, msg):", "출력: 접수, 확인, 체결 order_quan = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['주문체결']['주문수량']) # 출력 : 3 order_quan", "위한 변수모음 self.login_event_loop = QEventLoop() #로그인 요청용 이벤트루프 self.detail_account_info_event_loop = QEventLoop() # 예수금", "== '3': self.logging.logger.debug(\"장 시작\") elif value == \"2\": self.logging.logger.debug(\"장 종료, 동시호가로 넘어감\") elif", "self.detail_account_mystock(sPrevNext=\"2\") else: self.detail_account_info_event_loop.exit() elif sRQName == \"실시간미체결요청\": rows = self.dynamicCall(\"GetRepeatCnt(QString, QString)\", sTrCode, sRQName)", "self.realType.REALTYPE['잔고']['총매입가']) # 계좌에 있는 종목의 총매입가 total_buy_price = int(total_buy_price) meme_gubun = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['잔고']['매도매수구분'])", "QString)\", sTrCode, sRQName, i, \"미체결수량\") ok_quantity = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName,", "self.dynamicCall(\"CommRqData(QString, QString, int, QString)\", \"예수금상세현황요청\", \"opw00001\", sPrevNext, self.screen_my_info) self.detail_account_info_event_loop.exec_() def 
detail_account_mystock(self, sPrevNext=\"0\"): self.dynamicCall(\"SetInputValue(QString,", "\"기준일자\", date) self.dynamicCall(\"CommRqData(QString, QString, int, QString)\", \"주식일봉차트조회\", \"opt10081\", sPrevNext, self.screen_calculation_stock) # Tr서버로 전송", "a = self.dynamicCall(\"GetCommRealData(QString, int)\", sCode, self.realType.REALTYPE[sRealType]['체결시간']) # 출력 HHMMSS b = self.dynamicCall(\"GetCommRealData(QString, int)\",", "not in self.jango_dict.keys(): asd = self.account_stock_dict[sCode] meme_rate = (b - asd['매입가']) / asd['매입가']", "종목은 %s \" % rows) if sPrevNext == \"2\": self.detail_account_mystock(sPrevNext=\"2\") else: self.detail_account_info_event_loop.exit() elif", "fid) if value == '0': self.logging.logger.debug(\"장 시작 전\") elif value == '3': self.logging.logger.debug(\"장", "시그널 포함 self.get_account_info() #계좌번호 가져오기 self.detail_account_info() #예수금 요청 시그널 포함 self.detail_account_mystock() #계좌평가잔고내역 요청", "QString, QString, QString)\", self.screen_start_stop_real, '', self.realType.REALTYPE['장시작시간']['장운영구분'], \"0\") for code in self.portfolio_stock_dict.keys(): screen_num =", "screen_num = self.portfolio_stock_dict[code]['스크린번호'] fids = self.realType.REALTYPE['주식체결']['체결시간'] self.dynamicCall(\"SetRealReg(QString, QString, QString, QString)\", screen_num, code, fids,", "0 and e > meme_price: order_success = self.dynamicCall( \"SendOrder(QString, QString, QString, int, QString,", "= self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, i, \"현재가\") # 출력 : 000070", "(b - asd['매입가']) / asd['매입가'] * 100 if asd['매매가능수량'] > 0 and (meme_rate", "+(-)12.98 d = float(d) e = self.dynamicCall(\"GetCommRealData(QString, int)\", sCode, self.realType.REALTYPE[sRealType]['(최우선)매도호가']) # 출력 :", "def calculator_fnc(self): ''' 종목 분석관련 함수 모음 :return: ''' code_list = self.get_code_list_by_market(\"10\") self.logging.logger.debug(\"코스닥", "= int(use_money) self.use_money = self.use_money / 4 output_deposit = self.dynamicCall(\"GetCommData(QString, 
QString, int, QString)\",", "한국기업평가 stock_quantity = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, i, \"보유수량\") # 보유수량", "code_nm}) self.not_account_stock_dict[order_no].update({'주문번호': order_no}) self.not_account_stock_dict[order_no].update({'주문상태': order_status}) self.not_account_stock_dict[order_no].update({'주문수량': order_quantity}) self.not_account_stock_dict[order_no].update({'주문가격': order_price}) self.not_account_stock_dict[order_no].update({'주문구분': order_gubun}) self.not_account_stock_dict[order_no].update({'미체결수량': not_quantity})", "if line != \"\": ls = line.split(\"\\t\") stock_code = ls[0] stock_name = ls[1]", "int(self.calcul_data[0][6]): self.logging.logger.debug(\"오늘 주가 120이평선 아래에 걸쳐있는 것 확인\") bottom_stock_price = True check_price =", "0 for code in screen_overwrite: temp_screen = int(self.screen_real_stock) meme_screen = int(self.screen_meme_stock) if (cnt", "def realdata_slot(self, sCode, sRealType, sRealData): if sRealType == \"장시작시간\": fid = self.realType.REALTYPE[sRealType]['장운영구분'] #", "/ asd['매입가'] * 100 if asd['매매가능수량'] > 0 and (meme_rate > 5 or", "def detail_account_mystock(self, sPrevNext=\"0\"): self.dynamicCall(\"SetInputValue(QString, QString)\", \"계좌번호\", self.account_num) self.dynamicCall(\"SetInputValue(QString, QString)\", \"비밀번호\", \"0000\") self.dynamicCall(\"SetInputValue(QString, QString)\",", "c = self.dynamicCall(\"GetCommRealData(QString, int)\", sCode, self.realType.REALTYPE[sRealType]['전일대비']) # 출력 : +(-)2520 c = abs(int(c))", "이벤트 루프를 종료한다. 
self.login_event_loop.exit() def get_account_info(self): account_list = self.dynamicCall(\"GetLoginInfo(QString)\", \"ACCNO\") # 계좌번호 반환", "ls[0] stock_name = ls[1] stock_price = int(ls[2].split(\"\\n\")[0]) stock_price = abs(stock_price) self.portfolio_stock_dict.update({stock_code:{\"종목명\":stock_name, \"현재가\":stock_price}}) f.close()", "sRQName, 0, \"종목코드\") code = code.strip() # data = self.dynamicCall(\"GetCommDataEx(QString, QString)\", sTrCode, sRQName)", "\"저가\") # 출력 : 000070 data.append(\"\") data.append(current_price.strip()) data.append(value.strip()) data.append(trading_value.strip()) data.append(date.strip()) data.append(start_price.strip()) data.append(high_price.strip()) data.append(low_price.strip())", "int(self.calcul_data[0][6]) # 과거 일봉 데이터를 조회하면서 120일 이평선보다 주가가 계속 밑에 존재하는지 확인", "self.realType.REALTYPE['주문체결']['체결량']) # 출력: 5 default : '' if chegual_quantity == '': chegual_quantity =", "screen_overwrite.append(code) #미체결에 있는 종목들 for order_number in self.not_account_stock_dict.keys(): code = self.not_account_stock_dict[order_number]['종목코드'] if code", "0, self.realType.SENDTYPE['거래구분']['지정가'], order_num] ) if order_success == 0: self.logging.logger.debug(\"매수취소 전달 성공\") else: self.logging.logger.debug(\"매수취소", ": '' if chegual_quantity == '': chegual_quantity = 0 else: chegual_quantity = int(chegual_quantity)", "계좌에 있는 종목의 총매입가 total_buy_price = int(total_buy_price) meme_gubun = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['잔고']['매도매수구분']) meme_gubun =", "# 현재가 : 000000003450 total_chegual_price = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, i,", "0: del self.jango_dict[sCode] #송수신 메세지 get def msg_slot(self, sScrNo, sRQName, sTrCode, msg): self.logging.logger.debug(\"스크린:", "QString)\", sTrCode, sRQName, 0, \"종목코드\") code = code.strip() # data = self.dynamicCall(\"GetCommDataEx(QString, QString)\",", "전달 성공\") else: self.logging.logger.debug(\"매수취소 전달 실패\") elif not_quantity == 0: del 
self.not_account_stock_dict[order_num] #", "= self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['주문체결']['체결가']) # 출력: 2110 default : '' if chegual_price == '':", "self.realType.REALTYPE['주문체결']['주문번호']) # 출럭: 0115061 마지막 주문번호 order_status = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['주문체결']['주문상태']) # 출력: 접수,", "던져주면 파일 내용을 읽어 오겠다는 뜻이다. lines = f.readlines() #파일에 있는 내용들이 모두", "screen_overwrite.append(code) #포트폴리로에 담겨있는 종목들 for code in self.portfolio_stock_dict.keys(): if code not in screen_overwrite:", "실행 def login_slot(self, err_code): self.logging.logger.debug(errors(err_code)[1]) #로그인 처리가 완료됐으면 이벤트 루프를 종료한다. self.login_event_loop.exit() def", "cnt) for i in range(cnt): data = [] current_price = self.dynamicCall(\"GetCommData(QString, QString, int,", "QTest.qWait(3600) #3.6초마다 딜레이를 준다. self.dynamicCall(\"SetInputValue(QString, QString)\", \"종목코드\", code) self.dynamicCall(\"SetInputValue(QString, QString)\", \"수정주가구분\", \"1\") if", "in enumerate(code_list): self.dynamicCall(\"DisconnectRealData(QString)\", self.screen_calculation_stock) # 스크린 연결 끊기 self.logging.logger.debug(\"%s / %s : KOSDAQ", "self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, i, \"거래대금\") # 출력 : 000070 date", "self.portfolio_stock_dict[sCode].update({\"누적거래량\": h}) self.portfolio_stock_dict[sCode].update({\"고가\": i}) self.portfolio_stock_dict[sCode].update({\"시가\": j}) self.portfolio_stock_dict[sCode].update({\"저가\": k}) if sCode in self.account_stock_dict.keys() and", "self.read_code() self.screen_number_setting() QTest.qWait(5000) #실시간 수신 관련 함수 self.dynamicCall(\"SetRealReg(QString, QString, QString, QString)\", self.screen_start_stop_real, '',", "요청 시그널 self.login_event_loop.exec_() # 이벤트루프 실행 def login_slot(self, err_code): self.logging.logger.debug(errors(err_code)[1]) #로그인 처리가 완료됐으면", "이평선 위에 있는 구간 확인됨\") price_top_moving = True prev_price = int(self.calcul_data[idx][7]) break idx", "못함\") self.calcul_data.clear() 
self.calculator_event_loop.exit() def stop_screen_cancel(self, sScrNo=None): self.dynamicCall(\"DisconnectRealData(QString)\", sScrNo) # 스크린번호 연결 끊기 def", "order_quantity = int(order_quantity.strip()) order_price = int(order_price.strip()) order_gubun = order_gubun.strip().lstrip('+').lstrip('-') not_quantity = int(not_quantity.strip()) ok_quantity", "출력 : +(-)2520 b = abs(int(b)) c = self.dynamicCall(\"GetCommRealData(QString, int)\", sCode, self.realType.REALTYPE[sRealType]['전일대비']) #", "self.dynamicCall( \"SendOrder(QString, QString, QString, int, QString, int, int, QString, QString)\", [\"신규매도\", self.portfolio_stock_dict[sCode][\"주문용스크린번호\"], self.account_num,", "int, QString)\", sTrCode, sRQName, i, \"저가\") # 출력 : 000070 data.append(\"\") data.append(current_price.strip()) data.append(value.strip())", "line in lines: #줄바꿈된 내용들이 한줄 씩 읽어와진다. if line != \"\": ls", "0, \"총매입금액\") self.total_buy_money = int(total_buy_money) total_profit_loss_money = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName,", "0 price_top_moving = False idx = 1 while True: if len(self.calcul_data[idx:]) < 120:", "self.logging.logger.debug(\"매수주문 전달 실패\") not_meme_list = list(self.not_account_stock_dict) for order_num in not_meme_list: code = self.not_account_stock_dict[order_num][\"종목코드\"]", "준다. 
self.dynamicCall(\"SetInputValue(QString, QString)\", \"종목코드\", code) self.dynamicCall(\"SetInputValue(QString, QString)\", \"수정주가구분\", \"1\") if date != None:", "self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['주문체결']['계좌번호']) sCode = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['주문체결']['종목코드'])[1:] stock_name = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['주문체결']['종목명']) stock_name = stock_name.strip()", "break idx += 1 # 해당부분 이평선이 가장 최근의 이평선 가격보다 낮은지 확인", "\"현재가\":stock_price}}) f.close() def merge_dict(self): self.all_stock_dict.update({\"계좌평가잔고내역\": self.account_stock_dict}) self.all_stock_dict.update({'미체결종목': self.not_account_stock_dict}) self.all_stock_dict.update({'포트폴리오종목': self.portfolio_stock_dict}) def screen_number_setting(self): screen_overwrite", "0.5 #예수금에서 실제 사용할 비율 self.output_deposit = 0 #출력가능 금액 self.total_profit_loss_money = 0", "= int(ls[2].split(\"\\n\")[0]) stock_price = abs(stock_price) self.portfolio_stock_dict.update({stock_code:{\"종목명\":stock_name, \"현재가\":stock_price}}) f.close() def merge_dict(self): self.all_stock_dict.update({\"계좌평가잔고내역\": self.account_stock_dict}) self.all_stock_dict.update({'미체결종목':", "사용할 금액 self.use_money_percent = 0.5 #예수금에서 실제 사용할 비율 self.output_deposit = 0 #출력가능", "위한 시그널 / 슬롯 모음 self.real_event_slot() # 실시간 이벤트 시그널 / 슬롯 연결", "i, \"주문구분\") # -매도, +매수, -매도정정, +매수정정 not_quantity = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\",", "= int(not_quantity.strip()) ok_quantity = int(ok_quantity.strip()) if order_no in self.not_account_stock_dict: pass else: self.not_account_stock_dict[order_no] =", "market_code) code_list = code_list.split(';')[:-1] return code_list def calculator_fnc(self): ''' 종목 분석관련 함수 모음", "계속 확인 self.logging.logger.debug(\"120일치가 없음\") break total_price = 0 for value in self.calcul_data[idx:120+idx]: total_price", "QString, QString)\", self.screen_start_stop_real, '', self.realType.REALTYPE['장시작시간']['장운영구분'], \"0\") for code 
in self.portfolio_stock_dict.keys(): screen_num = self.portfolio_stock_dict[code]['스크린번호']", "order_quan = int(order_quan) order_price = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['주문체결']['주문가격']) # 출력: 21000 order_price = int(order_price)", "else: self.logging.logger.debug(\"총 일수 %s\" % len(self.calcul_data)) pass_success = False # 120일 이평선을 그릴만큼의", "and idx > 20: # 120일 이평선 위에 있는 구간 존재 self.logging.logger.debug(\"120일치 이평선", "\" % sCode) result = (self.use_money * 0.1) / e quantity = int(result)", "실행하기 위한 변수모음 self.login_event_loop = QEventLoop() #로그인 요청용 이벤트루프 self.detail_account_info_event_loop = QEventLoop() #", "로그인 관련 이벤트 self.OnReceiveTrData.connect(self.trdata_slot) # 트랜잭션 요청 관련 이벤트 self.OnReceiveMsg.connect(self.msg_slot) def real_event_slot(self): self.OnReceiveRealData.connect(self.realdata_slot)", "실시간 스크린번호 ######################################## ######### 초기 셋팅 함수들 바로 실행 self.get_ocx_instance() #OCX 방식을 파이썬에", "아래에 걸쳐있는 것 확인\") bottom_stock_price = True check_price = int(self.calcul_data[0][6]) # 과거 일봉", "int(total_buy_money) total_profit_loss_money = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, 0, \"총평가손익금액\") self.total_profit_loss_money =", "self.portfolio_stock_dict}) def screen_number_setting(self): screen_overwrite = [] #계좌평가잔고내역에 있는 종목들 for code in self.account_stock_dict.keys():", "= self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['주문체결']['주문가격']) # 출력: 21000 order_price = int(order_price) not_chegual_quan = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['주문체결']['미체결수량'])", "-매도, +매수 order_gubun = order_gubun.strip().lstrip('+').lstrip('-') chegual_time_str = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['주문체결']['주문/체결시간']) # 출력: '151028' chegual_price", "i, \"종목번호\") # 출력 : A039423 // 알파벳 A는 장내주식, J는 ELW종목, Q는", "int(self.calcul_data[idx][7]) break idx += 1 # 해당부분 이평선이 가장 최근의 이평선 가격보다 낮은지", ":return: ''' code_list = 
self.get_code_list_by_market(\"10\") self.logging.logger.debug(\"코스닥 갯수 %s \" % len(code_list)) for idx,", "파일 내용을 읽어 오겠다는 뜻이다. lines = f.readlines() #파일에 있는 내용들이 모두 읽어와", "이평선이 가장 최근의 이평선 가격보다 낮은지 확인 if price_top_moving == True: if moving_average_price", "프로그램이 동작 되었습니다.\" ) def get_ocx_instance(self): self.setControl(\"KHOPENAPI.KHOpenAPICtrl.1\") # 레지스트리에 저장된 api 모듈 불러오기", "i, \"종목코드\") code_nm = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, i, \"종목명\") order_no", "1 # 실시간 데이터 얻어오기 def realdata_slot(self, sCode, sRealType, sRealData): if sRealType ==", "else: self.logging.logger.debug(\"매수주문 전달 실패\") not_meme_list = list(self.not_account_stock_dict) for order_num in not_meme_list: code =", "self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, 0, \"총매입금액\") self.total_buy_money = int(total_buy_money) total_profit_loss_money =", "code, fids, \"1\") self.slack.notification( pretext=\"주식자동화 프로그램 동작\", title=\"주식 자동화 프로그램 동작\", fallback=\"주식 자동화", "sRQName, i, \"주문구분\") # -매도, +매수, -매도정정, +매수정정 not_quantity = self.dynamicCall(\"GetCommData(QString, QString, int,", "in self.portfolio_stock_dict: self.portfolio_stock_dict.update({sCode:{}}) self.portfolio_stock_dict[sCode].update({\"체결시간\": a}) self.portfolio_stock_dict[sCode].update({\"현재가\": b}) self.portfolio_stock_dict[sCode].update({\"전일대비\": c}) self.portfolio_stock_dict[sCode].update({\"등락율\": d}) self.portfolio_stock_dict[sCode].update({\"(최우선)매도호가\": e})", "from config.log_class import * # from config.slack import * class Kiwoom(QAxWidget): def __init__(self):", "self.dynamicCall(\"GetRepeatCnt(QString, QString)\", sTrCode, sRQName) self.logging.logger.debug(\"남은 일자 수 %s\" % cnt) for i in", "check_price = int(self.calcul_data[0][6]) # 과거 일봉 데이터를 조회하면서 120일 이평선보다 주가가 계속 밑에", "\"\"] ) if order_success == 0: self.logging.logger.debug(\"매도주문 전달 성공\") del self.account_stock_dict[sCode] else: self.logging.logger.debug(\"매도주문", "= 
self.get_code_list_by_market(\"10\") self.logging.logger.debug(\"코스닥 갯수 %s \" % len(code_list)) for idx, code in enumerate(code_list):", "current_price = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['주문체결']['현재가']) # 출력: -6000 current_price = abs(int(current_price)) first_sell_price = self.dynamicCall(\"GetChejanData(int)\",", "first_buy_price}) if stock_quan == 0: del self.jango_dict[sCode] #송수신 메세지 get def msg_slot(self, sScrNo,", "self.logging.logger.debug(\"스크린: %s, 요청이름: %s, tr코드: %s --- %s\" %(sScrNo, sRQName, sTrCode, msg)) #파일", "변수 self.account_stock_dict = {} self.not_account_stock_dict = {} self.deposit = 0 #예수금 self.use_money =", "chegual_price = 0 else: chegual_price = int(chegual_price) chegual_quantity = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['주문체결']['체결량']) # 출력:", "code_list = self.dynamicCall(\"GetCodeListByMarket(QString)\", market_code) code_list = code_list.split(';')[:-1] return code_list def calculator_fnc(self): ''' 종목", "sCode, asd['매매가능수량'], 0, self.realType.SENDTYPE['거래구분']['시장가'], \"\"] ) if order_success == 0: self.logging.logger.debug(\"매도주문 전달 성공\")", "이평선 가격보다 낮은지 확인 if price_top_moving == True: if moving_average_price > moving_average_price_prev and", "first_buy_price = abs(int(first_buy_price)) ######## 새로 들어온 주문이면 주문번호 할당 if order_number not in", "self.real_event_slot() # 실시간 이벤트 시그널 / 슬롯 연결 self.signal_login_commConnect() #로그인 요청 시그널 포함", "d > 2.0 and sCode not in self.jango_dict: self.logging.logger.debug(\"매수조건 통과 %s \" %", "int, QString)\", sTrCode, sRQName, i, \"시가\") # 출력 : 000070 high_price = self.dynamicCall(\"GetCommData(QString,", "in self.jango_dict.keys(): self.jango_dict.update({sCode:{}}) self.jango_dict[sCode].update({\"현재가\": current_price}) self.jango_dict[sCode].update({\"종목코드\": sCode}) self.jango_dict[sCode].update({\"종목명\": stock_name}) self.jango_dict[sCode].update({\"보유수량\": stock_quan}) self.jango_dict[sCode].update({\"주문가능수량\": like_quan})", "동작 되었습니다.\" ) def 
get_ocx_instance(self): self.setControl(\"KHOPENAPI.KHOpenAPICtrl.1\") # 레지스트리에 저장된 api 모듈 불러오기 def", "< 120: pass_success = False else: # 120일 이평선의 최근 가격 구함 total_price", "meme_price = self.not_account_stock_dict[order_num]['주문가격'] not_quantity = self.not_account_stock_dict[order_num]['미체결수량'] order_gubun = self.not_account_stock_dict[order_num]['주문구분'] if order_gubun == \"매수\"", "QString)\", sTrCode, sRQName, 0, \"예수금\") self.deposit = int(deposit) use_money = float(self.deposit) * self.use_money_percent", "order_gubun = order_gubun.strip().lstrip('+').lstrip('-') not_quantity = int(not_quantity.strip()) ok_quantity = int(ok_quantity.strip()) if order_no in self.not_account_stock_dict:", "self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, i, \"저가\") # 출력 : 000070 data.append(\"\")", "sTrCode, sRQName, i, \"보유수량\") # 보유수량 : 000000000000010 buy_price = self.dynamicCall(\"GetCommData(QString, QString, int,", "* from config.errorCode import * from PyQt5.QtTest import * from config.kiwoomType import *", "QString)\", sTrCode, sRQName, i, \"체결량\") code = code.strip() code_nm = code_nm.strip() order_no =", "계좌번호 반환 account_num = account_list.split(';')[0] self.account_num = account_num self.logging.logger.debug(\"계좌번호 : %s\" % account_num)", "뜻이다. lines = f.readlines() #파일에 있는 내용들이 모두 읽어와 진다. for line in", "self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, i, \"보유수량\") # 보유수량 : 000000000000010 buy_price", "self.portfolio_stock_dict[sCode][\"주문용스크린번호\"], self.account_num, 2, sCode, jd['주문가능수량'], 0, self.realType.SENDTYPE['거래구분']['시장가'], \"\"] ) if order_success == 0:", "self.detail_account_info_event_loop.exit() elif sRQName == \"계좌평가잔고내역요청\": total_buy_money = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName,", "i, \"체결량\") code = code.strip() code_nm = code_nm.strip() order_no = int(order_no.strip()) order_status =", "내용들이 한줄 씩 읽어와진다. 
if line != \"\": ls = line.split(\"\\t\") stock_code =", "if self.calcul_data == None or len(self.calcul_data) < 120: pass_success = False else: #", "code_nm = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, i, \"종목명\") # 출럭 :", "메세지 get def msg_slot(self, sScrNo, sRQName, sTrCode, msg): self.logging.logger.debug(\"스크린: %s, 요청이름: %s, tr코드:", "QString)\", [\"신규매도\", self.portfolio_stock_dict[sCode][\"주문용스크린번호\"], self.account_num, 2, sCode, asd['매매가능수량'], 0, self.realType.SENDTYPE['거래구분']['시장가'], \"\"] ) if order_success", "== '': chegual_quantity = 0 else: chegual_quantity = int(chegual_quantity) current_price = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['주문체결']['현재가'])", "self.logging.logger.debug(\"매도주문 전달 실패\") elif d > 2.0 and sCode not in self.jango_dict: self.logging.logger.debug(\"매수조건", "\"체결구분\", \"1\") self.dynamicCall(\"SetInputValue(QString, QString)\", \"매매구분\", \"0\") self.dynamicCall(\"CommRqData(QString, QString, int, QString)\", \"실시간미체결요청\", \"opt10075\", sPrevNext,", "000070 date = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, i, \"일자\") # 출력", "== 0: meme_screen += 1 self.screen_meme_stock = str(meme_screen) if code in self.portfolio_stock_dict.keys(): self.portfolio_stock_dict[code].update({\"스크린번호\":", "self.jango_dict[sCode].update({\"종목코드\": sCode}) self.jango_dict[sCode].update({\"종목명\": stock_name}) self.jango_dict[sCode].update({\"보유수량\": stock_quan}) self.jango_dict[sCode].update({\"주문가능수량\": like_quan}) self.jango_dict[sCode].update({\"매입단가\": buy_price}) self.jango_dict[sCode].update({\"총매입가\": total_buy_price}) self.jango_dict[sCode].update({\"매도매수구분\":", "total_chegual_price}) self.account_stock_dict[code].update({'매매가능수량' : possible_quantity}) self.logging.logger.debug(\"sPreNext : %s\" % sPrevNext) print(\"계좌에 가지고 있는 종목은", "self.account_stock_dict.keys() and sCode not in self.jango_dict.keys(): asd = self.account_stock_dict[sCode] 
meme_rate = (b -", "self.realType.REALTYPE['주문체결']['원주문번호']) # 출력 : defaluse : \"000000\" order_number = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['주문체결']['주문번호']) # 출럭:", "int(chegual_price) chegual_quantity = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['주문체결']['체결량']) # 출력: 5 default : '' if chegual_quantity", "def get_code_list_by_market(self, market_code): ''' 종목코드 리스트 받기 #0:장내, 10:코스닥 :param market_code: 시장코드 입력", "0.1) / e quantity = int(result) order_success = self.dynamicCall( \"SendOrder(QString, QString, QString, int,", "self.dynamicCall(\"SetInputValue(QString, QString)\", \"조회구분\", \"1\") self.dynamicCall(\"CommRqData(QString, QString, int, QString)\", \"예수금상세현황요청\", \"opw00001\", sPrevNext, self.screen_my_info) self.detail_account_info_event_loop.exec_()", "% (code, code_nm, str(self.calcul_data[0][1]))) f.close() elif pass_success == False: self.logging.logger.debug(\"조건부 통과 못함\") self.calcul_data.clear()", "self.not_account_stock_dict[order_num]['미체결수량'] order_gubun = self.not_account_stock_dict[order_num]['주문구분'] if order_gubun == \"매수\" and not_quantity > 0 and", "-매도정정, +매수정정 not_quantity = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, i, \"미체결수량\") ok_quantity", "self.screen_real_stock = \"5000\" #종목별 할당할 스크린 번호 self.screen_meme_stock = \"6000\" #종목별 할당할 주문용스크린", "sTrCode, sRQName, 0, \"총평가손익금액\") self.total_profit_loss_money = int(total_profit_loss_money) total_profit_loss_rate = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\",", "def screen_number_setting(self): screen_overwrite = [] #계좌평가잔고내역에 있는 종목들 for code in self.account_stock_dict.keys(): if", "\"\"] ) if order_success == 0: self.logging.logger.debug(\"매도주문 전달 성공\") else: self.logging.logger.debug(\"매도주문 전달 실패\")", "self.realType.REALTYPE['주문체결']['주문구분']) # 출력: -매도, +매수 order_gubun = order_gubun.strip().lstrip('+').lstrip('-') chegual_time_str = self.dynamicCall(\"GetChejanData(int)\", 
self.realType.REALTYPE['주문체결']['주문/체결시간']) #", "포함 self.get_account_info() #계좌번호 가져오기 self.detail_account_info() #예수금 요청 시그널 포함 self.detail_account_mystock() #계좌평가잔고내역 요청 시그널", "현재가: %s\" % ( code, code_nm, stock_quantity, buy_price, learn_rate, current_price)) if code in", "QString)\", self.portfolio_stock_dict[code]['스크린번호'], code) QTest.qWait(5000) self.file_delete() self.calculator_fnc() sys.exit() elif sRealType == \"주식체결\": a =", "sPrevNext=sPrevNext) else: self.logging.logger.debug(\"총 일수 %s\" % len(self.calcul_data)) pass_success = False # 120일 이평선을", "date) self.dynamicCall(\"CommRqData(QString, QString, int, QString)\", \"주식일봉차트조회\", \"opt10081\", sPrevNext, self.screen_calculation_stock) # Tr서버로 전송 -Transaction", "/ 슬롯 연결 self.signal_login_commConnect() #로그인 요청 시그널 포함 self.get_account_info() #계좌번호 가져오기 self.detail_account_info() #예수금", "self.output_deposit) self.stop_screen_cancel(self.screen_my_info) self.detail_account_info_event_loop.exit() elif sRQName == \"계좌평가잔고내역요청\": total_buy_money = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\",", ": 000000000054100 learn_rate = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, i, \"수익률(%)\") #", "QString)\", sTrCode, sRQName, i, \"현재가\") # 출력 : 000070 value = self.dynamicCall(\"GetCommData(QString, QString,", "self.realType.REALTYPE[sRealType]['(최우선)매수호가']) # 출력 : +(-)2515 f = abs(int(f)) g = self.dynamicCall(\"GetCommRealData(QString, int)\", sCode,", "self.all_stock_dict.update({\"계좌평가잔고내역\": self.account_stock_dict}) self.all_stock_dict.update({'미체결종목': self.not_account_stock_dict}) self.all_stock_dict.update({'포트폴리오종목': self.portfolio_stock_dict}) def screen_number_setting(self): screen_overwrite = [] #계좌평가잔고내역에 있는", "chegual_price}) self.not_account_stock_dict[order_number].update({\"체결량\": chegual_quantity}) self.not_account_stock_dict[order_number].update({\"현재가\": current_price}) self.not_account_stock_dict[order_number].update({\"(최우선)매도호가\": 
first_sell_price}) self.not_account_stock_dict[order_number].update({\"(최우선)매수호가\": first_buy_price}) elif int(sGubun) == 1:", "sCode not in self.jango_dict.keys(): asd = self.account_stock_dict[sCode] meme_rate = (b - asd['매입가']) /", "A는 장내주식, J는 ELW종목, Q는 ETN종목 code = code.strip()[1:] code_nm = self.dynamicCall(\"GetCommData(QString, QString,", "code in enumerate(code_list): self.dynamicCall(\"DisconnectRealData(QString)\", self.screen_calculation_stock) # 스크린 연결 끊기 self.logging.logger.debug(\"%s / %s :", "f}) self.portfolio_stock_dict[sCode].update({\"거래량\": g}) self.portfolio_stock_dict[sCode].update({\"누적거래량\": h}) self.portfolio_stock_dict[sCode].update({\"고가\": i}) self.portfolio_stock_dict[sCode].update({\"시가\": j}) self.portfolio_stock_dict[sCode].update({\"저가\": k}) if sCode", "(total_buy_money, total_profit_loss_money, total_profit_loss_rate)) rows = self.dynamicCall(\"GetRepeatCnt(QString, QString)\", sTrCode, sRQName) for i in range(rows):", "data.append(\"\") self.calcul_data.append(data.copy()) if sPrevNext == \"2\": self.day_kiwoom_db(code=code, sPrevNext=sPrevNext) else: self.logging.logger.debug(\"총 일수 %s\" %", "% sCode) result = (self.use_money * 0.1) / e quantity = int(result) order_success", "f.readlines() #파일에 있는 내용들이 모두 읽어와 진다. 
for line in lines: #줄바꿈된 내용들이", "real_event_slot(self): self.OnReceiveRealData.connect(self.realdata_slot) # 실시간 이벤트 연결 self.OnReceiveChejanData.connect(self.chejan_slot) #종목 주문체결 관련한 이벤트 def signal_login_commConnect(self):", "# 실시간 체결 정보 def chejan_slot(self, sGubun, nItemCnt, sFidList): if int(sGubun) == 0:", "self.event_slots() # 키움과 연결하기 위한 시그널 / 슬롯 모음 self.real_event_slot() # 실시간 이벤트", "self.not_account_stock_dict[order_number].update({\"(최우선)매도호가\": first_sell_price}) self.not_account_stock_dict[order_number].update({\"(최우선)매수호가\": first_buy_price}) elif int(sGubun) == 1: #잔고 account_num = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['잔고']['계좌번호'])", "분석관련 함수 모음 :return: ''' code_list = self.get_code_list_by_market(\"10\") self.logging.logger.debug(\"코스닥 갯수 %s \" %", "< 120: # 120일치가 있는지 계속 확인 self.logging.logger.debug(\"120일치가 없음\") break total_price = 0", "sCode not in self.jango_dict: self.logging.logger.debug(\"매수조건 통과 %s \" % sCode) result = (self.use_money", "#로그인 요청용 이벤트루프 self.detail_account_info_event_loop = QEventLoop() # 예수금 요청용 이벤트루프 self.calculator_event_loop = QEventLoop()", "self.dynamicCall(\"SetInputValue(QString, QString)\", \"매매구분\", \"0\") self.dynamicCall(\"CommRqData(QString, QString, int, QString)\", \"실시간미체결요청\", \"opt10075\", sPrevNext, self.screen_my_info) self.detail_account_info_event_loop.exec_()", "order_status.strip() order_quantity = int(order_quantity.strip()) order_price = int(order_price.strip()) order_gubun = order_gubun.strip().lstrip('+').lstrip('-') not_quantity = int(not_quantity.strip())", "QString, QString)\", [\"신규매도\", self.portfolio_stock_dict[sCode][\"주문용스크린번호\"], self.account_num, 2, sCode, jd['주문가능수량'], 0, self.realType.SENDTYPE['거래구분']['시장가'], \"\"] ) if", "False break elif int(self.calcul_data[idx][7]) > moving_average_price_prev and idx > 20: # 120일 이평선", "에 해당 종목이 있나 확인 pass else: self.account_stock_dict[code] = {} code_nm = code_nm.strip()", "= 0 #실제 투자에 사용할 금액 self.use_money_percent = 
0.5 #예수금에서 실제 사용할 비율", "int(ok_quantity.strip()) if order_no in self.not_account_stock_dict: pass else: self.not_account_stock_dict[order_no] = {} self.not_account_stock_dict[order_no].update({'종목코드': code}) self.not_account_stock_dict[order_no].update({'종목명':", "i, \"저가\") # 출력 : 000070 data.append(\"\") data.append(current_price.strip()) data.append(value.strip()) data.append(trading_value.strip()) data.append(date.strip()) data.append(start_price.strip()) data.append(high_price.strip())", "screen_overwrite: screen_overwrite.append(code) # 스크린번호 할당 cnt = 0 for code in screen_overwrite: temp_screen", "int, QString)\", sTrCode, sRQName, i, \"일자\") # 출력 : 000070 start_price = self.dynamicCall(\"GetCommData(QString,", "self.realType.REALTYPE[sRealType]['현재가']) # 출력 : +(-)2520 b = abs(int(b)) c = self.dynamicCall(\"GetCommRealData(QString, int)\", sCode,", "#로그인 처리가 완료됐으면 이벤트 루프를 종료한다. self.login_event_loop.exit() def get_account_info(self): account_list = self.dynamicCall(\"GetLoginInfo(QString)\", \"ACCNO\")", "self.signal_login_commConnect() #로그인 요청 시그널 포함 self.get_account_info() #계좌번호 가져오기 self.detail_account_info() #예수금 요청 시그널 포함", "QString, int, int, QString, QString)\", [\"신규매도\", self.portfolio_stock_dict[sCode][\"주문용스크린번호\"], self.account_num, 2, sCode, asd['매매가능수량'], 0, self.realType.SENDTYPE['거래구분']['시장가'],", "[\"신규매도\", self.portfolio_stock_dict[sCode][\"주문용스크린번호\"], self.account_num, 2, sCode, jd['주문가능수량'], 0, self.realType.SENDTYPE['거래구분']['시장가'], \"\"] ) if order_success ==", "= int(order_quan) order_price = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['주문체결']['주문가격']) # 출력: 21000 order_price = int(order_price) not_chegual_quan", "스크린번호 연결 끊기 def get_code_list_by_market(self, market_code): ''' 종목코드 리스트 받기 #0:장내, 10:코스닥 :param", "0 #예수금 self.use_money = 0 #실제 투자에 사용할 금액 self.use_money_percent = 0.5 #예수금에서", "dictionary 에 해당 종목이 있나 확인 pass else: self.account_stock_dict[code] = {} code_nm =", "self.dynamicCall(\"GetChejanData(int)\", 
self.realType.REALTYPE['잔고']['보유수량']) stock_quan = int(stock_quan) like_quan = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['잔고']['주문가능수량']) like_quan = int(like_quan) buy_price", "len(self.calcul_data[idx:]) < 120: # 120일치가 있는지 계속 확인 self.logging.logger.debug(\"120일치가 없음\") break total_price =", "self.not_account_stock_dict[order_number].update({\"주문구분\": order_gubun}) self.not_account_stock_dict[order_number].update({\"주문/체결시간\": chegual_time_str}) self.not_account_stock_dict[order_number].update({\"체결가\": chegual_price}) self.not_account_stock_dict[order_number].update({\"체결량\": chegual_quantity}) self.not_account_stock_dict[order_number].update({\"현재가\": current_price}) self.not_account_stock_dict[order_number].update({\"(최우선)매도호가\": first_sell_price}) self.not_account_stock_dict[order_number].update({\"(최우선)매수호가\":", "\"6000\" #종목별 할당할 주문용스크린 번호 self.screen_start_stop_real = \"1000\" #장 시작/종료 실시간 스크린번호 ########################################", "\"매수\" and not_quantity > 0 and e > meme_price: order_success = self.dynamicCall( \"SendOrder(QString,", "total_price / 120 if moving_average_price_prev <= int(self.calcul_data[idx][6]) and idx <= 20: self.logging.logger.debug(\"20일 동안", "내용들이 모두 읽어와 진다. for line in lines: #줄바꿈된 내용들이 한줄 씩 읽어와진다.", "확인 self.logging.logger.debug(\"120일치가 없음\") break total_price = 0 for value in self.calcul_data[idx:120+idx]: total_price +=", "= abs(int(current_price)) stock_quan = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['잔고']['보유수량']) stock_quan = int(stock_quan) like_quan = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['잔고']['주문가능수량'])", "#줄바꿈된 내용들이 한줄 씩 읽어와진다. 
if line != \"\": ls = line.split(\"\\t\") stock_code", "\" % (idx + 1, len(code_list), code)) self.day_kiwoom_db(code=code) def day_kiwoom_db(self, code=None, date=None, sPrevNext=\"0\"):", "되었습니다.\" ) def get_ocx_instance(self): self.setControl(\"KHOPENAPI.KHOpenAPICtrl.1\") # 레지스트리에 저장된 api 모듈 불러오기 def event_slots(self):", "= self.jango_dict[sCode] meme_rate = (b - jd['매입단가']) / jd['매입단가'] * 100 if jd['주문가능수량']", "일자 수 %s\" % cnt) for i in range(cnt): data = [] current_price", "‘거래대금’, ‘날짜’, ‘시가’, ‘고가’, ‘저가’. ‘’], [‘’, ‘현재가’, ’거래량’, ‘거래대금’, ‘날짜’, ‘시가’, ‘고가’,", "self.not_account_stock_dict[order_number].update({\"주문상태\": order_status}) self.not_account_stock_dict[order_number].update({\"주문수량\": order_quan}) self.not_account_stock_dict[order_number].update({\"주문가격\": order_price}) self.not_account_stock_dict[order_number].update({\"미체결수량\": not_chegual_quan}) self.not_account_stock_dict[order_number].update({\"원주문번호\": origin_order_number}) self.not_account_stock_dict[order_number].update({\"주문구분\": order_gubun}) self.not_account_stock_dict[order_number].update({\"주문/체결시간\":", "chegual_quantity = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['주문체결']['체결량']) # 출력: 5 default : '' if chegual_quantity ==", "int, QString)\", sTrCode, sRQName, i, \"종목번호\") # 출력 : A039423 // 알파벳 A는", "== \"2\": self.detail_account_mystock(sPrevNext=\"2\") else: self.detail_account_info_event_loop.exit() elif sRQName == \"실시간미체결요청\": rows = self.dynamicCall(\"GetRepeatCnt(QString, QString)\",", "chegual_quantity == '': chegual_quantity = 0 else: chegual_quantity = int(chegual_quantity) current_price = self.dynamicCall(\"GetChejanData(int)\",", "elif pass_success == False: self.logging.logger.debug(\"조건부 통과 못함\") self.calcul_data.clear() self.calculator_event_loop.exit() def stop_screen_cancel(self, sScrNo=None): self.dynamicCall(\"DisconnectRealData(QString)\",", "-Transaction self.calculator_event_loop.exec_() def read_code(self): if 
os.path.exists(\"files/condition_stock.txt\"): # 해당 경로에 파일이 있는지 체크한다. f", "= self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['잔고']['매입단가']) buy_price = abs(int(buy_price)) total_buy_price = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['잔고']['총매입가']) # 계좌에 있는", "self.account_stock_dict[code] = {} code_nm = code_nm.strip() stock_quantity = int(stock_quantity.strip()) buy_price = int(buy_price.strip()) learn_rate", "order_gubun = self.not_account_stock_dict[order_num]['주문구분'] if order_gubun == \"매수\" and not_quantity > 0 and e", "self.not_account_stock_dict[order_number].update({\"(최우선)매수호가\": first_buy_price}) elif int(sGubun) == 1: #잔고 account_num = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['잔고']['계좌번호']) sCode =", "self.dynamicCall(\"GetCommRealData(QString, int)\", sCode, self.realType.REALTYPE[sRealType]['등락율']) # 출력 : +(-)12.98 d = float(d) e =", "= self.not_account_stock_dict[order_num]['주문구분'] if order_gubun == \"매수\" and not_quantity > 0 and e >", "= {} self.deposit = 0 #예수금 self.use_money = 0 #실제 투자에 사용할 금액", "sRQName, 0, \"총매입금액\") self.total_buy_money = int(total_buy_money) total_profit_loss_money = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode,", "sCode, self.realType.REALTYPE[sRealType]['전일대비']) # 출력 : +(-)2520 c = abs(int(c)) d = self.dynamicCall(\"GetCommRealData(QString, int)\",", "self.logging.logger.debug(\"매수조건 통과 %s \" % sCode) result = (self.use_money * 0.1) / e", "self.dynamicCall(\"SetInputValue(QString, QString)\", \"계좌번호\", self.account_num) self.dynamicCall(\"SetInputValue(QString, QString)\", \"비밀번호\", \"0000\") self.dynamicCall(\"SetInputValue(QString, QString)\", \"비밀번호입력매체구분\", \"00\") self.dynamicCall(\"SetInputValue(QString,", "meme_rate = (b - asd['매입가']) / asd['매입가'] * 100 if asd['매매가능수량'] > 0", "signal_login_commConnect(self): self.dynamicCall(\"CommConnect()\") # 로그인 요청 시그널 self.login_event_loop.exec_() # 이벤트루프 실행 def login_slot(self, err_code):", 
"int(self.calcul_data[idx][7]) > moving_average_price_prev and idx > 20: # 120일 이평선 위에 있는 구간", "sCode, self.realType.REALTYPE[sRealType]['현재가']) # 출력 : +(-)2520 b = abs(int(b)) c = self.dynamicCall(\"GetCommRealData(QString, int)\",", "= self.not_account_stock_dict[order_num]['주문가격'] not_quantity = self.not_account_stock_dict[order_num]['미체결수량'] order_gubun = self.not_account_stock_dict[order_num]['주문구분'] if order_gubun == \"매수\" and", "idx += 1 # 해당부분 이평선이 가장 최근의 이평선 가격보다 낮은지 확인 if", "방식을 파이썬에 사용할 수 있게 변환해 주는 함수 self.event_slots() # 키움과 연결하기 위한", "self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, i, \"시가\") # 출력 : 000070 high_price", "self.logging.logger.debug(\"총 일수 %s\" % len(self.calcul_data)) pass_success = False # 120일 이평선을 그릴만큼의 데이터가", "+매수, -매도정정, +매수정정 not_quantity = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, i, \"미체결수량\")", "chegual_time_str}) self.not_account_stock_dict[order_number].update({\"체결가\": chegual_price}) self.not_account_stock_dict[order_number].update({\"체결량\": chegual_quantity}) self.not_account_stock_dict[order_number].update({\"현재가\": current_price}) self.not_account_stock_dict[order_number].update({\"(최우선)매도호가\": first_sell_price}) self.not_account_stock_dict[order_number].update({\"(최우선)매수호가\": first_buy_price}) elif int(sGubun)", "오늘자 주가가 120일 이평선에 걸쳐있는지 확인 bottom_stock_price = False check_price = None if", "None or len(self.calcul_data) < 120: pass_success = False else: # 120일 이평선의 최근", "0 #실제 투자에 사용할 금액 self.use_money_percent = 0.5 #예수금에서 실제 사용할 비율 self.output_deposit", "QString)\", \"실시간미체결요청\", \"opt10075\", sPrevNext, self.screen_my_info) self.detail_account_info_event_loop.exec_() def trdata_slot(self, sScrNo, sRQName, sTrCode, sRecordName, sPrevNext):", "for i in range(rows): code = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, i,", "= RealType() self.logging = Logging() # self.slack = Slack() #슬랙 동작 #print(\"kiwoom() 
class", "i in range(rows): code = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, i, \"종목코드\")", "sCode, self.realType.REALTYPE[sRealType]['체결시간']) # 출력 HHMMSS b = self.dynamicCall(\"GetCommRealData(QString, int)\", sCode, self.realType.REALTYPE[sRealType]['현재가']) # 출력", "# 스크린 연결 끊기 self.logging.logger.debug(\"%s / %s : KOSDAQ Stock Code : %s", "반환 account_num = account_list.split(';')[0] self.account_num = account_num self.logging.logger.debug(\"계좌번호 : %s\" % account_num) def", "self.portfolio_stock_dict[sCode][\"주문용스크린번호\"], self.account_num, 3, code, 0, 0, self.realType.SENDTYPE['거래구분']['지정가'], order_num] ) if order_success == 0:", "self.realType = RealType() self.logging = Logging() # self.slack = Slack() #슬랙 동작 #print(\"kiwoom()", "출력 : +(-)2530 j = abs(int(j)) k = self.dynamicCall(\"GetCommRealData(QString, int)\", sCode, self.realType.REALTYPE[sRealType]['저가']) #", "moving_average_price <= int(self.calcul_data[0][6]): self.logging.logger.debug(\"오늘 주가 120이평선 아래에 걸쳐있는 것 확인\") bottom_stock_price = True", "self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, i, \"주문번호\") order_status = self.dynamicCall(\"GetCommData(QString, QString, int,", "확인\") self.logging.logger.debug(\"포착된 부분의 저가가 오늘자 주가의 고가보다 낮은지 확인\") pass_success = True if", "self.total_profit_loss_rate = float(total_profit_loss_rate) self.logging.logger.debug(\"계좌평가잔고내역요청 싱글데이터 : %s - %s - %s\" % (total_buy_money,", "실시간 이벤트 연결 self.OnReceiveChejanData.connect(self.chejan_slot) #종목 주문체결 관련한 이벤트 def signal_login_commConnect(self): self.dynamicCall(\"CommConnect()\") # 로그인", "/ jd['매입단가'] * 100 if jd['주문가능수량'] > 0 and (meme_rate > 5 or", "int, QString, int, int, QString, QString)\", [\"매수취소\", self.portfolio_stock_dict[sCode][\"주문용스크린번호\"], self.account_num, 3, code, 0, 0,", "sRQName, i, \"매매가능수량\") self.logging.logger.debug(\"종목코드: %s - 종목명: %s - 보유수량: %s - 매입가:%s", "sTrCode, sRQName, i, \"매매가능수량\") self.logging.logger.debug(\"종목코드: %s 
- 종목명: %s - 보유수량: %s -", "# 120일 이평선을 그릴만큼의 데이터가 있는지 체크 if self.calcul_data == None or len(self.calcul_data)", "abs(int(buy_price)) total_buy_price = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['잔고']['총매입가']) # 계좌에 있는 종목의 총매입가 total_buy_price = int(total_buy_price)", "\"종목번호\") # 출력 : A039423 // 알파벳 A는 장내주식, J는 ELW종목, Q는 ETN종목", "''' 종목 분석관련 함수 모음 :return: ''' code_list = self.get_code_list_by_market(\"10\") self.logging.logger.debug(\"코스닥 갯수 %s", "self.calcul_data = [] ########################################## ####### 요청 스크린 번호 self.screen_my_info = \"2000\" #계좌 관련한", "self.logging.logger.debug(\"남은 일자 수 %s\" % cnt) for i in range(cnt): data = []", "sRQName, i, \"주문수량\") order_price = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, i, \"주문가격\")", "sTrCode, sRQName, i, \"주문수량\") order_price = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, i,", "k = abs(int(k)) if sCode not in self.portfolio_stock_dict: self.portfolio_stock_dict.update({sCode:{}}) self.portfolio_stock_dict[sCode].update({\"체결시간\": a}) self.portfolio_stock_dict[sCode].update({\"현재가\": b})", "if order_success == 0: self.logging.logger.debug(\"매수취소 전달 성공\") else: self.logging.logger.debug(\"매수취소 전달 실패\") elif not_quantity", "없음\") break total_price = 0 for value in self.calcul_data[idx:120+idx]: total_price += int(value[1]) moving_average_price_prev", "screen_overwrite: screen_overwrite.append(code) #포트폴리로에 담겨있는 종목들 for code in self.portfolio_stock_dict.keys(): if code not in", "self.realType.REALTYPE[sRealType]['거래량']) # 출력 : +240124 매수일때, -2034 매도일 때 g = abs(int(g)) h", "= self.dynamicCall(\"GetCommRealData(QString, int)\", sCode, self.realType.REALTYPE[sRealType]['체결시간']) # 출력 HHMMSS b = self.dynamicCall(\"GetCommRealData(QString, int)\", sCode,", "data.append(value.strip()) data.append(trading_value.strip()) data.append(date.strip()) data.append(start_price.strip()) data.append(high_price.strip()) 
data.append(low_price.strip()) data.append(\"\") self.calcul_data.append(data.copy()) if sPrevNext == \"2\": self.day_kiwoom_db(code=code,", "start. \") self.logging.logger.debug(\"Kiwoom() class start.\") ####### event loop를 실행하기 위한 변수모음 self.login_event_loop =", "현재가 : 000000003450 total_chegual_price = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, i, \"매입금액\")", "이평선보다 주가가 계속 밑에 존재하는지 확인 prev_price = None if bottom_stock_price == True:", "current_price = abs(int(current_price)) stock_quan = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['잔고']['보유수량']) stock_quan = int(stock_quan) like_quan = self.dynamicCall(\"GetChejanData(int)\",", "= 0 for value in self.calcul_data[idx:120+idx]: total_price += int(value[1]) moving_average_price_prev = total_price /", "order_gubun = order_gubun.strip().lstrip('+').lstrip('-') chegual_time_str = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['주문체결']['주문/체결시간']) # 출력: '151028' chegual_price = self.dynamicCall(\"GetChejanData(int)\",", "learn_rate = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, i, \"수익률(%)\") # 수익률 :", "rows) if sPrevNext == \"2\": self.detail_account_mystock(sPrevNext=\"2\") else: self.detail_account_info_event_loop.exit() elif sRQName == \"실시간미체결요청\": rows", "%s - 종목명: %s - 보유수량: %s - 매입가:%s - 수익률: %s -", "오늘자 주가의 고가보다 낮은지 확인\") pass_success = True if pass_success == True: self.logging.logger.debug(\"조건부", "== '': chegual_price = 0 else: chegual_price = int(chegual_price) chegual_quantity = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['주문체결']['체결량'])", "first_buy_price = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['주문체결']['(최우선)매수호가']) # 출력: -6000 first_buy_price = abs(int(first_buy_price)) ######## 새로 들어온", "self.dynamicCall(\"SetInputValue(QString, QString)\", \"수정주가구분\", \"1\") if date != None: self.dynamicCall(\"SetInputValue(QString, QString)\", \"기준일자\", 
date) self.dynamicCall(\"CommRqData(QString,", "= total_price / 120 # 오늘자 주가가 120일 이평선에 걸쳐있는지 확인 bottom_stock_price =", "QString, int, QString)\", sTrCode, sRQName, i, \"거래량\") # 출력 : 000070 trading_value =", "i, \"시가\") # 출력 : 000070 high_price = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode,", ": 000070 date = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, i, \"일자\") #", "QString)\", sTrCode, sRQName, i, \"종목번호\") # 출력 : A039423 // 알파벳 A는 장내주식,", "주문번호 할당 if order_number not in self.not_account_stock_dict.keys(): self.not_account_stock_dict.update({order_number: {}}) self.not_account_stock_dict[order_number].update({\"종목코드\": sCode}) self.not_account_stock_dict[order_number].update({\"주문번호\": order_number})", "possible_quantity = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, i, \"매매가능수량\") self.logging.logger.debug(\"종목코드: %s -", "{} self.not_account_stock_dict = {} self.deposit = 0 #예수금 self.use_money = 0 #실제 투자에", "stock_name}) self.not_account_stock_dict[order_number].update({\"주문상태\": order_status}) self.not_account_stock_dict[order_number].update({\"주문수량\": order_quan}) self.not_account_stock_dict[order_number].update({\"주문가격\": order_price}) self.not_account_stock_dict[order_number].update({\"미체결수량\": not_chegual_quan}) self.not_account_stock_dict[order_number].update({\"원주문번호\": origin_order_number}) self.not_account_stock_dict[order_number].update({\"주문구분\": order_gubun})", "/ 4 output_deposit = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, 0, \"출금가능금액\") self.output_deposit", "QTest.qWait(5000) #실시간 수신 관련 함수 self.dynamicCall(\"SetRealReg(QString, QString, QString, QString)\", self.screen_start_stop_real, '', self.realType.REALTYPE['장시작시간']['장운영구분'], \"0\")", "= self.dynamicCall(\"GetCommRealData(QString, int)\", sCode, self.realType.REALTYPE[sRealType]['현재가']) # 출력 : +(-)2520 b = abs(int(b)) c", "출력 : 000070 
low_price = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, i, \"저가\")", "self.logging.logger.debug(\"장 시작\") elif value == \"2\": self.logging.logger.debug(\"장 종료, 동시호가로 넘어감\") elif value ==", "if code in self.account_stock_dict: # dictionary 에 해당 종목이 있나 확인 pass else:", "그릴만큼의 데이터가 있는지 체크 if self.calcul_data == None or len(self.calcul_data) < 120: pass_success", "\"수익률(%)\") # 수익률 : -000000001.94 current_price = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName,", "# 출력: 5 default : '' if chegual_quantity == '': chegual_quantity = 0", "int, QString)\", sTrCode, sRQName, i, \"미체결수량\") ok_quantity = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode,", "detail_account_mystock(self, sPrevNext=\"0\"): self.dynamicCall(\"SetInputValue(QString, QString)\", \"계좌번호\", self.account_num) self.dynamicCall(\"SetInputValue(QString, QString)\", \"비밀번호\", \"0000\") self.dynamicCall(\"SetInputValue(QString, QString)\", \"비밀번호입력매체구분\",", "elif code not in self.portfolio_stock_dict.keys(): self.portfolio_stock_dict.update({code: {\"스크린번호\": str(self.screen_real_stock), \"주문용스크린번호\": str(self.screen_meme_stock)}}) cnt += 1", "이벤트루프 실행 def login_slot(self, err_code): self.logging.logger.debug(errors(err_code)[1]) #로그인 처리가 완료됐으면 이벤트 루프를 종료한다. 
self.login_event_loop.exit()", "if order_gubun == \"매수\" and not_quantity > 0 and e > meme_price: order_success", "= abs(int(first_sell_price)) first_buy_price = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['잔고']['(최우선)매수호가']) first_buy_price = abs(int(first_buy_price)) if sCode not in", "in range(cnt): data = [] current_price = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName,", "\"종목명\") # 출럭 : 한국기업평가 stock_quantity = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName,", "total_price += int(value[1]) moving_average_price_prev = total_price / 120 if moving_average_price_prev <= int(self.calcul_data[idx][6]) and", "> moving_average_price_prev and check_price > prev_price: self.logging.logger.debug(\"포착된 이평선의 가격이 오늘자 이평선 가격보다 낮은", "f.write(\"%s\\t%s\\t%s\\n\" % (code, code_nm, str(self.calcul_data[0][1]))) f.close() elif pass_success == False: self.logging.logger.debug(\"조건부 통과 못함\")", "있는지 체크 if self.calcul_data == None or len(self.calcul_data) < 120: pass_success = False", "> 5 or meme_rate < -5): order_success = self.dynamicCall( \"SendOrder(QString, QString, QString, int,", "동작\", fallback=\"주식 자동화 프로그램 동작\", text=\"주식 자동화 프로그램이 동작 되었습니다.\" ) def get_ocx_instance(self):", "QString)\", sTrCode, sRQName, i, \"종목명\") order_no = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName,", "self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, i, \"주문수량\") order_price = self.dynamicCall(\"GetCommData(QString, QString, int,", "int(buy_price.strip()) learn_rate = float(learn_rate.strip()) current_price = int(current_price.strip()) total_chegual_price = int(total_chegual_price.strip()) possible_quantity = int(possible_quantity.strip())", "code_nm = code_nm.strip() order_no = int(order_no.strip()) order_status = order_status.strip() order_quantity = int(order_quantity.strip()) order_price", "price_top_moving = True prev_price = 
int(self.calcul_data[idx][7]) break idx += 1 # 해당부분 이평선이", "self.realType.REALTYPE['매도수구분'][meme_gubun] first_sell_price = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['잔고']['(최우선)매도호가']) first_sell_price = abs(int(first_sell_price)) first_buy_price = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['잔고']['(최우선)매수호가']) first_buy_price", ": \"000000\" order_number = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['주문체결']['주문번호']) # 출럭: 0115061 마지막 주문번호 order_status =", "‘’], [‘’, ‘현재가’, ’거래량’, ‘거래대금’, ‘날짜’, ‘시가’, ‘고가’, ‘저가’, ‘’]. […]] cnt =", "이벤트루프 self.detail_account_info_event_loop = QEventLoop() # 예수금 요청용 이벤트루프 self.calculator_event_loop = QEventLoop() ######################################### ###########", "moving_average_price_prev <= int(self.calcul_data[idx][6]) and idx <= 20: self.logging.logger.debug(\"20일 동안 주가가 120일 이평선과 같거나", "self.jango_dict[sCode].update({\"총매입가\": total_buy_price}) self.jango_dict[sCode].update({\"매도매수구분\": meme_gubun}) self.jango_dict[sCode].update({\"(최우선)매도호가\": first_sell_price}) self.jango_dict[sCode].update({\"(최우선)매수호가\": first_buy_price}) if stock_quan == 0: del", "\"SendOrder(QString, QString, QString, int, QString, int, int, QString, QString)\", [\"신규매수\", self.portfolio_stock_dict[sCode][\"주문용스크린번호\"], self.account_num, 1,", "’거래량’, ‘거래대금’, ‘날짜’, ‘시가’, ‘고가’, ‘저가’, ‘’]. […]] cnt = self.dynamicCall(\"GetRepeatCnt(QString, QString)\", sTrCode,", "i, \"현재가\") # 현재가 : 000000003450 total_chegual_price = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode,", "\"r\"을 인자로 던져주면 파일 내용을 읽어 오겠다는 뜻이다. 
lines = f.readlines() #파일에 있는", "break total_price = 0 for value in self.calcul_data[idx:120+idx]: total_price += int(value[1]) moving_average_price_prev =", "# 출력: '151028' chegual_price = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['주문체결']['체결가']) # 출력: 2110 default : ''", "self.logging = Logging() # self.slack = Slack() #슬랙 동작 #print(\"kiwoom() class start. \")", "\"체결량\") code = code.strip() code_nm = code_nm.strip() order_no = int(order_no.strip()) order_status = order_status.strip()", "self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['잔고']['종목코드'])[1:] stock_name = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['잔고']['종목명']) stock_name = stock_name.strip() current_price = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['잔고']['현재가'])", "chegual_quantity = 0 else: chegual_quantity = int(chegual_quantity) current_price = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['주문체결']['현재가']) # 출력:", "PyQt5.QtCore import * from config.errorCode import * from PyQt5.QtTest import * from config.kiwoomType", "수 %s\" % cnt) for i in range(cnt): data = [] current_price =", "= False check_price = None if int(self.calcul_data[0][7]) <= moving_average_price and moving_average_price <= int(self.calcul_data[0][6]):", "# 출력 : 000070 value = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, i,", "# 수익률 : -000000001.94 current_price = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, i,", "self.portfolio_stock_dict.keys(): self.portfolio_stock_dict.update({code: {\"스크린번호\": str(self.screen_real_stock), \"주문용스크린번호\": str(self.screen_meme_stock)}}) cnt += 1 # 실시간 데이터 얻어오기", "QString)\", sTrCode, sRQName, i, \"주문가격\") order_gubun = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName,", "int, QString)\", sTrCode, sRQName, 0, \"총매입금액\") self.total_buy_money = int(total_buy_money) total_profit_loss_money = 
self.dynamicCall(\"GetCommData(QString, QString,", "in self.portfolio_stock_dict.keys(): if code not in screen_overwrite: screen_overwrite.append(code) # 스크린번호 할당 cnt =", "실시간 데이터 얻어오기 def realdata_slot(self, sCode, sRealType, sRealData): if sRealType == \"장시작시간\": fid", "int, int, QString, QString)\", [\"매수취소\", self.portfolio_stock_dict[sCode][\"주문용스크린번호\"], self.account_num, 3, code, 0, 0, self.realType.SENDTYPE['거래구분']['지정가'], order_num]", "A039423 // 알파벳 A는 장내주식, J는 ELW종목, Q는 ETN종목 code = code.strip()[1:] code_nm", "int, QString, int, int, QString, QString)\", [\"신규매도\", self.portfolio_stock_dict[sCode][\"주문용스크린번호\"], self.account_num, 2, sCode, jd['주문가능수량'], 0,", "실행 self.get_ocx_instance() #OCX 방식을 파이썬에 사용할 수 있게 변환해 주는 함수 self.event_slots() #", "#파일에 있는 내용들이 모두 읽어와 진다. for line in lines: #줄바꿈된 내용들이 한줄", "abs(int(f)) g = self.dynamicCall(\"GetCommRealData(QString, int)\", sCode, self.realType.REALTYPE[sRealType]['거래량']) # 출력 : +240124 매수일때, -2034", "= self.use_money / 4 output_deposit = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, 0,", "--- %s\" %(sScrNo, sRQName, sTrCode, msg)) #파일 삭제 def file_delete(self): if os.path.isfile(\"files/condition_stock.txt\"): os.remove(\"files/condition_stock.txt\")", "QString)\", sTrCode, sRQName, 0, \"출금가능금액\") self.output_deposit = int(output_deposit) self.logging.logger.debug(\"예수금 : %s\" % self.output_deposit)", "int(total_buy_price) meme_gubun = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['잔고']['매도매수구분']) meme_gubun = self.realType.REALTYPE['매도수구분'][meme_gubun] first_sell_price = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['잔고']['(최우선)매도호가']) first_sell_price", "\"총매입금액\") self.total_buy_money = int(total_buy_money) total_profit_loss_money = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, 0,", "order_num] ) if order_success == 0: self.logging.logger.debug(\"매수취소 전달 성공\") else: self.logging.logger.debug(\"매수취소 
전달 실패\")", "self.jango_dict[sCode].update({\"현재가\": current_price}) self.jango_dict[sCode].update({\"종목코드\": sCode}) self.jango_dict[sCode].update({\"종목명\": stock_name}) self.jango_dict[sCode].update({\"보유수량\": stock_quan}) self.jango_dict[sCode].update({\"주문가능수량\": like_quan}) self.jango_dict[sCode].update({\"매입단가\": buy_price}) self.jango_dict[sCode].update({\"총매입가\":", "self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['주문체결']['주문수량']) # 출력 : 3 order_quan = int(order_quan) order_price = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['주문체결']['주문가격'])", "+= 1 self.screen_meme_stock = str(meme_screen) if code in self.portfolio_stock_dict.keys(): self.portfolio_stock_dict[code].update({\"스크린번호\": str(self.screen_real_stock)}) self.portfolio_stock_dict[code].update({\"주문용스크린번호\": str(self.screen_meme_stock)})", "self.account_stock_dict[code].update({\"현재가\": current_price}) self.account_stock_dict[code].update({\"매입금액\": total_chegual_price}) self.account_stock_dict[code].update({'매매가능수량' : possible_quantity}) self.logging.logger.debug(\"sPreNext : %s\" % sPrevNext) print(\"계좌에", "전달 실패\") elif d > 2.0 and sCode not in self.jango_dict: self.logging.logger.debug(\"매수조건 통과", "i = abs(int(i)) j = self.dynamicCall(\"GetCommRealData(QString, int)\", sCode, self.realType.REALTYPE[sRealType]['시가']) # 출력 : +(-)2530", "int)\", sCode, self.realType.REALTYPE[sRealType]['저가']) # 출력 : +(-)2530 k = abs(int(k)) if sCode not", "self.dynamicCall(\"GetCommRealData(QString, int)\", sCode, self.realType.REALTYPE[sRealType]['고가']) # 출력 : +(-)2530 i = abs(int(i)) j =", "== 0: self.logging.logger.debug(\"매도주문 전달 성공\") else: self.logging.logger.debug(\"매도주문 전달 실패\") elif d > 2.0", "가격이 오늘자 이평선 가격보다 낮은 것 확인\") self.logging.logger.debug(\"포착된 부분의 저가가 오늘자 주가의 고가보다", "'': chegual_price = 0 else: chegual_price = int(chegual_price) chegual_quantity = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['주문체결']['체결량']) #", "= 
self.dynamicCall(\"GetRepeatCnt(QString, QString)\", sTrCode, sRQName) for i in range(rows): code = self.dynamicCall(\"GetCommData(QString, QString,", "= self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['주문체결']['(최우선)매수호가']) # 출력: -6000 first_buy_price = abs(int(first_buy_price)) ######## 새로 들어온 주문이면", "#OCX 방식을 파이썬에 사용할 수 있게 변환해 주는 함수 self.event_slots() # 키움과 연결하기", "learn_rate, current_price)) if code in self.account_stock_dict: # dictionary 에 해당 종목이 있나 확인", "구간 확인됨\") price_top_moving = True prev_price = int(self.calcul_data[idx][7]) break idx += 1 #", "Code : %s is updating... \" % (idx + 1, len(code_list), code)) self.day_kiwoom_db(code=code)", "j = abs(int(j)) k = self.dynamicCall(\"GetCommRealData(QString, int)\", sCode, self.realType.REALTYPE[sRealType]['저가']) # 출력 : +(-)2530", "detail_account_info(self, sPrevNext=\"0\"): self.dynamicCall(\"SetInputValue(QString, QString)\", \"계좌번호\", self.account_num) self.dynamicCall(\"SetInputValue(QString, QString)\", \"비밀번호\", \"0000\") self.dynamicCall(\"SetInputValue(QString, QString)\", \"비밀번호입력매체구분\",", "code not in screen_overwrite: screen_overwrite.append(code) #미체결에 있는 종목들 for order_number in self.not_account_stock_dict.keys(): code", "self.account_stock_dict.keys(): if code not in screen_overwrite: screen_overwrite.append(code) #미체결에 있는 종목들 for order_number in", "= self.dynamicCall(\"GetLoginInfo(QString)\", \"ACCNO\") # 계좌번호 반환 account_num = account_list.split(';')[0] self.account_num = account_num self.logging.logger.debug(\"계좌번호", "4 output_deposit = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, 0, \"출금가능금액\") self.output_deposit =", "(idx + 1, len(code_list), code)) self.day_kiwoom_db(code=code) def day_kiwoom_db(self, code=None, date=None, sPrevNext=\"0\"): QTest.qWait(3600) #3.6초마다", "> 2.0 and sCode not in self.jango_dict: self.logging.logger.debug(\"매수조건 통과 %s \" % sCode)", "meme_screen += 1 self.screen_meme_stock = str(meme_screen) if code in 
self.portfolio_stock_dict.keys(): self.portfolio_stock_dict[code].update({\"스크린번호\": str(self.screen_real_stock)}) self.portfolio_stock_dict[code].update({\"주문용스크린번호\":", "self.jango_dict: self.logging.logger.debug(\"매수조건 통과 %s \" % sCode) result = (self.use_money * 0.1) /", "0, \"총평가손익금액\") self.total_profit_loss_money = int(total_profit_loss_money) total_profit_loss_rate = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName,", "in self.not_account_stock_dict: pass else: self.not_account_stock_dict[order_no] = {} self.not_account_stock_dict[order_no].update({'종목코드': code}) self.not_account_stock_dict[order_no].update({'종목명': code_nm}) self.not_account_stock_dict[order_no].update({'주문번호': order_no})", "read_code(self): if os.path.exists(\"files/condition_stock.txt\"): # 해당 경로에 파일이 있는지 체크한다. f = open(\"files/condition_stock.txt\", \"r\",", "+(-)2520 b = abs(int(b)) c = self.dynamicCall(\"GetCommRealData(QString, int)\", sCode, self.realType.REALTYPE[sRealType]['전일대비']) # 출력 :", "QString)\", \"종목코드\", code) self.dynamicCall(\"SetInputValue(QString, QString)\", \"수정주가구분\", \"1\") if date != None: self.dynamicCall(\"SetInputValue(QString, QString)\",", "self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['잔고']['종목명']) stock_name = stock_name.strip() current_price = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['잔고']['현재가']) current_price = abs(int(current_price)) stock_quan", "self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, i, \"고가\") # 출력 : 000070 low_price", "120일 이평선을 그릴만큼의 데이터가 있는지 체크 if self.calcul_data == None or len(self.calcul_data) <", "[\"신규매수\", self.portfolio_stock_dict[sCode][\"주문용스크린번호\"], self.account_num, 1, sCode, quantity, e, self.realType.SENDTYPE['거래구분']['지정가'], \"\"] ) if order_success ==", "config.errorCode import * from PyQt5.QtTest import * from config.kiwoomType import * from config.log_class", "i, \"종목명\") order_no = 
self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, i, \"주문번호\") order_status", "self.realType.REALTYPE['주문체결']['(최우선)매도호가']) # 출력: -6010 first_sell_price = abs(int(first_sell_price)) first_buy_price = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['주문체결']['(최우선)매수호가']) # 출력:", "‘날짜’, ‘시가’, ‘고가’, ‘저가’. ‘’], [‘’, ‘현재가’, ’거래량’, ‘거래대금’, ‘날짜’, ‘시가’, ‘고가’, ‘저가’,", "self.not_account_stock_dict[order_no].update({'종목코드': code}) self.not_account_stock_dict[order_no].update({'종목명': code_nm}) self.not_account_stock_dict[order_no].update({'주문번호': order_no}) self.not_account_stock_dict[order_no].update({'주문상태': order_status}) self.not_account_stock_dict[order_no].update({'주문수량': order_quantity}) self.not_account_stock_dict[order_no].update({'주문가격': order_price}) self.not_account_stock_dict[order_no].update({'주문구분':", "있게 변환해 주는 함수 self.event_slots() # 키움과 연결하기 위한 시그널 / 슬롯 모음", "sGubun, nItemCnt, sFidList): if int(sGubun) == 0: #주문체결 account_num = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['주문체결']['계좌번호']) sCode", "import * from PyQt5.QtTest import * from config.kiwoomType import * from config.log_class import", "self.dynamicCall( \"SendOrder(QString, QString, QString, int, QString, int, int, QString, QString)\", [\"신규매수\", self.portfolio_stock_dict[sCode][\"주문용스크린번호\"], self.account_num,", "i, \"거래량\") # 출력 : 000070 trading_value = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode,", "if sPrevNext == \"2\": self.day_kiwoom_db(code=code, sPrevNext=sPrevNext) else: self.logging.logger.debug(\"총 일수 %s\" % len(self.calcul_data)) pass_success", "sScrNo) # 스크린번호 연결 끊기 def get_code_list_by_market(self, market_code): ''' 종목코드 리스트 받기 #0:장내,", "= self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, i, \"주문가격\") order_gubun = self.dynamicCall(\"GetCommData(QString, QString,", "\"2\": self.day_kiwoom_db(code=code, sPrevNext=sPrevNext) else: 
self.logging.logger.debug(\"총 일수 %s\" % len(self.calcul_data)) pass_success = False #", "0, self.realType.SENDTYPE['거래구분']['시장가'], \"\"] ) if order_success == 0: self.logging.logger.debug(\"매도주문 전달 성공\") del self.account_stock_dict[sCode]", "int)\", sCode, fid) if value == '0': self.logging.logger.debug(\"장 시작 전\") elif value ==", "종목들 for order_number in self.not_account_stock_dict.keys(): code = self.not_account_stock_dict[order_number]['종목코드'] if code not in screen_overwrite:", "code = code.strip()[1:] code_nm = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, i, \"종목명\")", "이벤트루프 self.calculator_event_loop = QEventLoop() ######################################### ########### 전체 종목 관리 self.all_stock_dict = {} ###########################", "= abs(int(f)) g = self.dynamicCall(\"GetCommRealData(QString, int)\", sCode, self.realType.REALTYPE[sRealType]['거래량']) # 출력 : +240124 매수일때,", "오겠다는 뜻이다. lines = f.readlines() #파일에 있는 내용들이 모두 읽어와 진다. for line", "프로그램 동작\", text=\"주식 자동화 프로그램이 동작 되었습니다.\" ) def get_ocx_instance(self): self.setControl(\"KHOPENAPI.KHOpenAPICtrl.1\") # 레지스트리에", "= code_nm.strip() stock_quantity = int(stock_quantity.strip()) buy_price = int(buy_price.strip()) learn_rate = float(learn_rate.strip()) current_price =", "self.not_account_stock_dict[order_number].update({\"주문/체결시간\": chegual_time_str}) self.not_account_stock_dict[order_number].update({\"체결가\": chegual_price}) self.not_account_stock_dict[order_number].update({\"체결량\": chegual_quantity}) self.not_account_stock_dict[order_number].update({\"현재가\": current_price}) self.not_account_stock_dict[order_number].update({\"(최우선)매도호가\": first_sell_price}) self.not_account_stock_dict[order_number].update({\"(최우선)매수호가\": first_buy_price}) elif", "sCode}) self.not_account_stock_dict[order_number].update({\"주문번호\": order_number}) self.not_account_stock_dict[order_number].update({\"종목명\": stock_name}) self.not_account_stock_dict[order_number].update({\"주문상태\": 
order_status}) self.not_account_stock_dict[order_number].update({\"주문수량\": order_quan}) self.not_account_stock_dict[order_number].update({\"주문가격\": order_price}) self.not_account_stock_dict[order_number].update({\"미체결수량\": not_chegual_quan})", "= self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, i, \"종목번호\") # 출력 : A039423", "int, QString)\", sTrCode, sRQName, i, \"매입금액\") possible_quantity = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode,", "시그널 / 슬롯 연결 self.signal_login_commConnect() #로그인 요청 시그널 포함 self.get_account_info() #계좌번호 가져오기 self.detail_account_info()", "True: self.logging.logger.debug(\"조건부 통과됨\") code_nm = self.dynamicCall(\"GetMasterCodeName(QString)\", code) f = open(\"files/condition_stock.txt\", \"a\", encoding=\"utf8\") f.write(\"%s\\t%s\\t%s\\n\"", "if value == '0': self.logging.logger.debug(\"장 시작 전\") elif value == '3': self.logging.logger.debug(\"장 시작\")", "= self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['주문체결']['종목코드'])[1:] stock_name = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['주문체결']['종목명']) stock_name = stock_name.strip() origin_order_number = self.dynamicCall(\"GetChejanData(int)\",", "출력 : +(-)2530 i = abs(int(i)) j = self.dynamicCall(\"GetCommRealData(QString, int)\", sCode, self.realType.REALTYPE[sRealType]['시가']) #", "[\"신규매도\", self.portfolio_stock_dict[sCode][\"주문용스크린번호\"], self.account_num, 2, sCode, asd['매매가능수량'], 0, self.realType.SENDTYPE['거래구분']['시장가'], \"\"] ) if order_success ==", "self.not_account_stock_dict[order_number].update({\"체결가\": chegual_price}) self.not_account_stock_dict[order_number].update({\"체결량\": chegual_quantity}) self.not_account_stock_dict[order_number].update({\"현재가\": current_price}) self.not_account_stock_dict[order_number].update({\"(최우선)매도호가\": first_sell_price}) self.not_account_stock_dict[order_number].update({\"(최우선)매수호가\": first_buy_price}) elif int(sGubun) ==", "order_price = 
self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['주문체결']['주문가격']) # 출력: 21000 order_price = int(order_price) not_chegual_quan = self.dynamicCall(\"GetChejanData(int)\",", "g}) self.portfolio_stock_dict[sCode].update({\"누적거래량\": h}) self.portfolio_stock_dict[sCode].update({\"고가\": i}) self.portfolio_stock_dict[sCode].update({\"시가\": j}) self.portfolio_stock_dict[sCode].update({\"저가\": k}) if sCode in self.account_stock_dict.keys()", "self.all_stock_dict.update({'포트폴리오종목': self.portfolio_stock_dict}) def screen_number_setting(self): screen_overwrite = [] #계좌평가잔고내역에 있는 종목들 for code in", "self.calculator_event_loop.exec_() def read_code(self): if os.path.exists(\"files/condition_stock.txt\"): # 해당 경로에 파일이 있는지 체크한다. f =", "QTimer.singleShot(5000, self.not_concluded_account) #5초 뒤에 미체결 종목들 가져오기 실행 ######################################### QTest.qWait(10000) self.read_code() self.screen_number_setting() QTest.qWait(5000)", "전달 실패\") elif not_quantity == 0: del self.not_account_stock_dict[order_num] # 실시간 체결 정보 def", "b}) self.portfolio_stock_dict[sCode].update({\"전일대비\": c}) self.portfolio_stock_dict[sCode].update({\"등락율\": d}) self.portfolio_stock_dict[sCode].update({\"(최우선)매도호가\": e}) self.portfolio_stock_dict[sCode].update({\"(최우선)매수호가\": f}) self.portfolio_stock_dict[sCode].update({\"거래량\": g}) self.portfolio_stock_dict[sCode].update({\"누적거래량\": h})", "QString, QString, int, QString, int, int, QString, QString)\", [\"매수취소\", self.portfolio_stock_dict[sCode][\"주문용스크린번호\"], self.account_num, 3, code,", "self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['잔고']['계좌번호']) sCode = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['잔고']['종목코드'])[1:] stock_name = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['잔고']['종목명']) stock_name = stock_name.strip()", "int, QString)\", sTrCode, sRQName, 0, \"총수익률(%)\") self.total_profit_loss_rate = float(total_profit_loss_rate) self.logging.logger.debug(\"계좌평가잔고내역요청 싱글데이터 : %s", 
"self.logging.logger.debug(\"예수금 : %s\" % self.output_deposit) self.stop_screen_cancel(self.screen_my_info) self.detail_account_info_event_loop.exit() elif sRQName == \"계좌평가잔고내역요청\": total_buy_money =", "self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['주문체결']['종목명']) stock_name = stock_name.strip() origin_order_number = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['주문체결']['원주문번호']) # 출력 : defaluse", "\" % len(code_list)) for idx, code in enumerate(code_list): self.dynamicCall(\"DisconnectRealData(QString)\", self.screen_calculation_stock) # 스크린 연결", "\"총수익률(%)\") self.total_profit_loss_rate = float(total_profit_loss_rate) self.logging.logger.debug(\"계좌평가잔고내역요청 싱글데이터 : %s - %s - %s\" %", "meme_price: order_success = self.dynamicCall( \"SendOrder(QString, QString, QString, int, QString, int, int, QString, QString)\",", "pretext=\"주식자동화 프로그램 동작\", title=\"주식 자동화 프로그램 동작\", fallback=\"주식 자동화 프로그램 동작\", text=\"주식 자동화", "# 예수금 요청용 이벤트루프 self.calculator_event_loop = QEventLoop() ######################################### ########### 전체 종목 관리 self.all_stock_dict", "# 출력 : 000070 data.append(\"\") data.append(current_price.strip()) data.append(value.strip()) data.append(trading_value.strip()) data.append(date.strip()) data.append(start_price.strip()) data.append(high_price.strip()) data.append(low_price.strip()) data.append(\"\")", "order_status = order_status.strip() order_quantity = int(order_quantity.strip()) order_price = int(order_price.strip()) order_gubun = order_gubun.strip().lstrip('+').lstrip('-') not_quantity", "\"opt10081\", sPrevNext, self.screen_calculation_stock) # Tr서버로 전송 -Transaction self.calculator_event_loop.exec_() def read_code(self): if os.path.exists(\"files/condition_stock.txt\"): #", "def event_slots(self): self.OnEventConnect.connect(self.login_slot) # 로그인 관련 이벤트 self.OnReceiveTrData.connect(self.trdata_slot) # 트랜잭션 요청 관련 이벤트", "% self.output_deposit) self.stop_screen_cancel(self.screen_my_info) 
self.detail_account_info_event_loop.exit() elif sRQName == \"계좌평가잔고내역요청\": total_buy_money = self.dynamicCall(\"GetCommData(QString, QString, int,", "종목 : %s \" % self.not_account_stock_dict[order_no]) self.detail_account_info_event_loop.exit() elif sRQName == \"주식일봉차트조회\": code =", "self.dynamicCall(\"GetCommRealData(QString, int)\", sCode, self.realType.REALTYPE[sRealType]['(최우선)매도호가']) # 출력 : +(-)2520 e = abs(int(e)) f =", "self.total_profit_loss_money = int(total_profit_loss_money) total_profit_loss_rate = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, 0, \"총수익률(%)\")", "= abs(int(first_sell_price)) first_buy_price = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['주문체결']['(최우선)매수호가']) # 출력: -6000 first_buy_price = abs(int(first_buy_price)) ########", "order_gubun.strip().lstrip('+').lstrip('-') not_quantity = int(not_quantity.strip()) ok_quantity = int(ok_quantity.strip()) if order_no in self.not_account_stock_dict: pass else:", "\"시가\") # 출력 : 000070 high_price = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName,", "\"주문수량\") order_price = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, i, \"주문가격\") order_gubun =", "== 0: temp_screen += 1 self.screen_real_stock = str(temp_screen) if (cnt % 50) ==", "= {} ######################## ########### 종목 분석 용 self.calcul_data = [] ########################################## ####### 요청", "= code_list.split(';')[:-1] return code_list def calculator_fnc(self): ''' 종목 분석관련 함수 모음 :return: '''", "int, QString)\", sTrCode, sRQName, i, \"종목명\") order_no = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode,", "i = self.dynamicCall(\"GetCommRealData(QString, int)\", sCode, self.realType.REALTYPE[sRealType]['고가']) # 출력 : +(-)2530 i = abs(int(i))", "self.realType.REALTYPE['잔고']['종목명']) stock_name = stock_name.strip() current_price = self.dynamicCall(\"GetChejanData(int)\", 
self.realType.REALTYPE['잔고']['현재가']) current_price = abs(int(current_price)) stock_quan =", "출력 : 000070 high_price = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, i, \"고가\")", "000070 data.append(\"\") data.append(current_price.strip()) data.append(value.strip()) data.append(trading_value.strip()) data.append(date.strip()) data.append(start_price.strip()) data.append(high_price.strip()) data.append(low_price.strip()) data.append(\"\") self.calcul_data.append(data.copy()) if sPrevNext", "# 출력 : +(-)2530 k = abs(int(k)) if sCode not in self.portfolio_stock_dict: self.portfolio_stock_dict.update({sCode:{}})", "QString, int, QString)\", sTrCode, sRQName, i, \"주문상태\") # 접수,확인,체결 order_quantity = self.dynamicCall(\"GetCommData(QString, QString,", "self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['주문체결']['현재가']) # 출력: -6000 current_price = abs(int(current_price)) first_sell_price = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['주문체결']['(최우선)매도호가']) #", "번호 self.screen_real_stock = \"5000\" #종목별 할당할 스크린 번호 self.screen_meme_stock = \"6000\" #종목별 할당할", "account_num) def detail_account_info(self, sPrevNext=\"0\"): self.dynamicCall(\"SetInputValue(QString, QString)\", \"계좌번호\", self.account_num) self.dynamicCall(\"SetInputValue(QString, QString)\", \"비밀번호\", \"0000\") self.dynamicCall(\"SetInputValue(QString,", "None if bottom_stock_price == True: moving_average_price_prev = 0 price_top_moving = False idx =", "QString, int, QString)\", sTrCode, sRQName, i, \"일자\") # 출력 : 000070 start_price =", "__init__(self): super().__init__() self.realType = RealType() self.logging = Logging() # self.slack = Slack() #슬랙", "QString)\", \"기준일자\", date) self.dynamicCall(\"CommRqData(QString, QString, int, QString)\", \"주식일봉차트조회\", \"opt10081\", sPrevNext, self.screen_calculation_stock) # Tr서버로", "idx = 1 while True: if len(self.calcul_data[idx:]) < 120: # 120일치가 있는지 계속", "self.dynamicCall(\"GetCodeListByMarket(QString)\", 
market_code) code_list = code_list.split(';')[:-1] return code_list def calculator_fnc(self): ''' 종목 분석관련 함수", "/ 120 # 오늘자 주가가 120일 이평선에 걸쳐있는지 확인 bottom_stock_price = False check_price", "QString, int, QString)\", sTrCode, sRQName, 0, \"총평가손익금액\") self.total_profit_loss_money = int(total_profit_loss_money) total_profit_loss_rate = self.dynamicCall(\"GetCommData(QString,", "self.screen_meme_stock = str(meme_screen) if code in self.portfolio_stock_dict.keys(): self.portfolio_stock_dict[code].update({\"스크린번호\": str(self.screen_real_stock)}) self.portfolio_stock_dict[code].update({\"주문용스크린번호\": str(self.screen_meme_stock)}) elif code", "#출력가능 금액 self.total_profit_loss_money = 0 #총평가손익금액 self.total_profit_loss_rate = 0.0 #총수익률(%) ######################################## ######## 종목", "int)\", sCode, self.realType.REALTYPE[sRealType]['고가']) # 출력 : +(-)2530 i = abs(int(i)) j = self.dynamicCall(\"GetCommRealData(QString,", "self.realType.REALTYPE['주문체결']['종목코드'])[1:] stock_name = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['주문체결']['종목명']) stock_name = stock_name.strip() origin_order_number = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['주문체결']['원주문번호']) #", "시그널 self.login_event_loop.exec_() # 이벤트루프 실행 def login_slot(self, err_code): self.logging.logger.debug(errors(err_code)[1]) #로그인 처리가 완료됐으면 이벤트", "f = abs(int(f)) g = self.dynamicCall(\"GetCommRealData(QString, int)\", sCode, self.realType.REALTYPE[sRealType]['거래량']) # 출력 : +240124", "종목 분석 용 self.calcul_data = [] ########################################## ####### 요청 스크린 번호 self.screen_my_info =", "asd['매매가능수량'], 0, self.realType.SENDTYPE['거래구분']['시장가'], \"\"] ) if order_success == 0: self.logging.logger.debug(\"매도주문 전달 성공\") del", "code_nm = self.dynamicCall(\"GetMasterCodeName(QString)\", code) f = open(\"files/condition_stock.txt\", \"a\", encoding=\"utf8\") f.write(\"%s\\t%s\\t%s\\n\" % (code, code_nm,", "= self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", 
sTrCode, sRQName, i, \"체결량\") code = code.strip() code_nm", "from config.slack import * class Kiwoom(QAxWidget): def __init__(self): super().__init__() self.realType = RealType() self.logging", "sys from PyQt5.QAxContainer import * from PyQt5.QtCore import * from config.errorCode import *", "sScrNo, sRQName, sTrCode, sRecordName, sPrevNext): if sRQName == \"예수금상세현황요청\": deposit = self.dynamicCall(\"GetCommData(QString, QString,", "self.stop_screen_cancel(self.screen_my_info) self.detail_account_info_event_loop.exit() elif sRQName == \"계좌평가잔고내역요청\": total_buy_money = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode,", "= self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['주문체결']['체결량']) # 출력: 5 default : '' if chegual_quantity == '':", "QString, int, QString)\", sTrCode, sRQName, i, \"종목번호\") # 출력 : A039423 // 알파벳", "출력 : 000070 data.append(\"\") data.append(current_price.strip()) data.append(value.strip()) data.append(trading_value.strip()) data.append(date.strip()) data.append(start_price.strip()) data.append(high_price.strip()) data.append(low_price.strip()) data.append(\"\") self.calcul_data.append(data.copy())", "elif sRQName == \"실시간미체결요청\": rows = self.dynamicCall(\"GetRepeatCnt(QString, QString)\", sTrCode, sRQName) for i in", "code_list = code_list.split(';')[:-1] return code_list def calculator_fnc(self): ''' 종목 분석관련 함수 모음 :return:", "get_account_info(self): account_list = self.dynamicCall(\"GetLoginInfo(QString)\", \"ACCNO\") # 계좌번호 반환 account_num = account_list.split(';')[0] self.account_num =", "QString)\", \"조회구분\", \"1\") self.dynamicCall(\"CommRqData(QString, QString, int, QString)\", \"예수금상세현황요청\", \"opw00001\", sPrevNext, self.screen_my_info) self.detail_account_info_event_loop.exec_() def", "\"비밀번호입력매체구분\", \"00\") self.dynamicCall(\"SetInputValue(QString, QString)\", \"조회구분\", \"1\") self.dynamicCall(\"CommRqData(QString, QString, int, QString)\", \"예수금상세현황요청\", \"opw00001\", sPrevNext,", ") def 
get_ocx_instance(self): self.setControl(\"KHOPENAPI.KHOpenAPICtrl.1\") # 레지스트리에 저장된 api 모듈 불러오기 def event_slots(self): self.OnEventConnect.connect(self.login_slot)", "모음 self.real_event_slot() # 실시간 이벤트 시그널 / 슬롯 연결 self.signal_login_commConnect() #로그인 요청 시그널", "updating... \" % (idx + 1, len(code_list), code)) self.day_kiwoom_db(code=code) def day_kiwoom_db(self, code=None, date=None,", "조건 통과 못함\") price_top_moving = False break elif int(self.calcul_data[idx][7]) > moving_average_price_prev and idx", "# 출럭: 0115061 마지막 주문번호 order_status = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['주문체결']['주문상태']) # 출력: 접수, 확인,", "sRealType, sRealData): if sRealType == \"장시작시간\": fid = self.realType.REALTYPE[sRealType]['장운영구분'] # (0:장시작전, 2:장종료전(20분), 3:장시작,", "= code.strip() code_nm = code_nm.strip() order_no = int(order_no.strip()) order_status = order_status.strip() order_quantity =", "stop_screen_cancel(self, sScrNo=None): self.dynamicCall(\"DisconnectRealData(QString)\", sScrNo) # 스크린번호 연결 끊기 def get_code_list_by_market(self, market_code): ''' 종목코드", "= self.account_stock_dict[sCode] meme_rate = (b - asd['매입가']) / asd['매입가'] * 100 if asd['매매가능수량']", "(self.use_money * 0.1) / e quantity = int(result) order_success = self.dynamicCall( \"SendOrder(QString, QString,", "self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, 0, \"종목코드\") code = code.strip() # data", "= True prev_price = int(self.calcul_data[idx][7]) break idx += 1 # 해당부분 이평선이 가장", "and sCode not in self.jango_dict.keys(): asd = self.account_stock_dict[sCode] meme_rate = (b - asd['매입가'])", "QString, int, QString)\", sTrCode, sRQName, 0, \"예수금\") self.deposit = int(deposit) use_money = float(self.deposit)", "abs(stock_price) self.portfolio_stock_dict.update({stock_code:{\"종목명\":stock_name, \"현재가\":stock_price}}) f.close() def merge_dict(self): self.all_stock_dict.update({\"계좌평가잔고내역\": self.account_stock_dict}) self.all_stock_dict.update({'미체결종목': 
self.not_account_stock_dict}) self.all_stock_dict.update({'포트폴리오종목': self.portfolio_stock_dict}) def", "sRQName, i, \"현재가\") # 현재가 : 000000003450 total_chegual_price = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\",", "self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['주문체결']['체결량']) # 출력: 5 default : '' if chegual_quantity == '': chegual_quantity", "주문용스크린 번호 self.screen_start_stop_real = \"1000\" #장 시작/종료 실시간 스크린번호 ######################################## ######### 초기 셋팅", "self.portfolio_stock_dict[sCode][\"주문용스크린번호\"], self.account_num, 2, sCode, asd['매매가능수량'], 0, self.realType.SENDTYPE['거래구분']['시장가'], \"\"] ) if order_success == 0:", "= None if int(self.calcul_data[0][7]) <= moving_average_price and moving_average_price <= int(self.calcul_data[0][6]): self.logging.logger.debug(\"오늘 주가 120이평선", "# 출력: -매도, +매수 order_gubun = order_gubun.strip().lstrip('+').lstrip('-') chegual_time_str = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['주문체결']['주문/체결시간']) # 출력:", "abs(int(first_sell_price)) first_buy_price = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['잔고']['(최우선)매수호가']) first_buy_price = abs(int(first_buy_price)) if sCode not in self.jango_dict.keys():", "self.OnReceiveChejanData.connect(self.chejan_slot) #종목 주문체결 관련한 이벤트 def signal_login_commConnect(self): self.dynamicCall(\"CommConnect()\") # 로그인 요청 시그널 self.login_event_loop.exec_()", "int, QString)\", sTrCode, sRQName, i, \"현재가\") # 출력 : 000070 value = self.dynamicCall(\"GetCommData(QString,", "= open(\"files/condition_stock.txt\", \"a\", encoding=\"utf8\") f.write(\"%s\\t%s\\t%s\\n\" % (code, code_nm, str(self.calcul_data[0][1]))) f.close() elif pass_success ==", "갯수 %s \" % len(code_list)) for idx, code in enumerate(code_list): self.dynamicCall(\"DisconnectRealData(QString)\", self.screen_calculation_stock) #", "# 출력 : 3 order_quan = int(order_quan) order_price = self.dynamicCall(\"GetChejanData(int)\", 
self.realType.REALTYPE['주문체결']['주문가격']) # 출력:", "= ls[0] stock_name = ls[1] stock_price = int(ls[2].split(\"\\n\")[0]) stock_price = abs(stock_price) self.portfolio_stock_dict.update({stock_code:{\"종목명\":stock_name, \"현재가\":stock_price}})", "self.not_account_stock_dict.keys(): self.not_account_stock_dict.update({order_number: {}}) self.not_account_stock_dict[order_number].update({\"종목코드\": sCode}) self.not_account_stock_dict[order_number].update({\"주문번호\": order_number}) self.not_account_stock_dict[order_number].update({\"종목명\": stock_name}) self.not_account_stock_dict[order_number].update({\"주문상태\": order_status}) self.not_account_stock_dict[order_number].update({\"주문수량\": order_quan})", "self.dynamicCall(\"CommRqData(QString, QString, int, QString)\", \"주식일봉차트조회\", \"opt10081\", sPrevNext, self.screen_calculation_stock) # Tr서버로 전송 -Transaction self.calculator_event_loop.exec_()", "QString, int, QString)\", sTrCode, sRQName, 0, \"총매입금액\") self.total_buy_money = int(total_buy_money) total_profit_loss_money = self.dynamicCall(\"GetCommData(QString,", "접수,확인,체결 order_quantity = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, i, \"주문수량\") order_price =", "이평선 가격보다 낮은 것 확인\") self.logging.logger.debug(\"포착된 부분의 저가가 오늘자 주가의 고가보다 낮은지 확인\")", "self.not_account_stock_dict[order_no].update({'미체결수량': not_quantity}) self.not_account_stock_dict[order_no].update({'체결량': ok_quantity}) self.logging.logger.debug(\"미체결 종목 : %s \" % self.not_account_stock_dict[order_no]) self.detail_account_info_event_loop.exit() elif", "- %s\" % (total_buy_money, total_profit_loss_money, total_profit_loss_rate)) rows = self.dynamicCall(\"GetRepeatCnt(QString, QString)\", sTrCode, sRQName) for", "# 스크린번호 연결 끊기 def get_code_list_by_market(self, market_code): ''' 종목코드 리스트 받기 #0:장내, 10:코스닥", "{} self.not_account_stock_dict[order_no].update({'종목코드': code}) self.not_account_stock_dict[order_no].update({'종목명': code_nm}) 
self.not_account_stock_dict[order_no].update({'주문번호': order_no}) self.not_account_stock_dict[order_no].update({'주문상태': order_status}) self.not_account_stock_dict[order_no].update({'주문수량': order_quantity}) self.not_account_stock_dict[order_no].update({'주문가격': order_price})", "not_quantity = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, i, \"미체결수량\") ok_quantity = self.dynamicCall(\"GetCommData(QString,", "order_status}) self.not_account_stock_dict[order_no].update({'주문수량': order_quantity}) self.not_account_stock_dict[order_no].update({'주문가격': order_price}) self.not_account_stock_dict[order_no].update({'주문구분': order_gubun}) self.not_account_stock_dict[order_no].update({'미체결수량': not_quantity}) self.not_account_stock_dict[order_no].update({'체결량': ok_quantity}) self.logging.logger.debug(\"미체결 종목", "체결 order_quan = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['주문체결']['주문수량']) # 출력 : 3 order_quan = int(order_quan) order_price", "스크린 번호 self.screen_my_info = \"2000\" #계좌 관련한 스크린 번호 self.screen_calculation_stock = \"4000\" #계산용", "주가가 120일 이평선과 같거나 위에 있으면 조건 통과 못함\") price_top_moving = False break", "<= 20: self.logging.logger.debug(\"20일 동안 주가가 120일 이평선과 같거나 위에 있으면 조건 통과 못함\")", "ls = line.split(\"\\t\") stock_code = ls[0] stock_name = ls[1] stock_price = int(ls[2].split(\"\\n\")[0]) stock_price", "조회하면서 120일 이평선보다 주가가 계속 밑에 존재하는지 확인 prev_price = None if bottom_stock_price", "KOSDAQ Stock Code : %s is updating... 
\" % (idx + 1, len(code_list),", "#예수금 self.use_money = 0 #실제 투자에 사용할 금액 self.use_money_percent = 0.5 #예수금에서 실제", "% (total_buy_money, total_profit_loss_money, total_profit_loss_rate)) rows = self.dynamicCall(\"GetRepeatCnt(QString, QString)\", sTrCode, sRQName) for i in", "000000000000010 buy_price = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, i, \"매입가\") # 매입가", "장 종료\") for code in self.portfolio_stock_dict.keys(): self.dynamicCall(\"SetRealRemove(QString, QString)\", self.portfolio_stock_dict[code]['스크린번호'], code) QTest.qWait(5000) self.file_delete() self.calculator_fnc()", "= False # 120일 이평선을 그릴만큼의 데이터가 있는지 체크 if self.calcul_data == None", "self.not_account_stock_dict.update({order_number: {}}) self.not_account_stock_dict[order_number].update({\"종목코드\": sCode}) self.not_account_stock_dict[order_number].update({\"주문번호\": order_number}) self.not_account_stock_dict[order_number].update({\"종목명\": stock_name}) self.not_account_stock_dict[order_number].update({\"주문상태\": order_status}) self.not_account_stock_dict[order_number].update({\"주문수량\": order_quan}) self.not_account_stock_dict[order_number].update({\"주문가격\":", "%s, tr코드: %s --- %s\" %(sScrNo, sRQName, sTrCode, msg)) #파일 삭제 def file_delete(self):", "self.realType.REALTYPE['주문체결']['주문/체결시간']) # 출력: '151028' chegual_price = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['주문체결']['체결가']) # 출력: 2110 default :", "있으면 조건 통과 못함\") price_top_moving = False break elif int(self.calcul_data[idx][7]) > moving_average_price_prev and", "total_buy_money = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, 0, \"총매입금액\") self.total_buy_money = int(total_buy_money)", "%s - %s - %s\" % (total_buy_money, total_profit_loss_money, total_profit_loss_rate)) rows = self.dynamicCall(\"GetRepeatCnt(QString, QString)\",", "self.dynamicCall(\"SetInputValue(QString, QString)\", \"비밀번호입력매체구분\", \"00\") self.dynamicCall(\"SetInputValue(QString, 
QString)\", \"조회구분\", \"1\") self.dynamicCall(\"CommRqData(QString, QString, int, QString)\", \"예수금상세현황요청\",", "매도일 때 g = abs(int(g)) h = self.dynamicCall(\"GetCommRealData(QString, int)\", sCode, self.realType.REALTYPE[sRealType]['누적거래량']) # 출력", "투자에 사용할 금액 self.use_money_percent = 0.5 #예수금에서 실제 사용할 비율 self.output_deposit = 0", "= f.readlines() #파일에 있는 내용들이 모두 읽어와 진다. for line in lines: #줄바꿈된", "# 과거 일봉 데이터를 조회하면서 120일 이평선보다 주가가 계속 밑에 존재하는지 확인 prev_price", "'' if chegual_price == '': chegual_price = 0 else: chegual_price = int(chegual_price) chegual_quantity", "del self.jango_dict[sCode] #송수신 메세지 get def msg_slot(self, sScrNo, sRQName, sTrCode, msg): self.logging.logger.debug(\"스크린: %s,", "''' code_list = self.dynamicCall(\"GetCodeListByMarket(QString)\", market_code) code_list = code_list.split(';')[:-1] return code_list def calculator_fnc(self): '''", "first_sell_price = abs(int(first_sell_price)) first_buy_price = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['잔고']['(최우선)매수호가']) first_buy_price = abs(int(first_buy_price)) if sCode not", "= self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, i, \"매입가\") # 매입가 : 000000000054100", "screen_number_setting(self): screen_overwrite = [] #계좌평가잔고내역에 있는 종목들 for code in self.account_stock_dict.keys(): if code", "code not in screen_overwrite: screen_overwrite.append(code) #포트폴리로에 담겨있는 종목들 for code in self.portfolio_stock_dict.keys(): if", "(cnt % 50) == 0: temp_screen += 1 self.screen_real_stock = str(temp_screen) if (cnt", "파일이 있는지 체크한다. 
f = open(\"files/condition_stock.txt\", \"r\", encoding=\"utf8\") # \"r\"을 인자로 던져주면 파일", "= self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['주문체결']['종목명']) stock_name = stock_name.strip() origin_order_number = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['주문체결']['원주문번호']) # 출력 :", "return code_list def calculator_fnc(self): ''' 종목 분석관련 함수 모음 :return: ''' code_list =", ": 000070 value = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, i, \"거래량\") #", "market_code): ''' 종목코드 리스트 받기 #0:장내, 10:코스닥 :param market_code: 시장코드 입력 :return: '''", "출력: 2110 default : '' if chegual_price == '': chegual_price = 0 else:", "= \"1000\" #장 시작/종료 실시간 스크린번호 ######################################## ######### 초기 셋팅 함수들 바로 실행", "None: self.dynamicCall(\"SetInputValue(QString, QString)\", \"기준일자\", date) self.dynamicCall(\"CommRqData(QString, QString, int, QString)\", \"주식일봉차트조회\", \"opt10081\", sPrevNext, self.screen_calculation_stock)", "QString, int, int, QString, QString)\", [\"매수취소\", self.portfolio_stock_dict[sCode][\"주문용스크린번호\"], self.account_num, 3, code, 0, 0, self.realType.SENDTYPE['거래구분']['지정가'],", "== True: self.logging.logger.debug(\"조건부 통과됨\") code_nm = self.dynamicCall(\"GetMasterCodeName(QString)\", code) f = open(\"files/condition_stock.txt\", \"a\", encoding=\"utf8\")", "sCode, self.realType.REALTYPE[sRealType]['누적거래량']) # 출력 : 240124 h = abs(int(h)) i = self.dynamicCall(\"GetCommRealData(QString, int)\",", "int(value[1]) moving_average_price_prev = total_price / 120 if moving_average_price_prev <= int(self.calcul_data[idx][6]) and idx <=", "from config.kiwoomType import * from config.log_class import * # from config.slack import *", "= 0 for code in screen_overwrite: temp_screen = int(self.screen_real_stock) meme_screen = int(self.screen_meme_stock) if", "성공\") del self.account_stock_dict[sCode] else: self.logging.logger.debug(\"매도주문 전달 실패\") elif sCode in self.jango_dict.keys(): jd =", "i, \"미체결수량\") 
ok_quantity = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, i, \"체결량\") code", "* from PyQt5.QtCore import * from config.errorCode import * from PyQt5.QtTest import *", "+(-)2520 c = abs(int(c)) d = self.dynamicCall(\"GetCommRealData(QString, int)\", sCode, self.realType.REALTYPE[sRealType]['등락율']) # 출력 :", "QEventLoop() ######################################### ########### 전체 종목 관리 self.all_stock_dict = {} ########################### ####### 계좌 관련된", "{} ######################## ########### 종목 분석 용 self.calcul_data = [] ########################################## ####### 요청 스크린", "if pass_success == True: self.logging.logger.debug(\"조건부 통과됨\") code_nm = self.dynamicCall(\"GetMasterCodeName(QString)\", code) f = open(\"files/condition_stock.txt\",", "= code_nm.strip() order_no = int(order_no.strip()) order_status = order_status.strip() order_quantity = int(order_quantity.strip()) order_price =", "체크 if self.calcul_data == None or len(self.calcul_data) < 120: pass_success = False else:", ": %s - %s - %s\" % (total_buy_money, total_profit_loss_money, total_profit_loss_rate)) rows = self.dynamicCall(\"GetRepeatCnt(QString,", "Stock Code : %s is updating... 
\" % (idx + 1, len(code_list), code))", "ok_quantity = int(ok_quantity.strip()) if order_no in self.not_account_stock_dict: pass else: self.not_account_stock_dict[order_no] = {} self.not_account_stock_dict[order_no].update({'종목코드':", "order_success == 0: self.logging.logger.debug(\"매수취소 전달 성공\") else: self.logging.logger.debug(\"매수취소 전달 실패\") elif not_quantity ==", "\"opw00018\", sPrevNext, self.screen_my_info) self.detail_account_info_event_loop.exec_() def not_concluded_account(self, sPrevNext=\"0\"): self.dynamicCall(\"SetInputValue(QString, QString)\", \"계좌번호\", self.account_num) self.dynamicCall(\"SetInputValue(QString, QString)\",", "\" % rows) if sPrevNext == \"2\": self.detail_account_mystock(sPrevNext=\"2\") else: self.detail_account_info_event_loop.exit() elif sRQName ==", "not_quantity = int(not_quantity.strip()) ok_quantity = int(ok_quantity.strip()) if order_no in self.not_account_stock_dict: pass else: self.not_account_stock_dict[order_no]", "- 수익률: %s - 현재가: %s\" % ( code, code_nm, stock_quantity, buy_price, learn_rate,", "current_price}) self.jango_dict[sCode].update({\"종목코드\": sCode}) self.jango_dict[sCode].update({\"종목명\": stock_name}) self.jango_dict[sCode].update({\"보유수량\": stock_quan}) self.jango_dict[sCode].update({\"주문가능수량\": like_quan}) self.jango_dict[sCode].update({\"매입단가\": buy_price}) self.jango_dict[sCode].update({\"총매입가\": total_buy_price})", "int(self.screen_real_stock) meme_screen = int(self.screen_meme_stock) if (cnt % 50) == 0: temp_screen += 1", "if jd['주문가능수량'] > 0 and (meme_rate > 5 or meme_rate < -5): order_success", "연결 끊기 self.logging.logger.debug(\"%s / %s : KOSDAQ Stock Code : %s is updating...", "order_quantity}) self.not_account_stock_dict[order_no].update({'주문가격': order_price}) self.not_account_stock_dict[order_no].update({'주문구분': order_gubun}) self.not_account_stock_dict[order_no].update({'미체결수량': not_quantity}) self.not_account_stock_dict[order_no].update({'체결량': ok_quantity}) self.logging.logger.debug(\"미체결 종목 : %s", 
"order_success = self.dynamicCall( \"SendOrder(QString, QString, QString, int, QString, int, int, QString, QString)\", [\"신규매도\",", "self.total_profit_loss_money = 0 #총평가손익금액 self.total_profit_loss_rate = 0.0 #총수익률(%) ######################################## ######## 종목 정보 가져오기", "len(code_list)) for idx, code in enumerate(code_list): self.dynamicCall(\"DisconnectRealData(QString)\", self.screen_calculation_stock) # 스크린 연결 끊기 self.logging.logger.debug(\"%s", "= float(d) e = self.dynamicCall(\"GetCommRealData(QString, int)\", sCode, self.realType.REALTYPE[sRealType]['(최우선)매도호가']) # 출력 : +(-)2520 e", "(b - jd['매입단가']) / jd['매입단가'] * 100 if jd['주문가능수량'] > 0 and (meme_rate", "%s - 매입가:%s - 수익률: %s - 현재가: %s\" % ( code, code_nm,", "= abs(int(k)) if sCode not in self.portfolio_stock_dict: self.portfolio_stock_dict.update({sCode:{}}) self.portfolio_stock_dict[sCode].update({\"체결시간\": a}) self.portfolio_stock_dict[sCode].update({\"현재가\": b}) self.portfolio_stock_dict[sCode].update({\"전일대비\":", "= int(order_price.strip()) order_gubun = order_gubun.strip().lstrip('+').lstrip('-') not_quantity = int(not_quantity.strip()) ok_quantity = int(ok_quantity.strip()) if order_no", "sTrCode, sRQName) self.logging.logger.debug(\"남은 일자 수 %s\" % cnt) for i in range(cnt): data", "입력 :return: ''' code_list = self.dynamicCall(\"GetCodeListByMarket(QString)\", market_code) code_list = code_list.split(';')[:-1] return code_list def", "1, len(code_list), code)) self.day_kiwoom_db(code=code) def day_kiwoom_db(self, code=None, date=None, sPrevNext=\"0\"): QTest.qWait(3600) #3.6초마다 딜레이를 준다.", "del self.not_account_stock_dict[order_num] # 실시간 체결 정보 def chejan_slot(self, sGubun, nItemCnt, sFidList): if int(sGubun)", "else: self.not_account_stock_dict[order_no] = {} self.not_account_stock_dict[order_no].update({'종목코드': code}) self.not_account_stock_dict[order_no].update({'종목명': code_nm}) self.not_account_stock_dict[order_no].update({'주문번호': order_no}) 
self.not_account_stock_dict[order_no].update({'주문상태': order_status}) self.not_account_stock_dict[order_no].update({'주문수량':", "%s\" % account_num) def detail_account_info(self, sPrevNext=\"0\"): self.dynamicCall(\"SetInputValue(QString, QString)\", \"계좌번호\", self.account_num) self.dynamicCall(\"SetInputValue(QString, QString)\", \"비밀번호\",", "######################################### QTest.qWait(10000) self.read_code() self.screen_number_setting() QTest.qWait(5000) #실시간 수신 관련 함수 self.dynamicCall(\"SetRealReg(QString, QString, QString, QString)\",", "= False break elif int(self.calcul_data[idx][7]) > moving_average_price_prev and idx > 20: # 120일", "> 20: # 120일 이평선 위에 있는 구간 존재 self.logging.logger.debug(\"120일치 이평선 위에 있는", "= order_status.strip() order_quantity = int(order_quantity.strip()) order_price = int(order_price.strip()) order_gubun = order_gubun.strip().lstrip('+').lstrip('-') not_quantity =", "때 g = abs(int(g)) h = self.dynamicCall(\"GetCommRealData(QString, int)\", sCode, self.realType.REALTYPE[sRealType]['누적거래량']) # 출력 :", "first_sell_price = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['잔고']['(최우선)매도호가']) first_sell_price = abs(int(first_sell_price)) first_buy_price = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['잔고']['(최우선)매수호가']) first_buy_price =", "market_code: 시장코드 입력 :return: ''' code_list = self.dynamicCall(\"GetCodeListByMarket(QString)\", market_code) code_list = code_list.split(';')[:-1] return", "code_nm}) self.account_stock_dict[code].update({\"보유수량\": stock_quantity}) self.account_stock_dict[code].update({\"매입가\": buy_price}) self.account_stock_dict[code].update({\"수익률(%)\": learn_rate}) self.account_stock_dict[code].update({\"현재가\": current_price}) self.account_stock_dict[code].update({\"매입금액\": total_chegual_price}) self.account_stock_dict[code].update({'매매가능수량' :", "sTrCode, msg): self.logging.logger.debug(\"스크린: %s, 요청이름: %s, tr코드: %s --- %s\" %(sScrNo, sRQName, sTrCode,", 
"self.account_stock_dict[code].update({\"매입금액\": total_chegual_price}) self.account_stock_dict[code].update({'매매가능수량' : possible_quantity}) self.logging.logger.debug(\"sPreNext : %s\" % sPrevNext) print(\"계좌에 가지고 있는", "current_price}) self.account_stock_dict[code].update({\"매입금액\": total_chegual_price}) self.account_stock_dict[code].update({'매매가능수량' : possible_quantity}) self.logging.logger.debug(\"sPreNext : %s\" % sPrevNext) print(\"계좌에 가지고", "trading_value = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, i, \"거래대금\") # 출력 :", "함수 self.event_slots() # 키움과 연결하기 위한 시그널 / 슬롯 모음 self.real_event_slot() # 실시간", "elif d > 2.0 and sCode not in self.jango_dict: self.logging.logger.debug(\"매수조건 통과 %s \"", "self.jango_dict[sCode].update({\"매입단가\": buy_price}) self.jango_dict[sCode].update({\"총매입가\": total_buy_price}) self.jango_dict[sCode].update({\"매도매수구분\": meme_gubun}) self.jango_dict[sCode].update({\"(최우선)매도호가\": first_sell_price}) self.jango_dict[sCode].update({\"(최우선)매수호가\": first_buy_price}) if stock_quan ==", "self.calcul_data[:120]: total_price += int(value[1]) moving_average_price = total_price / 120 # 오늘자 주가가 120일", "% ( code, code_nm, stock_quantity, buy_price, learn_rate, current_price)) if code in self.account_stock_dict: #", "temp_screen = int(self.screen_real_stock) meme_screen = int(self.screen_meme_stock) if (cnt % 50) == 0: temp_screen", "# 실시간 데이터 얻어오기 def realdata_slot(self, sCode, sRealType, sRealData): if sRealType == \"장시작시간\":", "= self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['잔고']['(최우선)매도호가']) first_sell_price = abs(int(first_sell_price)) first_buy_price = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['잔고']['(최우선)매수호가']) first_buy_price = abs(int(first_buy_price))", "self.output_deposit = 0 #출력가능 금액 self.total_profit_loss_money = 0 #총평가손익금액 self.total_profit_loss_rate = 0.0 #총수익률(%)", "모두 읽어와 진다. for line in lines: #줄바꿈된 내용들이 한줄 씩 읽어와진다. 
if", "elif sRQName == \"주식일봉차트조회\": code = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, 0,", "in screen_overwrite: screen_overwrite.append(code) #미체결에 있는 종목들 for order_number in self.not_account_stock_dict.keys(): code = self.not_account_stock_dict[order_number]['종목코드']", "self.OnReceiveMsg.connect(self.msg_slot) def real_event_slot(self): self.OnReceiveRealData.connect(self.realdata_slot) # 실시간 이벤트 연결 self.OnReceiveChejanData.connect(self.chejan_slot) #종목 주문체결 관련한 이벤트", "self.account_num, 3, code, 0, 0, self.realType.SENDTYPE['거래구분']['지정가'], order_num] ) if order_success == 0: self.logging.logger.debug(\"매수취소", "elif not_quantity == 0: del self.not_account_stock_dict[order_num] # 실시간 체결 정보 def chejan_slot(self, sGubun,", "i, \"매입금액\") possible_quantity = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, i, \"매매가능수량\") self.logging.logger.debug(\"종목코드:", "120일치가 있는지 계속 확인 self.logging.logger.debug(\"120일치가 없음\") break total_price = 0 for value in", "\"r\", encoding=\"utf8\") # \"r\"을 인자로 던져주면 파일 내용을 읽어 오겠다는 뜻이다. 
lines =", "= self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['주문체결']['주문상태']) # 출력: 접수, 확인, 체결 order_quan = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['주문체결']['주문수량']) #", "f = self.dynamicCall(\"GetCommRealData(QString, int)\", sCode, self.realType.REALTYPE[sRealType]['(최우선)매수호가']) # 출력 : +(-)2515 f = abs(int(f))", "True: if len(self.calcul_data[idx:]) < 120: # 120일치가 있는지 계속 확인 self.logging.logger.debug(\"120일치가 없음\") break", "screen_overwrite = [] #계좌평가잔고내역에 있는 종목들 for code in self.account_stock_dict.keys(): if code not", "self.portfolio_stock_dict[sCode].update({\"(최우선)매도호가\": e}) self.portfolio_stock_dict[sCode].update({\"(최우선)매수호가\": f}) self.portfolio_stock_dict[sCode].update({\"거래량\": g}) self.portfolio_stock_dict[sCode].update({\"누적거래량\": h}) self.portfolio_stock_dict[sCode].update({\"고가\": i}) self.portfolio_stock_dict[sCode].update({\"시가\": j}) self.portfolio_stock_dict[sCode].update({\"저가\":", "= (b - jd['매입단가']) / jd['매입단가'] * 100 if jd['주문가능수량'] > 0 and", "+= 1 self.screen_real_stock = str(temp_screen) if (cnt % 50) == 0: meme_screen +=", "( code, code_nm, stock_quantity, buy_price, learn_rate, current_price)) if code in self.account_stock_dict: # dictionary", "order_number}) self.not_account_stock_dict[order_number].update({\"종목명\": stock_name}) self.not_account_stock_dict[order_number].update({\"주문상태\": order_status}) self.not_account_stock_dict[order_number].update({\"주문수량\": order_quan}) self.not_account_stock_dict[order_number].update({\"주문가격\": order_price}) self.not_account_stock_dict[order_number].update({\"미체결수량\": not_chegual_quan}) self.not_account_stock_dict[order_number].update({\"원주문번호\": origin_order_number})", "self.realType.REALTYPE['잔고']['보유수량']) stock_quan = int(stock_quan) like_quan = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['잔고']['주문가능수량']) like_quan = int(like_quan) buy_price =", "= {} self.not_account_stock_dict[order_no].update({'종목코드': code}) 
self.not_account_stock_dict[order_no].update({'종목명': code_nm}) self.not_account_stock_dict[order_no].update({'주문번호': order_no}) self.not_account_stock_dict[order_no].update({'주문상태': order_status}) self.not_account_stock_dict[order_no].update({'주문수량': order_quantity}) self.not_account_stock_dict[order_no].update({'주문가격':", "int)\", sCode, self.realType.REALTYPE[sRealType]['현재가']) # 출력 : +(-)2520 b = abs(int(b)) c = self.dynamicCall(\"GetCommRealData(QString,", "과거 일봉 데이터를 조회하면서 120일 이평선보다 주가가 계속 밑에 존재하는지 확인 prev_price =", "= int(deposit) use_money = float(self.deposit) * self.use_money_percent self.use_money = int(use_money) self.use_money = self.use_money", "1 # 해당부분 이평선이 가장 최근의 이평선 가격보다 낮은지 확인 if price_top_moving ==", "code.strip()[1:] code_nm = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, i, \"종목명\") # 출럭", "in self.jango_dict: self.logging.logger.debug(\"매수조건 통과 %s \" % sCode) result = (self.use_money * 0.1)", "실패\") elif d > 2.0 and sCode not in self.jango_dict: self.logging.logger.debug(\"매수조건 통과 %s", "종목명: %s - 보유수량: %s - 매입가:%s - 수익률: %s - 현재가: %s\"", "% self.not_account_stock_dict[order_no]) self.detail_account_info_event_loop.exit() elif sRQName == \"주식일봉차트조회\": code = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\",", "value = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, i, \"거래량\") # 출력 :", "- 현재가: %s\" % ( code, code_nm, stock_quantity, buy_price, learn_rate, current_price)) if code", "로그인 요청 시그널 self.login_event_loop.exec_() # 이벤트루프 실행 def login_slot(self, err_code): self.logging.logger.debug(errors(err_code)[1]) #로그인 처리가", "# 출력 : +240124 매수일때, -2034 매도일 때 g = abs(int(g)) h =", "fallback=\"주식 자동화 프로그램 동작\", text=\"주식 자동화 프로그램이 동작 되었습니다.\" ) def get_ocx_instance(self): self.setControl(\"KHOPENAPI.KHOpenAPICtrl.1\")", "chejan_slot(self, sGubun, nItemCnt, sFidList): if int(sGubun) == 0: #주문체결 account_num = self.dynamicCall(\"GetChejanData(int)\", 
self.realType.REALTYPE['주문체결']['계좌번호'])", "sTrCode, sRQName, i, \"고가\") # 출력 : 000070 low_price = self.dynamicCall(\"GetCommData(QString, QString, int,", "self.logging.logger.debug(\"20일 동안 주가가 120일 이평선과 같거나 위에 있으면 조건 통과 못함\") price_top_moving =", "포함 self.detail_account_mystock() #계좌평가잔고내역 요청 시그널 포함 QTimer.singleShot(5000, self.not_concluded_account) #5초 뒤에 미체결 종목들 가져오기", "sCode in self.jango_dict.keys(): jd = self.jango_dict[sCode] meme_rate = (b - jd['매입단가']) / jd['매입단가']", "self.dynamicCall(\"SetInputValue(QString, QString)\", \"비밀번호\", \"0000\") self.dynamicCall(\"SetInputValue(QString, QString)\", \"비밀번호입력매체구분\", \"00\") self.dynamicCall(\"SetInputValue(QString, QString)\", \"조회구분\", \"1\") self.dynamicCall(\"CommRqData(QString,", "self.use_money / 4 output_deposit = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, 0, \"출금가능금액\")", "account_num = account_list.split(';')[0] self.account_num = account_num self.logging.logger.debug(\"계좌번호 : %s\" % account_num) def detail_account_info(self,", "출력 : 000070 date = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, i, \"일자\")", "i, \"주문수량\") order_price = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, i, \"주문가격\") order_gubun", "while True: if len(self.calcul_data[idx:]) < 120: # 120일치가 있는지 계속 확인 self.logging.logger.debug(\"120일치가 없음\")", "== \"2\": self.logging.logger.debug(\"장 종료, 동시호가로 넘어감\") elif value == \"4\": self.logging.logger.debug(\"3시30분 장 종료\")", "= self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, i, \"미체결수량\") ok_quantity = self.dynamicCall(\"GetCommData(QString, QString,", "tr코드: %s --- %s\" %(sScrNo, sRQName, sTrCode, msg)) #파일 삭제 def file_delete(self): if", "config.slack import * class Kiwoom(QAxWidget): def __init__(self): super().__init__() self.realType = RealType() self.logging =", "if code in self.portfolio_stock_dict.keys(): 
self.portfolio_stock_dict[code].update({\"스크린번호\": str(self.screen_real_stock)}) self.portfolio_stock_dict[code].update({\"주문용스크린번호\": str(self.screen_meme_stock)}) elif code not in self.portfolio_stock_dict.keys():", "\"1\") self.dynamicCall(\"CommRqData(QString, QString, int, QString)\", \"계좌평가잔고내역요청\", \"opw00018\", sPrevNext, self.screen_my_info) self.detail_account_info_event_loop.exec_() def not_concluded_account(self, sPrevNext=\"0\"):", "경로에 파일이 있는지 체크한다. f = open(\"files/condition_stock.txt\", \"r\", encoding=\"utf8\") # \"r\"을 인자로 던져주면", "sCode, self.realType.REALTYPE[sRealType]['저가']) # 출력 : +(-)2530 k = abs(int(k)) if sCode not in", "trdata_slot(self, sScrNo, sRQName, sTrCode, sRecordName, sPrevNext): if sRQName == \"예수금상세현황요청\": deposit = self.dynamicCall(\"GetCommData(QString,", "= self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, i, \"보유수량\") # 보유수량 : 000000000000010", "int)\", sCode, self.realType.REALTYPE[sRealType]['체결시간']) # 출력 HHMMSS b = self.dynamicCall(\"GetCommRealData(QString, int)\", sCode, self.realType.REALTYPE[sRealType]['현재가']) #", "like_quan = int(like_quan) buy_price = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['잔고']['매입단가']) buy_price = abs(int(buy_price)) total_buy_price = self.dynamicCall(\"GetChejanData(int)\",", "self.get_code_list_by_market(\"10\") self.logging.logger.debug(\"코스닥 갯수 %s \" % len(code_list)) for idx, code in enumerate(code_list): self.dynamicCall(\"DisconnectRealData(QString)\",", "2.0 and sCode not in self.jango_dict: self.logging.logger.debug(\"매수조건 통과 %s \" % sCode) result", "self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, i, \"수익률(%)\") # 수익률 : -000000001.94 current_price", "[[‘’, ‘현재가’, ‘거래량’, ‘거래대금’, ‘날짜’, ‘시가’, ‘고가’, ‘저가’. 
‘’], [‘’, ‘현재가’, ’거래량’, ‘거래대금’,", "'151028' chegual_price = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['주문체결']['체결가']) # 출력: 2110 default : '' if chegual_price", "‘날짜’, ‘시가’, ‘고가’, ‘저가’, ‘’]. […]] cnt = self.dynamicCall(\"GetRepeatCnt(QString, QString)\", sTrCode, sRQName) self.logging.logger.debug(\"남은", "+ 1, len(code_list), code)) self.day_kiwoom_db(code=code) def day_kiwoom_db(self, code=None, date=None, sPrevNext=\"0\"): QTest.qWait(3600) #3.6초마다 딜레이를", "in self.portfolio_stock_dict.keys(): self.portfolio_stock_dict.update({code: {\"스크린번호\": str(self.screen_real_stock), \"주문용스크린번호\": str(self.screen_meme_stock)}}) cnt += 1 # 실시간 데이터", "d = self.dynamicCall(\"GetCommRealData(QString, int)\", sCode, self.realType.REALTYPE[sRealType]['등락율']) # 출력 : +(-)12.98 d = float(d)", "\"1\") self.slack.notification( pretext=\"주식자동화 프로그램 동작\", title=\"주식 자동화 프로그램 동작\", fallback=\"주식 자동화 프로그램 동작\",", "= self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, i, \"거래대금\") # 출력 : 000070", "str(self.screen_meme_stock)}}) cnt += 1 # 실시간 데이터 얻어오기 def realdata_slot(self, sCode, sRealType, sRealData):", "for code in self.portfolio_stock_dict.keys(): self.dynamicCall(\"SetRealRemove(QString, QString)\", self.portfolio_stock_dict[code]['스크린번호'], code) QTest.qWait(5000) self.file_delete() self.calculator_fnc() sys.exit() elif", "sPrevNext, self.screen_my_info) self.detail_account_info_event_loop.exec_() def trdata_slot(self, sScrNo, sRQName, sTrCode, sRecordName, sPrevNext): if sRQName ==", "origin_order_number = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['주문체결']['원주문번호']) # 출력 : defaluse : \"000000\" order_number = self.dynamicCall(\"GetChejanData(int)\",", "출럭: 0115061 마지막 주문번호 order_status = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['주문체결']['주문상태']) # 출력: 접수, 확인, 체결", "= abs(int(buy_price)) total_buy_price = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['잔고']['총매입가']) # 계좌에 
있는 종목의 총매입가 total_buy_price =", "moving_average_price_prev and check_price > prev_price: self.logging.logger.debug(\"포착된 이평선의 가격이 오늘자 이평선 가격보다 낮은 것", "########### 전체 종목 관리 self.all_stock_dict = {} ########################### ####### 계좌 관련된 변수 self.account_stock_dict", "in self.not_account_stock_dict.keys(): self.not_account_stock_dict.update({order_number: {}}) self.not_account_stock_dict[order_number].update({\"종목코드\": sCode}) self.not_account_stock_dict[order_number].update({\"주문번호\": order_number}) self.not_account_stock_dict[order_number].update({\"종목명\": stock_name}) self.not_account_stock_dict[order_number].update({\"주문상태\": order_status}) self.not_account_stock_dict[order_number].update({\"주문수량\":", "%s \" % sCode) result = (self.use_money * 0.1) / e quantity =", "self.realType.REALTYPE['주식체결']['체결시간'] self.dynamicCall(\"SetRealReg(QString, QString, QString, QString)\", screen_num, code, fids, \"1\") self.slack.notification( pretext=\"주식자동화 프로그램 동작\",", "일봉 데이터를 조회하면서 120일 이평선보다 주가가 계속 밑에 존재하는지 확인 prev_price = None", "QString)\", sTrCode, sRQName, i, \"현재가\") # 현재가 : 000000003450 total_chegual_price = self.dynamicCall(\"GetCommData(QString, QString,", "정보 가져오기 self.portfolio_stock_dict = {} self.jango_dict = {} ######################## ########### 종목 분석 용", "용 self.calcul_data = [] ########################################## ####### 요청 스크린 번호 self.screen_my_info = \"2000\" #계좌", "#실제 투자에 사용할 금액 self.use_money_percent = 0.5 #예수금에서 실제 사용할 비율 self.output_deposit =", "int(self.calcul_data[idx][6]) and idx <= 20: self.logging.logger.debug(\"20일 동안 주가가 120일 이평선과 같거나 위에 있으면", "int(deposit) use_money = float(self.deposit) * self.use_money_percent self.use_money = int(use_money) self.use_money = self.use_money /", "\"4\": self.logging.logger.debug(\"3시30분 장 종료\") for code in self.portfolio_stock_dict.keys(): self.dynamicCall(\"SetRealRemove(QString, QString)\", self.portfolio_stock_dict[code]['스크린번호'], code) QTest.qWait(5000)", ": +(-)2530 j = abs(int(j)) k = 
self.dynamicCall(\"GetCommRealData(QString, int)\", sCode, self.realType.REALTYPE[sRealType]['저가']) # 출력", "QString)\", \"주식일봉차트조회\", \"opt10081\", sPrevNext, self.screen_calculation_stock) # Tr서버로 전송 -Transaction self.calculator_event_loop.exec_() def read_code(self): if", "first_buy_price}) elif int(sGubun) == 1: #잔고 account_num = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['잔고']['계좌번호']) sCode = self.dynamicCall(\"GetChejanData(int)\",", "sRQName == \"계좌평가잔고내역요청\": total_buy_money = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, 0, \"총매입금액\")", "if price_top_moving == True: if moving_average_price > moving_average_price_prev and check_price > prev_price: self.logging.logger.debug(\"포착된", "like_quan = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['잔고']['주문가능수량']) like_quan = int(like_quan) buy_price = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['잔고']['매입단가']) buy_price =", "- 매입가:%s - 수익률: %s - 현재가: %s\" % ( code, code_nm, stock_quantity,", "pass_success == False: self.logging.logger.debug(\"조건부 통과 못함\") self.calcul_data.clear() self.calculator_event_loop.exit() def stop_screen_cancel(self, sScrNo=None): self.dynamicCall(\"DisconnectRealData(QString)\", sScrNo)", "0, \"종목코드\") code = code.strip() # data = self.dynamicCall(\"GetCommDataEx(QString, QString)\", sTrCode, sRQName) #", "QString, QString)\", screen_num, code, fids, \"1\") self.slack.notification( pretext=\"주식자동화 프로그램 동작\", title=\"주식 자동화 프로그램", "current_price = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, i, \"현재가\") # 현재가 :", "가장 최근의 이평선 가격보다 낮은지 확인 if price_top_moving == True: if moving_average_price >", "self.logging.logger.debug(\"포착된 이평선의 가격이 오늘자 이평선 가격보다 낮은 것 확인\") self.logging.logger.debug(\"포착된 부분의 저가가 오늘자", "self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['주문체결']['주문가격']) # 출력: 21000 order_price = int(order_price) not_chegual_quan = 
self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['주문체결']['미체결수량']) #", "self.realType.REALTYPE[sRealType]['장운영구분'] # (0:장시작전, 2:장종료전(20분), 3:장시작, 4,8:장종료(30분), 9:장마감) value = self.dynamicCall(\"GetCommRealData(QString, int)\", sCode, fid)", "% account_num) def detail_account_info(self, sPrevNext=\"0\"): self.dynamicCall(\"SetInputValue(QString, QString)\", \"계좌번호\", self.account_num) self.dynamicCall(\"SetInputValue(QString, QString)\", \"비밀번호\", \"0000\")", "\"ACCNO\") # 계좌번호 반환 account_num = account_list.split(';')[0] self.account_num = account_num self.logging.logger.debug(\"계좌번호 : %s\"", "\"실시간미체결요청\", \"opt10075\", sPrevNext, self.screen_my_info) self.detail_account_info_event_loop.exec_() def trdata_slot(self, sScrNo, sRQName, sTrCode, sRecordName, sPrevNext): if", "\"\"] ) if order_success == 0: self.logging.logger.debug(\"매수주문 전달 성공\") else: self.logging.logger.debug(\"매수주문 전달 실패\")", "meme_screen = int(self.screen_meme_stock) if (cnt % 50) == 0: temp_screen += 1 self.screen_real_stock", "sRQName, i, \"주문상태\") # 접수,확인,체결 order_quantity = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName,", "j = self.dynamicCall(\"GetCommRealData(QString, int)\", sCode, self.realType.REALTYPE[sRealType]['시가']) # 출력 : +(-)2530 j = abs(int(j))", "# 출력: -6010 first_sell_price = abs(int(first_sell_price)) first_buy_price = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['주문체결']['(최우선)매수호가']) # 출력: -6000", "# 출력: 2110 default : '' if chegual_price == '': chegual_price = 0", "%s - 보유수량: %s - 매입가:%s - 수익률: %s - 현재가: %s\" %", "config.kiwoomType import * from config.log_class import * # from config.slack import * class", "lines: #줄바꿈된 내용들이 한줄 씩 읽어와진다. 
if line != \"\": ls = line.split(\"\\t\")", "\"매매구분\", \"0\") self.dynamicCall(\"CommRqData(QString, QString, int, QString)\", \"실시간미체결요청\", \"opt10075\", sPrevNext, self.screen_my_info) self.detail_account_info_event_loop.exec_() def trdata_slot(self,", "code_list def calculator_fnc(self): ''' 종목 분석관련 함수 모음 :return: ''' code_list = self.get_code_list_by_market(\"10\")", "= int(current_price.strip()) total_chegual_price = int(total_chegual_price.strip()) possible_quantity = int(possible_quantity.strip()) self.account_stock_dict[code].update({\"종목명\": code_nm}) self.account_stock_dict[code].update({\"보유수량\": stock_quantity}) self.account_stock_dict[code].update({\"매입가\":", "total_buy_price = int(total_buy_price) meme_gubun = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['잔고']['매도매수구분']) meme_gubun = self.realType.REALTYPE['매도수구분'][meme_gubun] first_sell_price = self.dynamicCall(\"GetChejanData(int)\",", "0.0 #총수익률(%) ######################################## ######## 종목 정보 가져오기 self.portfolio_stock_dict = {} self.jango_dict = {}", "self.logging.logger.debug(\"종목코드: %s - 종목명: %s - 보유수량: %s - 매입가:%s - 수익률: %s", "not_quantity = self.not_account_stock_dict[order_num]['미체결수량'] order_gubun = self.not_account_stock_dict[order_num]['주문구분'] if order_gubun == \"매수\" and not_quantity >", "정보 def chejan_slot(self, sGubun, nItemCnt, sFidList): if int(sGubun) == 0: #주문체결 account_num =", "if (cnt % 50) == 0: meme_screen += 1 self.screen_meme_stock = str(meme_screen) if", "result = (self.use_money * 0.1) / e quantity = int(result) order_success = self.dynamicCall(", "self.realType.SENDTYPE['거래구분']['지정가'], \"\"] ) if order_success == 0: self.logging.logger.debug(\"매수주문 전달 성공\") else: self.logging.logger.debug(\"매수주문 전달", "sTrCode, sRQName, i, \"종목명\") # 출럭 : 한국기업평가 stock_quantity = self.dynamicCall(\"GetCommData(QString, QString, int,", "def __init__(self): super().__init__() self.realType = RealType() self.logging = Logging() # self.slack = Slack()", "False # 120일 
이평선을 그릴만큼의 데이터가 있는지 체크 if self.calcul_data == None or", "code = self.not_account_stock_dict[order_number]['종목코드'] if code not in screen_overwrite: screen_overwrite.append(code) #포트폴리로에 담겨있는 종목들 for", "존재 self.logging.logger.debug(\"120일치 이평선 위에 있는 구간 확인됨\") price_top_moving = True prev_price = int(self.calcul_data[idx][7])", "######## 새로 들어온 주문이면 주문번호 할당 if order_number not in self.not_account_stock_dict.keys(): self.not_account_stock_dict.update({order_number: {}})", "%s : KOSDAQ Stock Code : %s is updating... \" % (idx +", "QString, int, QString)\", sTrCode, sRQName, i, \"수익률(%)\") # 수익률 : -000000001.94 current_price =", "0 else: chegual_quantity = int(chegual_quantity) current_price = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['주문체결']['현재가']) # 출력: -6000 current_price", "Tr서버로 전송 -Transaction self.calculator_event_loop.exec_() def read_code(self): if os.path.exists(\"files/condition_stock.txt\"): # 해당 경로에 파일이 있는지", "\"주식체결\": a = self.dynamicCall(\"GetCommRealData(QString, int)\", sCode, self.realType.REALTYPE[sRealType]['체결시간']) # 출력 HHMMSS b = self.dynamicCall(\"GetCommRealData(QString,", "self.logging.logger.debug(\"미체결 종목 : %s \" % self.not_account_stock_dict[order_no]) self.detail_account_info_event_loop.exit() elif sRQName == \"주식일봉차트조회\": code", ": 한국기업평가 stock_quantity = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, i, \"보유수량\") #", "%s --- %s\" %(sScrNo, sRQName, sTrCode, msg)) #파일 삭제 def file_delete(self): if os.path.isfile(\"files/condition_stock.txt\"):", "= self.not_account_stock_dict[order_num]['미체결수량'] order_gubun = self.not_account_stock_dict[order_num]['주문구분'] if order_gubun == \"매수\" and not_quantity > 0", "sPrevNext=\"0\"): self.dynamicCall(\"SetInputValue(QString, QString)\", \"계좌번호\", self.account_num) self.dynamicCall(\"SetInputValue(QString, QString)\", \"체결구분\", \"1\") self.dynamicCall(\"SetInputValue(QString, QString)\", \"매매구분\", \"0\")", "50) == 0: temp_screen += 1 
self.screen_real_stock = str(temp_screen) if (cnt % 50)", "first_sell_price}) self.jango_dict[sCode].update({\"(최우선)매수호가\": first_buy_price}) if stock_quan == 0: del self.jango_dict[sCode] #송수신 메세지 get def", "> 0 and e > meme_price: order_success = self.dynamicCall( \"SendOrder(QString, QString, QString, int,", "self.dynamicCall(\"CommConnect()\") # 로그인 요청 시그널 self.login_event_loop.exec_() # 이벤트루프 실행 def login_slot(self, err_code): self.logging.logger.debug(errors(err_code)[1])", "000070 high_price = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, i, \"고가\") # 출력", "= abs(int(b)) c = self.dynamicCall(\"GetCommRealData(QString, int)\", sCode, self.realType.REALTYPE[sRealType]['전일대비']) # 출력 : +(-)2520 c", "!= None: self.dynamicCall(\"SetInputValue(QString, QString)\", \"기준일자\", date) self.dynamicCall(\"CommRqData(QString, QString, int, QString)\", \"주식일봉차트조회\", \"opt10081\", sPrevNext,", "order_success == 0: self.logging.logger.debug(\"매수주문 전달 성공\") else: self.logging.logger.debug(\"매수주문 전달 실패\") not_meme_list = list(self.not_account_stock_dict)", "%s, 요청이름: %s, tr코드: %s --- %s\" %(sScrNo, sRQName, sTrCode, msg)) #파일 삭제", "'0': self.logging.logger.debug(\"장 시작 전\") elif value == '3': self.logging.logger.debug(\"장 시작\") elif value ==", "= self.dynamicCall(\"GetCommRealData(QString, int)\", sCode, self.realType.REALTYPE[sRealType]['고가']) # 출력 : +(-)2530 i = abs(int(i)) j", "전달 실패\") not_meme_list = list(self.not_account_stock_dict) for order_num in not_meme_list: code = self.not_account_stock_dict[order_num][\"종목코드\"] meme_price", "# 출력 : 000070 low_price = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, i,", "is updating... 
\" % (idx + 1, len(code_list), code)) self.day_kiwoom_db(code=code) def day_kiwoom_db(self, code=None,", "if bottom_stock_price == True: moving_average_price_prev = 0 price_top_moving = False idx = 1", "QString, int, QString, int, int, QString, QString)\", [\"신규매수\", self.portfolio_stock_dict[sCode][\"주문용스크린번호\"], self.account_num, 1, sCode, quantity,", "<= int(self.calcul_data[0][6]): self.logging.logger.debug(\"오늘 주가 120이평선 아래에 걸쳐있는 것 확인\") bottom_stock_price = True check_price", "abs(int(first_buy_price)) if sCode not in self.jango_dict.keys(): self.jango_dict.update({sCode:{}}) self.jango_dict[sCode].update({\"현재가\": current_price}) self.jango_dict[sCode].update({\"종목코드\": sCode}) self.jango_dict[sCode].update({\"종목명\": stock_name})", "= float(learn_rate.strip()) current_price = int(current_price.strip()) total_chegual_price = int(total_chegual_price.strip()) possible_quantity = int(possible_quantity.strip()) self.account_stock_dict[code].update({\"종목명\": code_nm})", "self.dynamicCall(\"SetInputValue(QString, QString)\", \"비밀번호입력매체구분\", \"00\") self.dynamicCall(\"SetInputValue(QString, QString)\", \"조회구분\", \"1\") self.dynamicCall(\"CommRqData(QString, QString, int, QString)\", \"계좌평가잔고내역요청\",", "self.realType.REALTYPE[sRealType]['(최우선)매도호가']) # 출력 : +(-)2520 e = abs(int(e)) f = self.dynamicCall(\"GetCommRealData(QString, int)\", sCode,", "출력: -매도, +매수 order_gubun = order_gubun.strip().lstrip('+').lstrip('-') chegual_time_str = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['주문체결']['주문/체결시간']) # 출력: '151028'", "in self.jango_dict.keys(): asd = self.account_stock_dict[sCode] meme_rate = (b - asd['매입가']) / asd['매입가'] *", "0, self.realType.SENDTYPE['거래구분']['시장가'], \"\"] ) if order_success == 0: self.logging.logger.debug(\"매도주문 전달 성공\") else: self.logging.logger.debug(\"매도주문", "self.account_num) self.dynamicCall(\"SetInputValue(QString, QString)\", \"체결구분\", \"1\") self.dynamicCall(\"SetInputValue(QString, QString)\", \"매매구분\", \"0\") 
self.dynamicCall(\"CommRqData(QString, QString, int, QString)\",", "= abs(int(first_buy_price)) if sCode not in self.jango_dict.keys(): self.jango_dict.update({sCode:{}}) self.jango_dict[sCode].update({\"현재가\": current_price}) self.jango_dict[sCode].update({\"종목코드\": sCode}) self.jango_dict[sCode].update({\"종목명\":", "self.dynamicCall(\"CommRqData(QString, QString, int, QString)\", \"계좌평가잔고내역요청\", \"opw00018\", sPrevNext, self.screen_my_info) self.detail_account_info_event_loop.exec_() def not_concluded_account(self, sPrevNext=\"0\"): self.dynamicCall(\"SetInputValue(QString,", "QString)\", \"수정주가구분\", \"1\") if date != None: self.dynamicCall(\"SetInputValue(QString, QString)\", \"기준일자\", date) self.dynamicCall(\"CommRqData(QString, QString,", "QString)\", sTrCode, sRQName, 0, \"총수익률(%)\") self.total_profit_loss_rate = float(total_profit_loss_rate) self.logging.logger.debug(\"계좌평가잔고내역요청 싱글데이터 : %s -", "sRQName, i, \"종목명\") # 출럭 : 한국기업평가 stock_quantity = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\",", "이평선 위에 있는 구간 존재 self.logging.logger.debug(\"120일치 이평선 위에 있는 구간 확인됨\") price_top_moving =", "QString, QString)\", [\"매수취소\", self.portfolio_stock_dict[sCode][\"주문용스크린번호\"], self.account_num, 3, code, 0, 0, self.realType.SENDTYPE['거래구분']['지정가'], order_num] ) if", "확인\") bottom_stock_price = True check_price = int(self.calcul_data[0][6]) # 과거 일봉 데이터를 조회하면서 120일", "int(not_quantity.strip()) ok_quantity = int(ok_quantity.strip()) if order_no in self.not_account_stock_dict: pass else: self.not_account_stock_dict[order_no] = {}", "ok_quantity}) self.logging.logger.debug(\"미체결 종목 : %s \" % self.not_account_stock_dict[order_no]) self.detail_account_info_event_loop.exit() elif sRQName == \"주식일봉차트조회\":", ": +240124 매수일때, -2034 매도일 때 g = abs(int(g)) h = self.dynamicCall(\"GetCommRealData(QString, int)\",", "float(self.deposit) * self.use_money_percent self.use_money = int(use_money) self.use_money = self.use_money / 4 output_deposit =", "실패\") elif sCode in 
self.jango_dict.keys(): jd = self.jango_dict[sCode] meme_rate = (b - jd['매입단가'])", "self.logging.logger.debug(\"매수주문 전달 성공\") else: self.logging.logger.debug(\"매수주문 전달 실패\") not_meme_list = list(self.not_account_stock_dict) for order_num in", "> meme_price: order_success = self.dynamicCall( \"SendOrder(QString, QString, QString, int, QString, int, int, QString,", "self.realType.REALTYPE['잔고']['매입단가']) buy_price = abs(int(buy_price)) total_buy_price = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['잔고']['총매입가']) # 계좌에 있는 종목의 총매입가", "total_price / 120 # 오늘자 주가가 120일 이평선에 걸쳐있는지 확인 bottom_stock_price = False", "i, \"매매가능수량\") self.logging.logger.debug(\"종목코드: %s - 종목명: %s - 보유수량: %s - 매입가:%s -", "출력 : defaluse : \"000000\" order_number = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['주문체결']['주문번호']) # 출럭: 0115061 마지막", "10:코스닥 :param market_code: 시장코드 입력 :return: ''' code_list = self.dynamicCall(\"GetCodeListByMarket(QString)\", market_code) code_list =", "if order_number not in self.not_account_stock_dict.keys(): self.not_account_stock_dict.update({order_number: {}}) self.not_account_stock_dict[order_number].update({\"종목코드\": sCode}) self.not_account_stock_dict[order_number].update({\"주문번호\": order_number}) self.not_account_stock_dict[order_number].update({\"종목명\": stock_name})", "else: self.logging.logger.debug(\"매도주문 전달 실패\") elif sCode in self.jango_dict.keys(): jd = self.jango_dict[sCode] meme_rate =", "전송 -Transaction self.calculator_event_loop.exec_() def read_code(self): if os.path.exists(\"files/condition_stock.txt\"): # 해당 경로에 파일이 있는지 체크한다.", "종목코드 리스트 받기 #0:장내, 10:코스닥 :param market_code: 시장코드 입력 :return: ''' code_list =", "int, QString)\", sTrCode, sRQName, 0, \"총평가손익금액\") self.total_profit_loss_money = int(total_profit_loss_money) total_profit_loss_rate = self.dynamicCall(\"GetCommData(QString, QString,", "있는 종목의 총매입가 total_buy_price = int(total_buy_price) meme_gubun = self.dynamicCall(\"GetChejanData(int)\", 
self.realType.REALTYPE['잔고']['매도매수구분']) meme_gubun = self.realType.REALTYPE['매도수구분'][meme_gubun]", "self.total_profit_loss_rate = 0.0 #총수익률(%) ######################################## ######## 종목 정보 가져오기 self.portfolio_stock_dict = {} self.jango_dict", "self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['주문체결']['원주문번호']) # 출력 : defaluse : \"000000\" order_number = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['주문체결']['주문번호']) #", "price_top_moving == True: if moving_average_price > moving_average_price_prev and check_price > prev_price: self.logging.logger.debug(\"포착된 이평선의", "sRQName, sTrCode, sRecordName, sPrevNext): if sRQName == \"예수금상세현황요청\": deposit = self.dynamicCall(\"GetCommData(QString, QString, int,", "code_nm, str(self.calcul_data[0][1]))) f.close() elif pass_success == False: self.logging.logger.debug(\"조건부 통과 못함\") self.calcul_data.clear() self.calculator_event_loop.exit() def", "stock_name = stock_name.strip() origin_order_number = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['주문체결']['원주문번호']) # 출력 : defaluse : \"000000\"", "total_buy_price = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['잔고']['총매입가']) # 계좌에 있는 종목의 총매입가 total_buy_price = int(total_buy_price) meme_gubun", "d}) self.portfolio_stock_dict[sCode].update({\"(최우선)매도호가\": e}) self.portfolio_stock_dict[sCode].update({\"(최우선)매수호가\": f}) self.portfolio_stock_dict[sCode].update({\"거래량\": g}) self.portfolio_stock_dict[sCode].update({\"누적거래량\": h}) self.portfolio_stock_dict[sCode].update({\"고가\": i}) self.portfolio_stock_dict[sCode].update({\"시가\": j})", "self.screen_start_stop_real, '', self.realType.REALTYPE['장시작시간']['장운영구분'], \"0\") for code in self.portfolio_stock_dict.keys(): screen_num = self.portfolio_stock_dict[code]['스크린번호'] fids =", "self.account_stock_dict[code].update({\"종목명\": code_nm}) self.account_stock_dict[code].update({\"보유수량\": stock_quantity}) self.account_stock_dict[code].update({\"매입가\": buy_price}) 
self.account_stock_dict[code].update({\"수익률(%)\": learn_rate}) self.account_stock_dict[code].update({\"현재가\": current_price}) self.account_stock_dict[code].update({\"매입금액\": total_chegual_price}) self.account_stock_dict[code].update({'매매가능수량'", "- 보유수량: %s - 매입가:%s - 수익률: %s - 현재가: %s\" % (", "not in self.portfolio_stock_dict.keys(): self.portfolio_stock_dict.update({code: {\"스크린번호\": str(self.screen_real_stock), \"주문용스크린번호\": str(self.screen_meme_stock)}}) cnt += 1 # 실시간", "i, \"거래대금\") # 출력 : 000070 date = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode,", "sScrNo, sRQName, sTrCode, msg): self.logging.logger.debug(\"스크린: %s, 요청이름: %s, tr코드: %s --- %s\" %(sScrNo,", "오늘자 이평선 가격보다 낮은 것 확인\") self.logging.logger.debug(\"포착된 부분의 저가가 오늘자 주가의 고가보다 낮은지", "stock_quantity = int(stock_quantity.strip()) buy_price = int(buy_price.strip()) learn_rate = float(learn_rate.strip()) current_price = int(current_price.strip()) total_chegual_price", "= int(self.screen_real_stock) meme_screen = int(self.screen_meme_stock) if (cnt % 50) == 0: temp_screen +=", "120: # 120일치가 있는지 계속 확인 self.logging.logger.debug(\"120일치가 없음\") break total_price = 0 for", "QString)\", sTrCode, sRQName, i, \"보유수량\") # 보유수량 : 000000000000010 buy_price = self.dynamicCall(\"GetCommData(QString, QString,", ": 000070 low_price = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, i, \"저가\") #", "최근 가격 구함 total_price = 0 for value in self.calcul_data[:120]: total_price += int(value[1])", "moving_average_price_prev = 0 price_top_moving = False idx = 1 while True: if len(self.calcul_data[idx:])", "self.dynamicCall(\"SetInputValue(QString, QString)\", \"종목코드\", code) self.dynamicCall(\"SetInputValue(QString, QString)\", \"수정주가구분\", \"1\") if date != None: self.dynamicCall(\"SetInputValue(QString,", "수익률 : -000000001.94 current_price = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, i, \"현재가\")", "code = 
self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, 0, \"종목코드\") code = code.strip()", "''' code_list = self.get_code_list_by_market(\"10\") self.logging.logger.debug(\"코스닥 갯수 %s \" % len(code_list)) for idx, code", "있는지 체크한다. f = open(\"files/condition_stock.txt\", \"r\", encoding=\"utf8\") # \"r\"을 인자로 던져주면 파일 내용을", "jd['매입단가']) / jd['매입단가'] * 100 if jd['주문가능수량'] > 0 and (meme_rate > 5", "* 100 if jd['주문가능수량'] > 0 and (meme_rate > 5 or meme_rate <", "cnt = self.dynamicCall(\"GetRepeatCnt(QString, QString)\", sTrCode, sRQName) self.logging.logger.debug(\"남은 일자 수 %s\" % cnt) for", "self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, i, \"일자\") # 출력 : 000070 start_price", "QString, int, QString)\", \"주식일봉차트조회\", \"opt10081\", sPrevNext, self.screen_calculation_stock) # Tr서버로 전송 -Transaction self.calculator_event_loop.exec_() def", "= self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['잔고']['주문가능수량']) like_quan = int(like_quan) buy_price = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['잔고']['매입단가']) buy_price = abs(int(buy_price))", "\"계좌평가잔고내역요청\": total_buy_money = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, 0, \"총매입금액\") self.total_buy_money =", "스크린 연결 끊기 self.logging.logger.debug(\"%s / %s : KOSDAQ Stock Code : %s is", "self.realType.REALTYPE[sRealType]['저가']) # 출력 : +(-)2530 k = abs(int(k)) if sCode not in self.portfolio_stock_dict:", "i, \"주문번호\") order_status = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, i, \"주문상태\") #", "미체결 종목들 가져오기 실행 ######################################### QTest.qWait(10000) self.read_code() self.screen_number_setting() QTest.qWait(5000) #실시간 수신 관련 함수", "= self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, i, \"매매가능수량\") self.logging.logger.debug(\"종목코드: %s - 종목명:", "sPrevNext=\"0\"): QTest.qWait(3600) #3.6초마다 딜레이를 준다. 
self.dynamicCall(\"SetInputValue(QString, QString)\", \"종목코드\", code) self.dynamicCall(\"SetInputValue(QString, QString)\", \"수정주가구분\", \"1\")", "prev_price = None if bottom_stock_price == True: moving_average_price_prev = 0 price_top_moving = False", "in self.jango_dict.keys(): jd = self.jango_dict[sCode] meme_rate = (b - jd['매입단가']) / jd['매입단가'] *", "code.strip() code_nm = code_nm.strip() order_no = int(order_no.strip()) order_status = order_status.strip() order_quantity = int(order_quantity.strip())", "order_status = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, i, \"주문상태\") # 접수,확인,체결 order_quantity", "있는 종목은 %s \" % rows) if sPrevNext == \"2\": self.detail_account_mystock(sPrevNext=\"2\") else: self.detail_account_info_event_loop.exit()", "QString)\", sTrCode, sRQName, i, \"종목코드\") code_nm = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName,", "\"종목명\") order_no = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, i, \"주문번호\") order_status =", "‘시가’, ‘고가’, ‘저가’. 
‘’], [‘’, ‘현재가’, ’거래량’, ‘거래대금’, ‘날짜’, ‘시가’, ‘고가’, ‘저가’, ‘’].", "screen_overwrite.append(code) # 스크린번호 할당 cnt = 0 for code in screen_overwrite: temp_screen =", "- asd['매입가']) / asd['매입가'] * 100 if asd['매매가능수량'] > 0 and (meme_rate >", "jd['주문가능수량'], 0, self.realType.SENDTYPE['거래구분']['시장가'], \"\"] ) if order_success == 0: self.logging.logger.debug(\"매도주문 전달 성공\") else:", "= self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['잔고']['(최우선)매수호가']) first_buy_price = abs(int(first_buy_price)) if sCode not in self.jango_dict.keys(): self.jango_dict.update({sCode:{}}) self.jango_dict[sCode].update({\"현재가\":", "os import sys from PyQt5.QAxContainer import * from PyQt5.QtCore import * from config.errorCode", "sTrCode, sRQName, 0, \"출금가능금액\") self.output_deposit = int(output_deposit) self.logging.logger.debug(\"예수금 : %s\" % self.output_deposit) self.stop_screen_cancel(self.screen_my_info)", "output_deposit = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, 0, \"출금가능금액\") self.output_deposit = int(output_deposit)", "sTrCode, sRQName, i, \"종목명\") order_no = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, i,", "있나 확인 pass else: self.account_stock_dict[code] = {} code_nm = code_nm.strip() stock_quantity = int(stock_quantity.strip())", "QString, int, QString)\", sTrCode, sRQName, i, \"보유수량\") # 보유수량 : 000000000000010 buy_price =", "not in screen_overwrite: screen_overwrite.append(code) # 스크린번호 할당 cnt = 0 for code in", "= {} self.jango_dict = {} ######################## ########### 종목 분석 용 self.calcul_data = []", "sRQName, i, \"수익률(%)\") # 수익률 : -000000001.94 current_price = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\",", "# 출력 : +(-)2520 e = abs(int(e)) f = self.dynamicCall(\"GetCommRealData(QString, int)\", sCode, self.realType.REALTYPE[sRealType]['(최우선)매수호가'])", "self.use_money_percent self.use_money = int(use_money) self.use_money = self.use_money / 4 output_deposit = 
self.dynamicCall(\"GetCommData(QString, QString,", "def login_slot(self, err_code): self.logging.logger.debug(errors(err_code)[1]) #로그인 처리가 완료됐으면 이벤트 루프를 종료한다. self.login_event_loop.exit() def get_account_info(self):", "self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['잔고']['주문가능수량']) like_quan = int(like_quan) buy_price = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['잔고']['매입단가']) buy_price = abs(int(buy_price)) total_buy_price", "/ e quantity = int(result) order_success = self.dynamicCall( \"SendOrder(QString, QString, QString, int, QString,", "self.portfolio_stock_dict[code]['스크린번호'] fids = self.realType.REALTYPE['주식체결']['체결시간'] self.dynamicCall(\"SetRealReg(QString, QString, QString, QString)\", screen_num, code, fids, \"1\") self.slack.notification(", "self.screen_real_stock = str(temp_screen) if (cnt % 50) == 0: meme_screen += 1 self.screen_meme_stock", "first_buy_price = abs(int(first_buy_price)) if sCode not in self.jango_dict.keys(): self.jango_dict.update({sCode:{}}) self.jango_dict[sCode].update({\"현재가\": current_price}) self.jango_dict[sCode].update({\"종목코드\": sCode})", "\"계좌번호\", self.account_num) self.dynamicCall(\"SetInputValue(QString, QString)\", \"체결구분\", \"1\") self.dynamicCall(\"SetInputValue(QString, QString)\", \"매매구분\", \"0\") self.dynamicCall(\"CommRqData(QString, QString, int,", "프로그램 동작\", title=\"주식 자동화 프로그램 동작\", fallback=\"주식 자동화 프로그램 동작\", text=\"주식 자동화 프로그램이", "code}) self.not_account_stock_dict[order_no].update({'종목명': code_nm}) self.not_account_stock_dict[order_no].update({'주문번호': order_no}) self.not_account_stock_dict[order_no].update({'주문상태': order_status}) self.not_account_stock_dict[order_no].update({'주문수량': order_quantity}) self.not_account_stock_dict[order_no].update({'주문가격': order_price}) self.not_account_stock_dict[order_no].update({'주문구분': order_gubun})", "120 if moving_average_price_prev <= int(self.calcul_data[idx][6]) and idx <= 20: self.logging.logger.debug(\"20일 동안 주가가 120일", 
"total_price = 0 for value in self.calcul_data[:120]: total_price += int(value[1]) moving_average_price = total_price", "위에 있는 구간 존재 self.logging.logger.debug(\"120일치 이평선 위에 있는 구간 확인됨\") price_top_moving = True", ": 000070 start_price = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, i, \"시가\") #", "= int(order_price) not_chegual_quan = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['주문체결']['미체결수량']) # 출력: 15, default: 0 not_chegual_quan =", "변수모음 self.login_event_loop = QEventLoop() #로그인 요청용 이벤트루프 self.detail_account_info_event_loop = QEventLoop() # 예수금 요청용", "== None or len(self.calcul_data) < 120: pass_success = False else: # 120일 이평선의", "for order_num in not_meme_list: code = self.not_account_stock_dict[order_num][\"종목코드\"] meme_price = self.not_account_stock_dict[order_num]['주문가격'] not_quantity = self.not_account_stock_dict[order_num]['미체결수량']", "stock_quantity, buy_price, learn_rate, current_price)) if code in self.account_stock_dict: # dictionary 에 해당 종목이", ": A039423 // 알파벳 A는 장내주식, J는 ELW종목, Q는 ETN종목 code = code.strip()[1:]", "계속 밑에 존재하는지 확인 prev_price = None if bottom_stock_price == True: moving_average_price_prev =", "= float(self.deposit) * self.use_money_percent self.use_money = int(use_money) self.use_money = self.use_money / 4 output_deposit", "sTrCode, sRQName, i, \"일자\") # 출력 : 000070 start_price = self.dynamicCall(\"GetCommData(QString, QString, int,", "== \"주식체결\": a = self.dynamicCall(\"GetCommRealData(QString, int)\", sCode, self.realType.REALTYPE[sRealType]['체결시간']) # 출력 HHMMSS b =", "self.jango_dict = {} ######################## ########### 종목 분석 용 self.calcul_data = [] ########################################## #######", "'': chegual_quantity = 0 else: chegual_quantity = int(chegual_quantity) current_price = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['주문체결']['현재가']) #", "QString)\", sTrCode, sRQName) # [[‘’, ‘현재가’, ‘거래량’, ‘거래대금’, ‘날짜’, ‘시가’, ‘고가’, ‘저가’. 
‘’],", "== \"장시작시간\": fid = self.realType.REALTYPE[sRealType]['장운영구분'] # (0:장시작전, 2:장종료전(20분), 3:장시작, 4,8:장종료(30분), 9:장마감) value =", "\"조회구분\", \"1\") self.dynamicCall(\"CommRqData(QString, QString, int, QString)\", \"예수금상세현황요청\", \"opw00001\", sPrevNext, self.screen_my_info) self.detail_account_info_event_loop.exec_() def detail_account_mystock(self,", "class Kiwoom(QAxWidget): def __init__(self): super().__init__() self.realType = RealType() self.logging = Logging() # self.slack", "시작 전\") elif value == '3': self.logging.logger.debug(\"장 시작\") elif value == \"2\": self.logging.logger.debug(\"장", "current_price}) self.not_account_stock_dict[order_number].update({\"(최우선)매도호가\": first_sell_price}) self.not_account_stock_dict[order_number].update({\"(최우선)매수호가\": first_buy_price}) elif int(sGubun) == 1: #잔고 account_num = self.dynamicCall(\"GetChejanData(int)\",", "self.logging.logger.debug(\"계좌번호 : %s\" % account_num) def detail_account_info(self, sPrevNext=\"0\"): self.dynamicCall(\"SetInputValue(QString, QString)\", \"계좌번호\", self.account_num) self.dynamicCall(\"SetInputValue(QString,", "False else: # 120일 이평선의 최근 가격 구함 total_price = 0 for value", "(meme_rate > 5 or meme_rate < -5): order_success = self.dynamicCall( \"SendOrder(QString, QString, QString,", "{\"스크린번호\": str(self.screen_real_stock), \"주문용스크린번호\": str(self.screen_meme_stock)}}) cnt += 1 # 실시간 데이터 얻어오기 def realdata_slot(self,", "= self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['주문체결']['계좌번호']) sCode = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['주문체결']['종목코드'])[1:] stock_name = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['주문체결']['종목명']) stock_name =", "= True if pass_success == True: self.logging.logger.debug(\"조건부 통과됨\") code_nm = self.dynamicCall(\"GetMasterCodeName(QString)\", code) f", "통과 못함\") self.calcul_data.clear() self.calculator_event_loop.exit() def stop_screen_cancel(self, sScrNo=None): 
self.dynamicCall(\"DisconnectRealData(QString)\", sScrNo) # 스크린번호 연결 끊기", "QString, int, QString)\", sTrCode, sRQName, i, \"종목코드\") code_nm = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\",", "self.account_num = account_num self.logging.logger.debug(\"계좌번호 : %s\" % account_num) def detail_account_info(self, sPrevNext=\"0\"): self.dynamicCall(\"SetInputValue(QString, QString)\",", "abs(int(i)) j = self.dynamicCall(\"GetCommRealData(QString, int)\", sCode, self.realType.REALTYPE[sRealType]['시가']) # 출력 : +(-)2530 j =", "self.not_account_stock_dict.keys(): code = self.not_account_stock_dict[order_number]['종목코드'] if code not in screen_overwrite: screen_overwrite.append(code) #포트폴리로에 담겨있는 종목들", "-6000 current_price = abs(int(current_price)) first_sell_price = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['주문체결']['(최우선)매도호가']) # 출력: -6010 first_sell_price =", "= int(self.calcul_data[0][6]) # 과거 일봉 데이터를 조회하면서 120일 이평선보다 주가가 계속 밑에 존재하는지", "abs(int(current_price)) first_sell_price = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['주문체결']['(최우선)매도호가']) # 출력: -6010 first_sell_price = abs(int(first_sell_price)) first_buy_price =", "#종목 주문체결 관련한 이벤트 def signal_login_commConnect(self): self.dynamicCall(\"CommConnect()\") # 로그인 요청 시그널 self.login_event_loop.exec_() #", "stock_quan = int(stock_quan) like_quan = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['잔고']['주문가능수량']) like_quan = int(like_quan) buy_price = self.dynamicCall(\"GetChejanData(int)\",", "self.realType.REALTYPE[sRealType]['전일대비']) # 출력 : +(-)2520 c = abs(int(c)) d = self.dynamicCall(\"GetCommRealData(QString, int)\", sCode,", "self.dynamicCall( \"SendOrder(QString, QString, QString, int, QString, int, int, QString, QString)\", [\"매수취소\", self.portfolio_stock_dict[sCode][\"주문용스크린번호\"], self.account_num,", "== 1: #잔고 account_num = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['잔고']['계좌번호']) sCode = 
self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['잔고']['종목코드'])[1:] stock_name =", "= str(meme_screen) if code in self.portfolio_stock_dict.keys(): self.portfolio_stock_dict[code].update({\"스크린번호\": str(self.screen_real_stock)}) self.portfolio_stock_dict[code].update({\"주문용스크린번호\": str(self.screen_meme_stock)}) elif code not", "for order_number in self.not_account_stock_dict.keys(): code = self.not_account_stock_dict[order_number]['종목코드'] if code not in screen_overwrite: screen_overwrite.append(code)", "담겨있는 종목들 for code in self.portfolio_stock_dict.keys(): if code not in screen_overwrite: screen_overwrite.append(code) #", "######### 초기 셋팅 함수들 바로 실행 self.get_ocx_instance() #OCX 방식을 파이썬에 사용할 수 있게", "abs(int(first_sell_price)) first_buy_price = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['주문체결']['(최우선)매수호가']) # 출력: -6000 first_buy_price = abs(int(first_buy_price)) ######## 새로", "msg_slot(self, sScrNo, sRQName, sTrCode, msg): self.logging.logger.debug(\"스크린: %s, 요청이름: %s, tr코드: %s --- %s\"", "sPrevNext == \"2\": self.detail_account_mystock(sPrevNext=\"2\") else: self.detail_account_info_event_loop.exit() elif sRQName == \"실시간미체결요청\": rows = self.dynamicCall(\"GetRepeatCnt(QString,", "order_number not in self.not_account_stock_dict.keys(): self.not_account_stock_dict.update({order_number: {}}) self.not_account_stock_dict[order_number].update({\"종목코드\": sCode}) self.not_account_stock_dict[order_number].update({\"주문번호\": order_number}) self.not_account_stock_dict[order_number].update({\"종목명\": stock_name}) self.not_account_stock_dict[order_number].update({\"주문상태\":", ": '' if chegual_price == '': chegual_price = 0 else: chegual_price = int(chegual_price)", "self.screen_my_info = \"2000\" #계좌 관련한 스크린 번호 self.screen_calculation_stock = \"4000\" #계산용 스크린 번호", "break elif int(self.calcul_data[idx][7]) > moving_average_price_prev and idx > 20: # 120일 이평선 위에", "sRQName, sTrCode, msg): self.logging.logger.debug(\"스크린: %s, 요청이름: %s, tr코드: %s 
--- %s\" %(sScrNo, sRQName,", "self.realType.REALTYPE['주문체결']['현재가']) # 출력: -6000 current_price = abs(int(current_price)) first_sell_price = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['주문체결']['(최우선)매도호가']) # 출력:", "= 0 for value in self.calcul_data[:120]: total_price += int(value[1]) moving_average_price = total_price /", "self.portfolio_stock_dict: self.portfolio_stock_dict.update({sCode:{}}) self.portfolio_stock_dict[sCode].update({\"체결시간\": a}) self.portfolio_stock_dict[sCode].update({\"현재가\": b}) self.portfolio_stock_dict[sCode].update({\"전일대비\": c}) self.portfolio_stock_dict[sCode].update({\"등락율\": d}) self.portfolio_stock_dict[sCode].update({\"(최우선)매도호가\": e}) self.portfolio_stock_dict[sCode].update({\"(최우선)매수호가\":", "meme_rate < -5): order_success = self.dynamicCall( \"SendOrder(QString, QString, QString, int, QString, int, int,", "= self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, i, \"수익률(%)\") # 수익률 : -000000001.94", "code_nm.strip() order_no = int(order_no.strip()) order_status = order_status.strip() order_quantity = int(order_quantity.strip()) order_price = int(order_price.strip())", "* self.use_money_percent self.use_money = int(use_money) self.use_money = self.use_money / 4 output_deposit = self.dynamicCall(\"GetCommData(QString,", "# 출력 : +(-)2520 c = abs(int(c)) d = self.dynamicCall(\"GetCommRealData(QString, int)\", sCode, self.realType.REALTYPE[sRealType]['등락율'])", "int, QString)\", sTrCode, sRQName, i, \"거래량\") # 출력 : 000070 trading_value = self.dynamicCall(\"GetCommData(QString,", "data = self.dynamicCall(\"GetCommDataEx(QString, QString)\", sTrCode, sRQName) # [[‘’, ‘현재가’, ‘거래량’, ‘거래대금’, ‘날짜’, ‘시가’,", "account_list = self.dynamicCall(\"GetLoginInfo(QString)\", \"ACCNO\") # 계좌번호 반환 account_num = account_list.split(';')[0] self.account_num = account_num", "order_no in self.not_account_stock_dict: pass else: self.not_account_stock_dict[order_no] = {} 
self.not_account_stock_dict[order_no].update({'종목코드': code}) self.not_account_stock_dict[order_no].update({'종목명': code_nm}) self.not_account_stock_dict[order_no].update({'주문번호':", "= self.portfolio_stock_dict[code]['스크린번호'] fids = self.realType.REALTYPE['주식체결']['체결시간'] self.dynamicCall(\"SetRealReg(QString, QString, QString, QString)\", screen_num, code, fids, \"1\")", "# dictionary 에 해당 종목이 있나 확인 pass else: self.account_stock_dict[code] = {} code_nm", "self.logging.logger.debug(\"장 시작 전\") elif value == '3': self.logging.logger.debug(\"장 시작\") elif value == \"2\":", "self.not_account_stock_dict[order_number].update({\"현재가\": current_price}) self.not_account_stock_dict[order_number].update({\"(최우선)매도호가\": first_sell_price}) self.not_account_stock_dict[order_number].update({\"(최우선)매수호가\": first_buy_price}) elif int(sGubun) == 1: #잔고 account_num =", "* from config.log_class import * # from config.slack import * class Kiwoom(QAxWidget): def", "self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, i, \"주문상태\") # 접수,확인,체결 order_quantity = self.dynamicCall(\"GetCommData(QString,", "= int(self.screen_meme_stock) if (cnt % 50) == 0: temp_screen += 1 self.screen_real_stock =", "걸쳐있는 것 확인\") bottom_stock_price = True check_price = int(self.calcul_data[0][6]) # 과거 일봉 데이터를", "True: if moving_average_price > moving_average_price_prev and check_price > prev_price: self.logging.logger.debug(\"포착된 이평선의 가격이 오늘자", "self.calcul_data[idx:120+idx]: total_price += int(value[1]) moving_average_price_prev = total_price / 120 if moving_average_price_prev <= int(self.calcul_data[idx][6])", "sCode in self.account_stock_dict.keys() and sCode not in self.jango_dict.keys(): asd = self.account_stock_dict[sCode] meme_rate =", "-000000001.94 current_price = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, i, \"현재가\") # 현재가", "if code not in screen_overwrite: screen_overwrite.append(code) # 스크린번호 할당 cnt = 0 for", "슬롯 연결 
self.signal_login_commConnect() #로그인 요청 시그널 포함 self.get_account_info() #계좌번호 가져오기 self.detail_account_info() #예수금 요청", "if sRQName == \"예수금상세현황요청\": deposit = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, 0,", "자동화 프로그램 동작\", text=\"주식 자동화 프로그램이 동작 되었습니다.\" ) def get_ocx_instance(self): self.setControl(\"KHOPENAPI.KHOpenAPICtrl.1\") #", "# 출럭 : 한국기업평가 stock_quantity = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, i,", "이평선을 그릴만큼의 데이터가 있는지 체크 if self.calcul_data == None or len(self.calcul_data) < 120:", "not_concluded_account(self, sPrevNext=\"0\"): self.dynamicCall(\"SetInputValue(QString, QString)\", \"계좌번호\", self.account_num) self.dynamicCall(\"SetInputValue(QString, QString)\", \"체결구분\", \"1\") self.dynamicCall(\"SetInputValue(QString, QString)\", \"매매구분\",", "self.dynamicCall(\"GetCommRealData(QString, int)\", sCode, self.realType.REALTYPE[sRealType]['현재가']) # 출력 : +(-)2520 b = abs(int(b)) c =", "시그널 / 슬롯 모음 self.real_event_slot() # 실시간 이벤트 시그널 / 슬롯 연결 self.signal_login_commConnect()", "= self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, i, \"현재가\") # 현재가 : 000000003450", "QString)\", \"매매구분\", \"0\") self.dynamicCall(\"CommRqData(QString, QString, int, QString)\", \"실시간미체결요청\", \"opt10075\", sPrevNext, self.screen_my_info) self.detail_account_info_event_loop.exec_() def", "self.dynamicCall(\"GetCommRealData(QString, int)\", sCode, fid) if value == '0': self.logging.logger.debug(\"장 시작 전\") elif value", "self.use_money = self.use_money / 4 output_deposit = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName,", "매입가:%s - 수익률: %s - 현재가: %s\" % ( code, code_nm, stock_quantity, buy_price,", "# [[‘’, ‘현재가’, ‘거래량’, ‘거래대금’, ‘날짜’, ‘시가’, ‘고가’, ‘저가’. 
‘’], [‘’, ‘현재가’, ’거래량’,", "self.logging.logger.debug(\"3시30분 장 종료\") for code in self.portfolio_stock_dict.keys(): self.dynamicCall(\"SetRealRemove(QString, QString)\", self.portfolio_stock_dict[code]['스크린번호'], code) QTest.qWait(5000) self.file_delete()", "QString, int, QString)\", sTrCode, sRQName, i, \"체결량\") code = code.strip() code_nm = code_nm.strip()", "sTrCode, sRQName) # [[‘’, ‘현재가’, ‘거래량’, ‘거래대금’, ‘날짜’, ‘시가’, ‘고가’, ‘저가’. ‘’], [‘’,", "내용을 읽어 오겠다는 뜻이다. lines = f.readlines() #파일에 있는 내용들이 모두 읽어와 진다.", "파이썬에 사용할 수 있게 변환해 주는 함수 self.event_slots() # 키움과 연결하기 위한 시그널", "self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['주문체결']['주문상태']) # 출력: 접수, 확인, 체결 order_quan = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['주문체결']['주문수량']) # 출력", "= self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['잔고']['총매입가']) # 계좌에 있는 종목의 총매입가 total_buy_price = int(total_buy_price) meme_gubun =", "sTrCode, sRQName, i, \"미체결수량\") ok_quantity = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, i,", "sRQName, i, \"매입금액\") possible_quantity = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, i, \"매매가능수량\")", "pass_success = True if pass_success == True: self.logging.logger.debug(\"조건부 통과됨\") code_nm = self.dynamicCall(\"GetMasterCodeName(QString)\", code)", "order_gubun}) self.not_account_stock_dict[order_no].update({'미체결수량': not_quantity}) self.not_account_stock_dict[order_no].update({'체결량': ok_quantity}) self.logging.logger.debug(\"미체결 종목 : %s \" % self.not_account_stock_dict[order_no]) self.detail_account_info_event_loop.exit()", "종목 정보 가져오기 self.portfolio_stock_dict = {} self.jango_dict = {} ######################## ########### 종목 분석", "#총수익률(%) ######################################## ######## 종목 정보 가져오기 self.portfolio_stock_dict = {} self.jango_dict = {} ########################", "못함\") price_top_moving = False break elif int(self.calcul_data[idx][7]) > 
moving_average_price_prev and idx > 20:", "self.account_stock_dict = {} self.not_account_stock_dict = {} self.deposit = 0 #예수금 self.use_money = 0", "self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, i, \"매입금액\") possible_quantity = self.dynamicCall(\"GetCommData(QString, QString, int,", "120일 이평선 위에 있는 구간 존재 self.logging.logger.debug(\"120일치 이평선 위에 있는 구간 확인됨\") price_top_moving", "= self.dynamicCall(\"GetCommRealData(QString, int)\", sCode, self.realType.REALTYPE[sRealType]['누적거래량']) # 출력 : 240124 h = abs(int(h)) i", "\"종목코드\") code_nm = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, i, \"종목명\") order_no =", "* # from config.slack import * class Kiwoom(QAxWidget): def __init__(self): super().__init__() self.realType =", "바로 실행 self.get_ocx_instance() #OCX 방식을 파이썬에 사용할 수 있게 변환해 주는 함수 self.event_slots()", "self.not_account_stock_dict[order_no].update({'주문수량': order_quantity}) self.not_account_stock_dict[order_no].update({'주문가격': order_price}) self.not_account_stock_dict[order_no].update({'주문구분': order_gubun}) self.not_account_stock_dict[order_no].update({'미체결수량': not_quantity}) self.not_account_stock_dict[order_no].update({'체결량': ok_quantity}) self.logging.logger.debug(\"미체결 종목 :", "= code.strip() # data = self.dynamicCall(\"GetCommDataEx(QString, QString)\", sTrCode, sRQName) # [[‘’, ‘현재가’, ‘거래량’,", "if (cnt % 50) == 0: temp_screen += 1 self.screen_real_stock = str(temp_screen) if", "= self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, 0, \"출금가능금액\") self.output_deposit = int(output_deposit) self.logging.logger.debug(\"예수금", "# 출력 : 000070 high_price = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, i,", "total_price += int(value[1]) moving_average_price = total_price / 120 # 오늘자 주가가 120일 이평선에", "이평선의 가격이 오늘자 이평선 가격보다 낮은 것 확인\") self.logging.logger.debug(\"포착된 부분의 저가가 오늘자 주가의", "self.logging.logger.debug(\"sPreNext : %s\" % 
sPrevNext) print(\"계좌에 가지고 있는 종목은 %s \" % rows)", "######################## ########### 종목 분석 용 self.calcul_data = [] ########################################## ####### 요청 스크린 번호", "not_meme_list: code = self.not_account_stock_dict[order_num][\"종목코드\"] meme_price = self.not_account_stock_dict[order_num]['주문가격'] not_quantity = self.not_account_stock_dict[order_num]['미체결수량'] order_gubun = self.not_account_stock_dict[order_num]['주문구분']", "#실시간 수신 관련 함수 self.dynamicCall(\"SetRealReg(QString, QString, QString, QString)\", self.screen_start_stop_real, '', self.realType.REALTYPE['장시작시간']['장운영구분'], \"0\") for", "읽어와진다. if line != \"\": ls = line.split(\"\\t\") stock_code = ls[0] stock_name =", "self.portfolio_stock_dict.keys(): self.dynamicCall(\"SetRealRemove(QString, QString)\", self.portfolio_stock_dict[code]['스크린번호'], code) QTest.qWait(5000) self.file_delete() self.calculator_fnc() sys.exit() elif sRealType == \"주식체결\":", "buy_price}) self.account_stock_dict[code].update({\"수익률(%)\": learn_rate}) self.account_stock_dict[code].update({\"현재가\": current_price}) self.account_stock_dict[code].update({\"매입금액\": total_chegual_price}) self.account_stock_dict[code].update({'매매가능수량' : possible_quantity}) self.logging.logger.debug(\"sPreNext : %s\"", "QString)\", sTrCode, sRQName, i, \"매입금액\") possible_quantity = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName,", "실패\") not_meme_list = list(self.not_account_stock_dict) for order_num in not_meme_list: code = self.not_account_stock_dict[order_num][\"종목코드\"] meme_price =", "000000000054100 learn_rate = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, i, \"수익률(%)\") # 수익률", "== \"매수\" and not_quantity > 0 and e > meme_price: order_success = self.dynamicCall(", "rows = self.dynamicCall(\"GetRepeatCnt(QString, QString)\", sTrCode, sRQName) for i in range(rows): code = self.dynamicCall(\"GetCommData(QString,", "QString)\", [\"매수취소\", 
self.portfolio_stock_dict[sCode][\"주문용스크린번호\"], self.account_num, 3, code, 0, 0, self.realType.SENDTYPE['거래구분']['지정가'], order_num] ) if order_success", "for line in lines: #줄바꿈된 내용들이 한줄 씩 읽어와진다. if line != \"\":", "%s\" % cnt) for i in range(cnt): data = [] current_price = self.dynamicCall(\"GetCommData(QString,", "sTrCode, sRQName, 0, \"예수금\") self.deposit = int(deposit) use_money = float(self.deposit) * self.use_money_percent self.use_money", "import * # from config.slack import * class Kiwoom(QAxWidget): def __init__(self): super().__init__() self.realType", "저장된 api 모듈 불러오기 def event_slots(self): self.OnEventConnect.connect(self.login_slot) # 로그인 관련 이벤트 self.OnReceiveTrData.connect(self.trdata_slot) #", "= False else: # 120일 이평선의 최근 가격 구함 total_price = 0 for", "주가가 120일 이평선에 걸쳐있는지 확인 bottom_stock_price = False check_price = None if int(self.calcul_data[0][7])", "RealType() self.logging = Logging() # self.slack = Slack() #슬랙 동작 #print(\"kiwoom() class start.", "\"수정주가구분\", \"1\") if date != None: self.dynamicCall(\"SetInputValue(QString, QString)\", \"기준일자\", date) self.dynamicCall(\"CommRqData(QString, QString, int,", "err_code): self.logging.logger.debug(errors(err_code)[1]) #로그인 처리가 완료됐으면 이벤트 루프를 종료한다. 
self.login_event_loop.exit() def get_account_info(self): account_list =", "self.dynamicCall(\"DisconnectRealData(QString)\", sScrNo) # 스크린번호 연결 끊기 def get_code_list_by_market(self, market_code): ''' 종목코드 리스트 받기", "self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, i, \"종목명\") # 출럭 : 한국기업평가 stock_quantity", ": 3 order_quan = int(order_quan) order_price = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['주문체결']['주문가격']) # 출력: 21000 order_price", "계좌 관련된 변수 self.account_stock_dict = {} self.not_account_stock_dict = {} self.deposit = 0 #예수금", "self.portfolio_stock_dict[sCode].update({\"시가\": j}) self.portfolio_stock_dict[sCode].update({\"저가\": k}) if sCode in self.account_stock_dict.keys() and sCode not in self.jango_dict.keys():", "데이터 얻어오기 def realdata_slot(self, sCode, sRealType, sRealData): if sRealType == \"장시작시간\": fid =", "확인 pass else: self.account_stock_dict[code] = {} code_nm = code_nm.strip() stock_quantity = int(stock_quantity.strip()) buy_price", "실시간 이벤트 시그널 / 슬롯 연결 self.signal_login_commConnect() #로그인 요청 시그널 포함 self.get_account_info() #계좌번호", "self.deposit = int(deposit) use_money = float(self.deposit) * self.use_money_percent self.use_money = int(use_money) self.use_money =", "high_price = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, i, \"고가\") # 출력 :", "int(stock_quan) like_quan = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['잔고']['주문가능수량']) like_quan = int(like_quan) buy_price = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['잔고']['매입단가']) buy_price", "self.jango_dict[sCode].update({\"(최우선)매도호가\": first_sell_price}) self.jango_dict[sCode].update({\"(최우선)매수호가\": first_buy_price}) if stock_quan == 0: del self.jango_dict[sCode] #송수신 메세지 get", "for code in self.account_stock_dict.keys(): if code not in screen_overwrite: screen_overwrite.append(code) #미체결에 있는 종목들", "= \"6000\" #종목별 할당할 주문용스크린 번호 self.screen_start_stop_real = \"1000\" 
#장 시작/종료 실시간 스크린번호", "# 출력 : 000070 trading_value = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, i,", "동시호가로 넘어감\") elif value == \"4\": self.logging.logger.debug(\"3시30분 장 종료\") for code in self.portfolio_stock_dict.keys():", "current_price)) if code in self.account_stock_dict: # dictionary 에 해당 종목이 있나 확인 pass", "int, QString)\", sTrCode, sRQName, 0, \"종목코드\") code = code.strip() # data = self.dynamicCall(\"GetCommDataEx(QString,", "if sPrevNext == \"2\": self.detail_account_mystock(sPrevNext=\"2\") else: self.detail_account_info_event_loop.exit() elif sRQName == \"실시간미체결요청\": rows =", "####### event loop를 실행하기 위한 변수모음 self.login_event_loop = QEventLoop() #로그인 요청용 이벤트루프 self.detail_account_info_event_loop", "pass_success = False else: # 120일 이평선의 최근 가격 구함 total_price = 0", "int, QString)\", sTrCode, sRQName, 0, \"출금가능금액\") self.output_deposit = int(output_deposit) self.logging.logger.debug(\"예수금 : %s\" %", "pass else: self.not_account_stock_dict[order_no] = {} self.not_account_stock_dict[order_no].update({'종목코드': code}) self.not_account_stock_dict[order_no].update({'종목명': code_nm}) self.not_account_stock_dict[order_no].update({'주문번호': order_no}) self.not_account_stock_dict[order_no].update({'주문상태': order_status})", "번호 self.screen_calculation_stock = \"4000\" #계산용 스크린 번호 self.screen_real_stock = \"5000\" #종목별 할당할 스크린", "#미체결에 있는 종목들 for order_number in self.not_account_stock_dict.keys(): code = self.not_account_stock_dict[order_number]['종목코드'] if code not", "\"비밀번호\", \"0000\") self.dynamicCall(\"SetInputValue(QString, QString)\", \"비밀번호입력매체구분\", \"00\") self.dynamicCall(\"SetInputValue(QString, QString)\", \"조회구분\", \"1\") self.dynamicCall(\"CommRqData(QString, QString, int,", "= self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, i, \"고가\") # 출력 : 000070", "== True: moving_average_price_prev = 0 price_top_moving = False idx = 1 while True:", "= self.dynamicCall( \"SendOrder(QString, QString, 
QString, int, QString, int, int, QString, QString)\", [\"매수취소\", self.portfolio_stock_dict[sCode][\"주문용스크린번호\"],", "import * class Kiwoom(QAxWidget): def __init__(self): super().__init__() self.realType = RealType() self.logging = Logging()", "종목들 for code in self.account_stock_dict.keys(): if code not in screen_overwrite: screen_overwrite.append(code) #미체결에 있는", "else: self.logging.logger.debug(\"매도주문 전달 실패\") elif d > 2.0 and sCode not in self.jango_dict:", "int, QString)\", sTrCode, sRQName, i, \"주문상태\") # 접수,확인,체결 order_quantity = self.dynamicCall(\"GetCommData(QString, QString, int,", "encoding=\"utf8\") # \"r\"을 인자로 던져주면 파일 내용을 읽어 오겠다는 뜻이다. lines = f.readlines()", "연결하기 위한 시그널 / 슬롯 모음 self.real_event_slot() # 실시간 이벤트 시그널 / 슬롯", "= int(ok_quantity.strip()) if order_no in self.not_account_stock_dict: pass else: self.not_account_stock_dict[order_no] = {} self.not_account_stock_dict[order_no].update({'종목코드': code})", "self.not_account_stock_dict[order_no].update({'주문상태': order_status}) self.not_account_stock_dict[order_no].update({'주문수량': order_quantity}) self.not_account_stock_dict[order_no].update({'주문가격': order_price}) self.not_account_stock_dict[order_no].update({'주문구분': order_gubun}) self.not_account_stock_dict[order_no].update({'미체결수량': not_quantity}) self.not_account_stock_dict[order_no].update({'체결량': ok_quantity}) self.logging.logger.debug(\"미체결", "code, code_nm, stock_quantity, buy_price, learn_rate, current_price)) if code in self.account_stock_dict: # dictionary 에", "출력 : 240124 h = abs(int(h)) i = self.dynamicCall(\"GetCommRealData(QString, int)\", sCode, self.realType.REALTYPE[sRealType]['고가']) #", "data.append(high_price.strip()) data.append(low_price.strip()) data.append(\"\") self.calcul_data.append(data.copy()) if sPrevNext == \"2\": self.day_kiwoom_db(code=code, sPrevNext=sPrevNext) else: self.logging.logger.debug(\"총 일수", "%s\" % (total_buy_money, total_profit_loss_money, total_profit_loss_rate)) rows = self.dynamicCall(\"GetRepeatCnt(QString, 
QString)\", sTrCode, sRQName) for i", "int(sGubun) == 0: #주문체결 account_num = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['주문체결']['계좌번호']) sCode = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['주문체결']['종목코드'])[1:] stock_name", "스크린 번호 self.screen_real_stock = \"5000\" #종목별 할당할 스크린 번호 self.screen_meme_stock = \"6000\" #종목별", "self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['주문체결']['(최우선)매수호가']) # 출력: -6000 first_buy_price = abs(int(first_buy_price)) ######## 새로 들어온 주문이면 주문번호", "int, QString)\", \"예수금상세현황요청\", \"opw00001\", sPrevNext, self.screen_my_info) self.detail_account_info_event_loop.exec_() def detail_account_mystock(self, sPrevNext=\"0\"): self.dynamicCall(\"SetInputValue(QString, QString)\", \"계좌번호\",", "elif sRQName == \"계좌평가잔고내역요청\": total_buy_money = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, 0,", "int, QString)\", sTrCode, sRQName, i, \"매매가능수량\") self.logging.logger.debug(\"종목코드: %s - 종목명: %s - 보유수량:", "self.realType.REALTYPE['주문체결']['미체결수량']) # 출력: 15, default: 0 not_chegual_quan = int(not_chegual_quan) order_gubun = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['주문체결']['주문구분'])", "sRQName, i, \"매입가\") # 매입가 : 000000000054100 learn_rate = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\",", "%s - %s\" % (total_buy_money, total_profit_loss_money, total_profit_loss_rate)) rows = self.dynamicCall(\"GetRepeatCnt(QString, QString)\", sTrCode, sRQName)", "self.dynamicCall(\"SetRealReg(QString, QString, QString, QString)\", screen_num, code, fids, \"1\") self.slack.notification( pretext=\"주식자동화 프로그램 동작\", title=\"주식", "3:장시작, 4,8:장종료(30분), 9:장마감) value = self.dynamicCall(\"GetCommRealData(QString, int)\", sCode, fid) if value == '0':", "jd = self.jango_dict[sCode] meme_rate = (b - jd['매입단가']) / jd['매입단가'] * 100 if", "‘현재가’, ’거래량’, ‘거래대금’, ‘날짜’, ‘시가’, ‘고가’, ‘저가’, ‘’]. 
[…]] cnt = self.dynamicCall(\"GetRepeatCnt(QString, QString)\",", "= int(order_quantity.strip()) order_price = int(order_price.strip()) order_gubun = order_gubun.strip().lstrip('+').lstrip('-') not_quantity = int(not_quantity.strip()) ok_quantity =", "order_success = self.dynamicCall( \"SendOrder(QString, QString, QString, int, QString, int, int, QString, QString)\", [\"신규매수\",", "240124 h = abs(int(h)) i = self.dynamicCall(\"GetCommRealData(QString, int)\", sCode, self.realType.REALTYPE[sRealType]['고가']) # 출력 :", "QString)\", \"예수금상세현황요청\", \"opw00001\", sPrevNext, self.screen_my_info) self.detail_account_info_event_loop.exec_() def detail_account_mystock(self, sPrevNext=\"0\"): self.dynamicCall(\"SetInputValue(QString, QString)\", \"계좌번호\", self.account_num)", "total_price = 0 for value in self.calcul_data[idx:120+idx]: total_price += int(value[1]) moving_average_price_prev = total_price", "분석 용 self.calcul_data = [] ########################################## ####### 요청 스크린 번호 self.screen_my_info = \"2000\"", "sRQName == \"실시간미체결요청\": rows = self.dynamicCall(\"GetRepeatCnt(QString, QString)\", sTrCode, sRQName) for i in range(rows):", "QString)\", sTrCode, sRQName, i, \"거래대금\") # 출력 : 000070 date = self.dynamicCall(\"GetCommData(QString, QString,", "int)\", sCode, self.realType.REALTYPE[sRealType]['(최우선)매수호가']) # 출력 : +(-)2515 f = abs(int(f)) g = self.dynamicCall(\"GetCommRealData(QString,", "+(-)2530 k = abs(int(k)) if sCode not in self.portfolio_stock_dict: self.portfolio_stock_dict.update({sCode:{}}) self.portfolio_stock_dict[sCode].update({\"체결시간\": a}) self.portfolio_stock_dict[sCode].update({\"현재가\":", "QString, int, QString)\", sTrCode, sRQName, i, \"거래대금\") # 출력 : 000070 date =", "연결 self.signal_login_commConnect() #로그인 요청 시그널 포함 self.get_account_info() #계좌번호 가져오기 self.detail_account_info() #예수금 요청 시그널", "sCode, self.realType.REALTYPE[sRealType]['(최우선)매수호가']) # 출력 : +(-)2515 f = abs(int(f)) g = self.dynamicCall(\"GetCommRealData(QString, int)\",", "#종목별 
할당할 스크린 번호 self.screen_meme_stock = \"6000\" #종목별 할당할 주문용스크린 번호 self.screen_start_stop_real =", "#3.6초마다 딜레이를 준다. self.dynamicCall(\"SetInputValue(QString, QString)\", \"종목코드\", code) self.dynamicCall(\"SetInputValue(QString, QString)\", \"수정주가구분\", \"1\") if date", "0, \"총수익률(%)\") self.total_profit_loss_rate = float(total_profit_loss_rate) self.logging.logger.debug(\"계좌평가잔고내역요청 싱글데이터 : %s - %s - %s\"", "얻어오기 def realdata_slot(self, sCode, sRealType, sRealData): if sRealType == \"장시작시간\": fid = self.realType.REALTYPE[sRealType]['장운영구분']", "= self.realType.REALTYPE[sRealType]['장운영구분'] # (0:장시작전, 2:장종료전(20분), 3:장시작, 4,8:장종료(30분), 9:장마감) value = self.dynamicCall(\"GetCommRealData(QString, int)\", sCode,", "동작\", title=\"주식 자동화 프로그램 동작\", fallback=\"주식 자동화 프로그램 동작\", text=\"주식 자동화 프로그램이 동작", "출력: -6010 first_sell_price = abs(int(first_sell_price)) first_buy_price = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['주문체결']['(최우선)매수호가']) # 출력: -6000 first_buy_price", "%s - 현재가: %s\" % ( code, code_nm, stock_quantity, buy_price, learn_rate, current_price)) if", "self.logging.logger.debug(\"포착된 부분의 저가가 오늘자 주가의 고가보다 낮은지 확인\") pass_success = True if pass_success", "sRQName) self.logging.logger.debug(\"남은 일자 수 %s\" % cnt) for i in range(cnt): data =", "sRQName, i, \"주문가격\") order_gubun = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\", sTrCode, sRQName, i, \"주문구분\")", "= self.dynamicCall(\"GetCommRealData(QString, int)\", sCode, self.realType.REALTYPE[sRealType]['전일대비']) # 출력 : +(-)2520 c = abs(int(c)) d", "sRQName, i, \"시가\") # 출력 : 000070 high_price = self.dynamicCall(\"GetCommData(QString, QString, int, QString)\",", "e = abs(int(e)) f = self.dynamicCall(\"GetCommRealData(QString, int)\", sCode, self.realType.REALTYPE[sRealType]['(최우선)매수호가']) # 출력 : +(-)2515", "= QEventLoop() # 예수금 요청용 이벤트루프 self.calculator_event_loop = QEventLoop() ######################################### ########### 전체 종목", "order_no}) 
self.not_account_stock_dict[order_no].update({'주문상태': order_status}) self.not_account_stock_dict[order_no].update({'주문수량': order_quantity}) self.not_account_stock_dict[order_no].update({'주문가격': order_price}) self.not_account_stock_dict[order_no].update({'주문구분': order_gubun}) self.not_account_stock_dict[order_no].update({'미체결수량': not_quantity}) self.not_account_stock_dict[order_no].update({'체결량': ok_quantity})", "msg): self.logging.logger.debug(\"스크린: %s, 요청이름: %s, tr코드: %s --- %s\" %(sScrNo, sRQName, sTrCode, msg))", "/ %s : KOSDAQ Stock Code : %s is updating... \" % (idx", "if asd['매매가능수량'] > 0 and (meme_rate > 5 or meme_rate < -5): order_success", "# self.slack = Slack() #슬랙 동작 #print(\"kiwoom() class start. \") self.logging.logger.debug(\"Kiwoom() class start.\")", "int, QString)\", sTrCode, sRQName, i, \"수익률(%)\") # 수익률 : -000000001.94 current_price = self.dynamicCall(\"GetCommData(QString,", "알파벳 A는 장내주식, J는 ELW종목, Q는 ETN종목 code = code.strip()[1:] code_nm = self.dynamicCall(\"GetCommData(QString,", "self.logging.logger.debug(\"코스닥 갯수 %s \" % len(code_list)) for idx, code in enumerate(code_list): self.dynamicCall(\"DisconnectRealData(QString)\", self.screen_calculation_stock)", "price_top_moving = False break elif int(self.calcul_data[idx][7]) > moving_average_price_prev and idx > 20: #", "= self.dynamicCall( \"SendOrder(QString, QString, QString, int, QString, int, int, QString, QString)\", [\"신규매수\", self.portfolio_stock_dict[sCode][\"주문용스크린번호\"],", "= self.realType.REALTYPE['매도수구분'][meme_gubun] first_sell_price = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['잔고']['(최우선)매도호가']) first_sell_price = abs(int(first_sell_price)) first_buy_price = self.dynamicCall(\"GetChejanData(int)\", self.realType.REALTYPE['잔고']['(최우선)매수호가'])" ]
[ "return json.dumps(value) def python_value(self, value: str) -> Dict: \"\"\" Convert a string value", "the sqlite version is lower, we roll a less-optimal replacement. \"\"\" from typing", "JSONField(pw.CharField): # pragma: no cover \"\"\" Polyfill class to provide a JSON field", "have a native JSON extension, which we use. If the sqlite version is", "native JSON extension, which we use. If the sqlite version is lower, we", "string \"\"\" return json.dumps(value) def python_value(self, value: str) -> Dict: \"\"\" Convert a", "-> Dict: \"\"\" Convert a string value from the database back to what", "value: str) -> Dict: \"\"\" Convert a string value from the database back", "Dict: \"\"\" Convert a string value from the database back to what it", ":param value: The value to store :return: The JSON string \"\"\" return json.dumps(value)", "cover \"\"\" Polyfill class to provide a JSON field \"\"\" def db_value(self, value:", "of sqlite (>= 3.9.0) have a native JSON extension, which we use. If", "\"\"\" return cast(Dict, json.loads(value)) else: # pragma: no cover # flake8: noqa from", "Dict) -> str: \"\"\" Convert a value to a string for storage in", "`CharField` :param value: The value to store :return: The JSON string \"\"\" return", "json.loads(value)) else: # pragma: no cover # flake8: noqa from playhouse.sqlite_ext import JSONField", "from the database back to what it was. :param value: The string value", "to store :return: The JSON string \"\"\" return json.dumps(value) def python_value(self, value: str)", "-> str: \"\"\" Convert a value to a string for storage in a", "is lower, we roll a less-optimal replacement. \"\"\" from typing import Dict, cast", "field \"\"\" def db_value(self, value: Dict) -> str: \"\"\" Convert a value to", "3.9.0) have a native JSON extension, which we use. 
If the sqlite version", "Polyfill class to provide a JSON field \"\"\" def db_value(self, value: Dict) ->", "The value to store :return: The JSON string \"\"\" return json.dumps(value) def python_value(self,", ":param value: The string value :return: Parsed JSON value \"\"\" return cast(Dict, json.loads(value))", "JSON extension, which we use. If the sqlite version is lower, we roll", "a less-optimal replacement. \"\"\" from typing import Dict, cast import peewee as pw", "SQLite. Newer versions of sqlite (>= 3.9.0) have a native JSON extension, which", "Newer versions of sqlite (>= 3.9.0) have a native JSON extension, which we", "provide a JSON field \"\"\" def db_value(self, value: Dict) -> str: \"\"\" Convert", "as pw from peewee import sqlite3 import json if sqlite3.sqlite_version_info < (3, 9,", "type: ignore class JSONField(pw.CharField): # pragma: no cover \"\"\" Polyfill class to provide", "we use. If the sqlite version is lower, we roll a less-optimal replacement.", "string for storage in a `CharField` :param value: The value to store :return:", "value \"\"\" return cast(Dict, json.loads(value)) else: # pragma: no cover # flake8: noqa", "versions of sqlite (>= 3.9.0) have a native JSON extension, which we use.", "ignore class JSONField(pw.CharField): # pragma: no cover \"\"\" Polyfill class to provide a", "value to store :return: The JSON string \"\"\" return json.dumps(value) def python_value(self, value:", "\"\"\" return json.dumps(value) def python_value(self, value: str) -> Dict: \"\"\" Convert a string", "sqlite (>= 3.9.0) have a native JSON extension, which we use. If the", "back to what it was. :param value: The string value :return: Parsed JSON", "JSON string \"\"\" return json.dumps(value) def python_value(self, value: str) -> Dict: \"\"\" Convert", "field in SQLite. Newer versions of sqlite (>= 3.9.0) have a native JSON", "replacement. \"\"\" from typing import Dict, cast import peewee as pw from peewee", "in SQLite. 
Newer versions of sqlite (>= 3.9.0) have a native JSON extension,", "for a JSON field in SQLite. Newer versions of sqlite (>= 3.9.0) have", "the database back to what it was. :param value: The string value :return:", "import sqlite3 import json if sqlite3.sqlite_version_info < (3, 9, 0): # type: ignore", "value: Dict) -> str: \"\"\" Convert a value to a string for storage", "import json if sqlite3.sqlite_version_info < (3, 9, 0): # type: ignore class JSONField(pw.CharField):", "if sqlite3.sqlite_version_info < (3, 9, 0): # type: ignore class JSONField(pw.CharField): # pragma:", "it was. :param value: The string value :return: Parsed JSON value \"\"\" return", "string value from the database back to what it was. :param value: The", "class JSONField(pw.CharField): # pragma: no cover \"\"\" Polyfill class to provide a JSON", "sqlite version is lower, we roll a less-optimal replacement. \"\"\" from typing import", "str: \"\"\" Convert a value to a string for storage in a `CharField`", "str) -> Dict: \"\"\" Convert a string value from the database back to", "pw from peewee import sqlite3 import json if sqlite3.sqlite_version_info < (3, 9, 0):", "# type: ignore class JSONField(pw.CharField): # pragma: no cover \"\"\" Polyfill class to", "storage in a `CharField` :param value: The value to store :return: The JSON", "If the sqlite version is lower, we roll a less-optimal replacement. \"\"\" from", "Parsed JSON value \"\"\" return cast(Dict, json.loads(value)) else: # pragma: no cover #", "cast import peewee as pw from peewee import sqlite3 import json if sqlite3.sqlite_version_info", "The string value :return: Parsed JSON value \"\"\" return cast(Dict, json.loads(value)) else: #", "a native JSON extension, which we use. If the sqlite version is lower,", "lower, we roll a less-optimal replacement. \"\"\" from typing import Dict, cast import", "a `CharField` :param value: The value to store :return: The JSON string \"\"\"", "which we use. 
If the sqlite version is lower, we roll a less-optimal", "\"\"\" from typing import Dict, cast import peewee as pw from peewee import", "Convert a string value from the database back to what it was. :param", "json if sqlite3.sqlite_version_info < (3, 9, 0): # type: ignore class JSONField(pw.CharField): #", "a string for storage in a `CharField` :param value: The value to store", "sqlite3.sqlite_version_info < (3, 9, 0): # type: ignore class JSONField(pw.CharField): # pragma: no", "class to provide a JSON field \"\"\" def db_value(self, value: Dict) -> str:", "0): # type: ignore class JSONField(pw.CharField): # pragma: no cover \"\"\" Polyfill class", "we roll a less-optimal replacement. \"\"\" from typing import Dict, cast import peewee", "(>= 3.9.0) have a native JSON extension, which we use. If the sqlite", "a string value from the database back to what it was. :param value:", "cast(Dict, json.loads(value)) else: # pragma: no cover # flake8: noqa from playhouse.sqlite_ext import", "pragma: no cover \"\"\" Polyfill class to provide a JSON field \"\"\" def", "peewee as pw from peewee import sqlite3 import json if sqlite3.sqlite_version_info < (3,", "sqlite3 import json if sqlite3.sqlite_version_info < (3, 9, 0): # type: ignore class", "a value to a string for storage in a `CharField` :param value: The", "The JSON string \"\"\" return json.dumps(value) def python_value(self, value: str) -> Dict: \"\"\"", "\"\"\" def db_value(self, value: Dict) -> str: \"\"\" Convert a value to a", "value :return: Parsed JSON value \"\"\" return cast(Dict, json.loads(value)) else: # pragma: no", "9, 0): # type: ignore class JSONField(pw.CharField): # pragma: no cover \"\"\" Polyfill", "value: The value to store :return: The JSON string \"\"\" return json.dumps(value) def", "roll a less-optimal replacement. 
\"\"\" from typing import Dict, cast import peewee as", "from peewee import sqlite3 import json if sqlite3.sqlite_version_info < (3, 9, 0): #", "def python_value(self, value: str) -> Dict: \"\"\" Convert a string value from the", "version is lower, we roll a less-optimal replacement. \"\"\" from typing import Dict,", "to provide a JSON field \"\"\" def db_value(self, value: Dict) -> str: \"\"\"", "less-optimal replacement. \"\"\" from typing import Dict, cast import peewee as pw from", "\"\"\" Convert a value to a string for storage in a `CharField` :param", "database back to what it was. :param value: The string value :return: Parsed", "\"\"\" Convert a string value from the database back to what it was.", "python_value(self, value: str) -> Dict: \"\"\" Convert a string value from the database", "<reponame>paulgessinger/kong \"\"\" Polyfill for a JSON field in SQLite. Newer versions of sqlite", "in a `CharField` :param value: The value to store :return: The JSON string", "Polyfill for a JSON field in SQLite. Newer versions of sqlite (>= 3.9.0)", "extension, which we use. If the sqlite version is lower, we roll a", "\"\"\" Polyfill class to provide a JSON field \"\"\" def db_value(self, value: Dict)", "value to a string for storage in a `CharField` :param value: The value", "JSON field in SQLite. 
Newer versions of sqlite (>= 3.9.0) have a native", "< (3, 9, 0): # type: ignore class JSONField(pw.CharField): # pragma: no cover", ":return: Parsed JSON value \"\"\" return cast(Dict, json.loads(value)) else: # pragma: no cover", "from typing import Dict, cast import peewee as pw from peewee import sqlite3", "JSON field \"\"\" def db_value(self, value: Dict) -> str: \"\"\" Convert a value", "Convert a value to a string for storage in a `CharField` :param value:", "for storage in a `CharField` :param value: The value to store :return: The", "value: The string value :return: Parsed JSON value \"\"\" return cast(Dict, json.loads(value)) else:", "pragma: no cover # flake8: noqa from playhouse.sqlite_ext import JSONField # type: ignore", "Dict, cast import peewee as pw from peewee import sqlite3 import json if", "JSON value \"\"\" return cast(Dict, json.loads(value)) else: # pragma: no cover # flake8:", "use. If the sqlite version is lower, we roll a less-optimal replacement. \"\"\"", "string value :return: Parsed JSON value \"\"\" return cast(Dict, json.loads(value)) else: # pragma:", "what it was. :param value: The string value :return: Parsed JSON value \"\"\"", "else: # pragma: no cover # flake8: noqa from playhouse.sqlite_ext import JSONField #", "\"\"\" Polyfill for a JSON field in SQLite. Newer versions of sqlite (>=", "typing import Dict, cast import peewee as pw from peewee import sqlite3 import", "def db_value(self, value: Dict) -> str: \"\"\" Convert a value to a string", "peewee import sqlite3 import json if sqlite3.sqlite_version_info < (3, 9, 0): # type:", "a JSON field in SQLite. 
Newer versions of sqlite (>= 3.9.0) have a", "(3, 9, 0): # type: ignore class JSONField(pw.CharField): # pragma: no cover \"\"\"", "# pragma: no cover \"\"\" Polyfill class to provide a JSON field \"\"\"", "json.dumps(value) def python_value(self, value: str) -> Dict: \"\"\" Convert a string value from", "a JSON field \"\"\" def db_value(self, value: Dict) -> str: \"\"\" Convert a", "to a string for storage in a `CharField` :param value: The value to", ":return: The JSON string \"\"\" return json.dumps(value) def python_value(self, value: str) -> Dict:", "was. :param value: The string value :return: Parsed JSON value \"\"\" return cast(Dict,", "to what it was. :param value: The string value :return: Parsed JSON value", "return cast(Dict, json.loads(value)) else: # pragma: no cover # flake8: noqa from playhouse.sqlite_ext", "import peewee as pw from peewee import sqlite3 import json if sqlite3.sqlite_version_info <", "# pragma: no cover # flake8: noqa from playhouse.sqlite_ext import JSONField # type:", "no cover \"\"\" Polyfill class to provide a JSON field \"\"\" def db_value(self,", "import Dict, cast import peewee as pw from peewee import sqlite3 import json", "db_value(self, value: Dict) -> str: \"\"\" Convert a value to a string for", "value from the database back to what it was. :param value: The string", "store :return: The JSON string \"\"\" return json.dumps(value) def python_value(self, value: str) ->" ]
[ "write(self, responses): await self.config.get_mysql_pool_cli() # init mysql pool miss_count = 0 original_length =", "len(json.dumps(test_response[k])) < len(json.dumps(v)): test_response[k] = v elif v is not None and test_response[k]", "assume this field (the shortest length) * 4 <= the longest length(8192) elif", "list() curr_sql = '(' for field in self.key_fields: if field in self.auto_increment_keys and", "bool): field_type = \"BOOLEAN\" elif isinstance(value, int): field_type = \"BIGINT\" elif isinstance(value, float):", "\",\" sql_keys = sql_keys[:-1] + \")\" sql_without_auto_increment_keys.append(\"REPLACE INTO %s%s VALUES \" % (self.config.table,", "None: await self.create_table(responses) # check field await self.config.cursor.execute(\"DESC %s\" % (self.config.table, )) results", "self.auto_increment_keys and field not in each: need_specific_sql = True continue val = each[field]", "field_type = \"TEXT\" elif isinstance(value, bool): field_type = \"BOOLEAN\" elif isinstance(value, int): field_type", "filtered %d item, total write %d item\" % (self.config.name, self.total_miss_count, self.success_count)) def __enter__(self):", "write %d item\" % (self.config.name, self.total_miss_count, self.success_count)) def __enter__(self): return self def finish_once(self,", "field: self.auto_increment_keys.add(field[0]) fields = set(i[0] for i in results) self.key_fields = list(i[0] for", "await self.config.get_mysql_pool_cli() # init mysql pool miss_count = 0 original_length = len(responses) if", "set(responses[0].keys()) difference_set = real_keys.difference(fields) if difference_set: # real keys not subset of fields", "+= 1 if try_time < self.config.max_retry: logging.error(\"retry: %d, %s\" % (try_time, str(e))) await", "in keys: sql_keys += each_sql_key + \",\" sql_keys = sql_keys[:-1] + \")\" sql_without_auto_increment_keys.append(\"REPLACE", "key: field_type = \"BIGINT\" elif value is None: field_type = \"TEXT\" elif key", ") or isinstance(value, dict) or 
isinstance(value, list): field_type = \"TEXT\" elif isinstance(value, bool):", "test_response = dict() for response in responses[:50]: for k, v in response.items(): if", "= \"\"\" CREATE TABLE `%s` ( \"\"\" % (self.config.table, ) first_field = True", "= \"TEXT\" elif key in (\"content\", ) or isinstance(value, dict) or isinstance(value, list):", "sql_without_auto_increment_keys = list() for each in responses: need_specific_sql = False keys = list()", "= curr_sql[:-1] + '),\\n' if need_specific_sql: sql_keys = \"(\" for each_sql_key in keys:", "item, filtered %d item\" % (self.config.name, original_length - miss_count, miss_count)) async def table_check(self,", "+ \",\" curr_sql = curr_sql[:-1] + '),\\n' if need_specific_sql: sql_keys = \"(\" for", "field_type = \"TEXT\" else: length = len(value) * 4 if length < 256:", "if difference_set: # real keys not subset of fields raise ValueError(\"Field %s not", "2048: field_type = \"TEXT\" else: length = len(value) * 4 if length <", "= 0 self.success_count = 0 self.table_checked = False self.key_fields = list() self.auto_increment_keys =", "(\"\\t\" if first_field else \"\\t\\t\") + \"`%s` %s\" % (key, field_type) if key", "real_keys = set(responses[0].keys()) difference_set = real_keys.difference(fields) if difference_set: # real keys not subset", "field_type = \"TEXT\" elif key in (\"content\", ) or isinstance(value, dict) or isinstance(value,", "(`id`) ) ENGINE=InnoDB DEFAULT CHARSET=%s \"\"\" % (self.config.charset, ) sql += tail_sql logging.info(\"Creating", "self.key_fields = list() self.auto_increment_keys = set() async def write(self, responses): await self.config.get_mysql_pool_cli() #", "keys: sql_keys += each_sql_key + \",\" sql_keys = sql_keys[:-1] + \")\" sql_without_auto_increment_keys.append(\"REPLACE INTO", "original_length logging.info(\"%s write %d item, filtered %d item\" % (self.config.name, original_length - miss_count,", "isinstance(value, int): field_type = \"BIGINT\" elif isinstance(value, float): 
field_type = \"DOUBLE\" # varchar", "be less than 65536 / 8 = 8192 # assume this field (the", "write done, total filtered %d item, total write %d item\" % (self.config.name, self.total_miss_count,", "to write if not self.table_checked: await self.table_check(responses) if await self.perform_write(responses): self.finish_once(miss_count, original_length) def", "finish_once(self, miss_count, original_length): self.total_miss_count += miss_count self.success_count += original_length logging.info(\"%s write %d item,", "dict) or isinstance(val, list): val = json.dumps(val) if val is None: curr_sql +=", "= False self.key_fields = list() self.auto_increment_keys = set() async def write(self, responses): await", "so length should be less than 65536 / 8 = 8192 # assume", "config self.total_miss_count = 0 self.success_count = 0 self.table_checked = False self.key_fields = list()", "try_time < self.config.max_retry: try: ret_sql = \"\" if normal_sql: ret_sql += sql +", "None: field_type = \"TEXT\" elif key in (\"content\", ) or isinstance(value, dict) or", "else \"\\t\\t\") + \"`%s` %s\" % (key, field_type) if key == \"id\": sql", "self.total_miss_count = 0 self.success_count = 0 self.table_checked = False self.key_fields = list() self.auto_increment_keys", "+= \",\\n\" if first_field: first_field = False tail_sql = \"\"\" \\tPRIMARY KEY (`id`)", "sql = sql[:-2] try_time = 0 while try_time < self.config.max_retry: try: ret_sql =", "= set(responses[0].keys()) difference_set = real_keys.difference(fields) if difference_set: # real keys not subset of", "field (the shortest length) * 4 <= the longest length(8192) elif len(value) >", "list() for i in responses: i = self.config.filter(i) if i: target_responses.append(i) else: miss_count", "first_field: first_field = False tail_sql = \"\"\" \\tPRIMARY KEY (`id`) ) ENGINE=InnoDB DEFAULT", "True except Exception as e: try_time += 1 if try_time < self.config.max_retry: logging.error(\"retry:", "def table_check(self, responses): await 
self.config.cursor.execute(\"SHOW TABLES LIKE '%s'\" % (self.config.table, )) result =", "original_length) def __exit__(self, exc_type, exc_val, exc_tb): self.config.free_resource() logging.info(\"%s write done, total filtered %d", "is None: field_type = \"TEXT\" elif key in (\"content\", ) or isinstance(value, dict)", "in results) real_keys = set(responses[0].keys()) difference_set = real_keys.difference(fields) if difference_set: # real keys", "= dict() for response in responses[:50]: for k, v in response.items(): if k", "responses: self.finish_once(miss_count, original_length) return # After filtered, still have responses to write if", "write if not self.table_checked: await self.table_check(responses) if await self.perform_write(responses): self.finish_once(miss_count, original_length) def __exit__(self,", "INTO %s%s VALUES \" % (self.config.table, sql_keys) + curr_sql[:-2]) else: normal_sql = True", "= 0 while try_time < self.config.max_retry: try: ret_sql = \"\" if normal_sql: ret_sql", "elif key in (\"content\", ) or isinstance(value, dict) or isinstance(value, list): field_type =", "each in responses: need_specific_sql = False keys = list() curr_sql = '(' for", "sql_keys) + curr_sql[:-2]) else: normal_sql = True sql += curr_sql sql = sql[:-2]", "isinstance(value, bool): field_type = \"BOOLEAN\" elif isinstance(value, int): field_type = \"BIGINT\" elif isinstance(value,", "in key: field_type = \"BIGINT\" elif value is None: field_type = \"TEXT\" elif", "\" \"total write %d items, total filtered: %d items, reason: %s\" % (self.config.name,", "if self.config.filter: target_responses = list() for i in responses: i = self.config.filter(i) if", "None and test_response[k] < v: test_response[k] = v sql = \"\"\" CREATE TABLE", "exc_val, exc_tb): self.config.free_resource() logging.info(\"%s write done, total filtered %d item, total write %d", "have responses to write if not self.table_checked: await self.table_check(responses) if await self.perform_write(responses): 
self.finish_once(miss_count,", "def __exit__(self, exc_type, exc_val, exc_tb): self.config.free_resource() logging.info(\"%s write done, total filtered %d item,", "json.dumps(val) if val is None: curr_sql += 'NULL,' else: curr_sql += repr(val) +", "import traceback from .BaseWriter import BaseWriter class MySQLWriter(BaseWriter): def __init__(self, config): super().__init__() self.config", "def finish_once(self, miss_count, original_length): self.total_miss_count += miss_count self.success_count += original_length logging.info(\"%s write %d", "async def table_check(self, responses): await self.config.cursor.execute(\"SHOW TABLES LIKE '%s'\" % (self.config.table, )) result", "+= 'NULL,' else: curr_sql += repr(val) + \",\" curr_sql = curr_sql[:-1] + '),\\n'", "while try_time < self.config.max_retry: try: ret_sql = \"\" if normal_sql: ret_sql += sql", "= await self.config.cursor.fetchall() for field in results: if \"auto_increment\" in field: self.auto_increment_keys.add(field[0]) fields", "DEFAULT CHARSET=%s \"\"\" % (self.config.charset, ) sql += tail_sql logging.info(\"Creating table: %s\\n%s\", self.config.table,", "(length, ) sql += (\"\\t\" if first_field else \"\\t\\t\") + \"`%s` %s\" %", "\";\\n\" if sql_without_auto_increment_keys: ret_sql += \";\\n\".join(sql_without_auto_increment_keys) ret_sql += \";\" await self.config.cursor.execute(ret_sql) await self.config.cursor.connection.commit()", "ret_sql += \";\\n\".join(sql_without_auto_increment_keys) ret_sql += \";\" await self.config.cursor.execute(ret_sql) await self.config.cursor.connection.commit() return True except", "up MySQL writer: %s, After retry: %d times, still fail to write, \"", "in responses: need_specific_sql = False keys = list() curr_sql = '(' for field", "else: miss_count += 1 responses = target_responses if not responses: self.finish_once(miss_count, original_length) return", "(try_time, str(e))) await asyncio.sleep(random.uniform(self.config.random_min_sleep, 
self.config.random_max_sleep)) else: logging.error(\"Give up MySQL writer: %s, After retry:", "i = self.config.filter(i) if i: target_responses.append(i) else: miss_count += 1 responses = target_responses", "= \"TEXT\" elif isinstance(value, bool): field_type = \"BOOLEAN\" elif isinstance(value, int): field_type =", "len(value) > 2048: field_type = \"TEXT\" else: length = len(value) * 4 if", "%s VALUES \" % (self.config.table, ) normal_sql = False sql_without_auto_increment_keys = list() for", "and field not in each: need_specific_sql = True continue val = each[field] keys.append(field)", "async def perform_write(self, responses): sql = \"REPLACE INTO %s VALUES \" % (self.config.table,", "self.table_checked: await self.table_check(responses) if await self.perform_write(responses): self.finish_once(miss_count, original_length) def __exit__(self, exc_type, exc_val, exc_tb):", "results = await self.config.cursor.fetchall() for field in results: if \"auto_increment\" in field: self.auto_increment_keys.add(field[0])", "self.config.max_retry: try: ret_sql = \"\" if normal_sql: ret_sql += sql + \";\\n\" if", "await self.perform_write(responses): self.finish_once(miss_count, original_length) def __exit__(self, exc_type, exc_val, exc_tb): self.config.free_resource() logging.info(\"%s write done,", "= set(i[0] for i in results) self.key_fields = list(i[0] for i in results)", "len(value) * 4 if length < 256: length = 256 field_type = \"VARCHAR(%d)\"", "real keys not subset of fields raise ValueError(\"Field %s not in MySQL Table:", "result = await self.config.cursor.fetchone() if result is None: await self.create_table(responses) # check field", "True sql += curr_sql sql = sql[:-2] try_time = 0 while try_time <", "256: length = 256 field_type = \"VARCHAR(%d)\" % (length, ) sql += (\"\\t\"", "% (self.config.table, ) first_field = True for key, value in responses[0].items(): if \"Count\"", "__exit__(self, exc_type, exc_val, exc_tb): self.config.free_resource() logging.info(\"%s 
write done, total filtered %d item, total", "import logging import traceback from .BaseWriter import BaseWriter class MySQLWriter(BaseWriter): def __init__(self, config):", "= \"REPLACE INTO %s VALUES \" % (self.config.table, ) normal_sql = False sql_without_auto_increment_keys", "\",\\n\" if first_field: first_field = False tail_sql = \"\"\" \\tPRIMARY KEY (`id`) )", "CREATE TABLE `%s` ( \"\"\" % (self.config.table, ) first_field = True for key,", "dict) or isinstance(v, list): if len(json.dumps(test_response[k])) < len(json.dumps(v)): test_response[k] = v elif v", "def create_table(self, responses): test_response = dict() for response in responses[:50]: for k, v", "self.config.table)) self.table_checked = True async def create_table(self, responses): test_response = dict() for response", "except Exception as e: try_time += 1 if try_time < self.config.max_retry: logging.error(\"retry: %d,", "dict() for response in responses[:50]: for k, v in response.items(): if k not", "total write %d item\" % (self.config.name, self.total_miss_count, self.success_count)) def __enter__(self): return self def", "if \"Count\" in key: field_type = \"BIGINT\" elif value is None: field_type =", "self.success_count)) def __enter__(self): return self def finish_once(self, miss_count, original_length): self.total_miss_count += miss_count self.success_count", "test_response[k] = v elif isinstance(v, dict) or isinstance(v, list): if len(json.dumps(test_response[k])) < len(json.dumps(v)):", "await self.config.cursor.execute(\"DESC %s\" % (self.config.table, )) results = await self.config.cursor.fetchall() for field in", "= v elif v is not None and test_response[k] < v: test_response[k] =", "elif value is None: field_type = \"TEXT\" elif key in (\"content\", ) or", "= \"\" if normal_sql: ret_sql += sql + \";\\n\" if sql_without_auto_increment_keys: ret_sql +=", "sql += \",\\n\" if first_field: first_field = False tail_sql = \"\"\" \\tPRIMARY KEY", "else: sql += \",\\n\" if first_field: 
first_field = False tail_sql = \"\"\" \\tPRIMARY", "logging.info(\"table created\") async def perform_write(self, responses): sql = \"REPLACE INTO %s VALUES \"", "done, total filtered %d item, total write %d item\" % (self.config.name, self.total_miss_count, self.success_count))", "False tail_sql = \"\"\" \\tPRIMARY KEY (`id`) ) ENGINE=InnoDB DEFAULT CHARSET=%s \"\"\" %", "list): val = json.dumps(val) if val is None: curr_sql += 'NULL,' else: curr_sql", "= \"VARCHAR(%d)\" % (length, ) sql += (\"\\t\" if first_field else \"\\t\\t\") +", "filtered %d item\" % (self.config.name, original_length - miss_count, miss_count)) async def table_check(self, responses):", "result is None: await self.create_table(responses) # check field await self.config.cursor.execute(\"DESC %s\" % (self.config.table,", "self.config.cursor.execute(\"SHOW TABLES LIKE '%s'\" % (self.config.table, )) result = await self.config.cursor.fetchone() if result", "difference_set: # real keys not subset of fields raise ValueError(\"Field %s not in", "most 65536 bytes, utf8 occupy 1-8 bytes per character, # so length should", "\"BOOLEAN\" elif isinstance(value, int): field_type = \"BIGINT\" elif isinstance(value, float): field_type = \"DOUBLE\"", "\"\"\" % (self.config.charset, ) sql += tail_sql logging.info(\"Creating table: %s\\n%s\", self.config.table, sql) await", "< len(json.dumps(v)): test_response[k] = v elif v is not None and test_response[k] <", "%d item\" % (self.config.name, self.total_miss_count, self.success_count)) def __enter__(self): return self def finish_once(self, miss_count,", "%s\\n%s\", self.config.table, sql) await self.config.cursor.execute(sql) await self.config.connection.commit() logging.info(\"table created\") async def perform_write(self, responses):", "fields = set(i[0] for i in results) self.key_fields = list(i[0] for i in", "init mysql pool miss_count = 0 original_length = len(responses) if self.config.filter: target_responses =", "logging.error(\"Give up MySQL writer: %s, 
After retry: %d times, still fail to write,", "0 original_length = len(responses) if self.config.filter: target_responses = list() for i in responses:", "isinstance(v, list): if len(json.dumps(test_response[k])) < len(json.dumps(v)): test_response[k] = v elif v is not", "<= the longest length(8192) elif len(value) > 2048: field_type = \"TEXT\" else: length", "class MySQLWriter(BaseWriter): def __init__(self, config): super().__init__() self.config = config self.total_miss_count = 0 self.success_count", "dict) or isinstance(value, list): field_type = \"TEXT\" elif isinstance(value, bool): field_type = \"BOOLEAN\"", "as e: try_time += 1 if try_time < self.config.max_retry: logging.error(\"retry: %d, %s\" %", "INTO %s VALUES \" % (self.config.table, ) normal_sql = False sql_without_auto_increment_keys = list()", "from .BaseWriter import BaseWriter class MySQLWriter(BaseWriter): def __init__(self, config): super().__init__() self.config = config", "field in self.key_fields: if field in self.auto_increment_keys and field not in each: need_specific_sql", "table_check(self, responses): await self.config.cursor.execute(\"SHOW TABLES LIKE '%s'\" % (self.config.table, )) result = await", "item, total write %d item\" % (self.config.name, self.total_miss_count, self.success_count)) def __enter__(self): return self", "if field in self.auto_increment_keys and field not in each: need_specific_sql = True continue", "if \"auto_increment\" in field: self.auto_increment_keys.add(field[0]) fields = set(i[0] for i in results) self.key_fields", "elif isinstance(v, dict) or isinstance(v, list): if len(json.dumps(test_response[k])) < len(json.dumps(v)): test_response[k] = v", "responses: need_specific_sql = False keys = list() curr_sql = '(' for field in", "self.auto_increment_keys.add(field[0]) fields = set(i[0] for i in results) self.key_fields = list(i[0] for i", "self.table_check(responses) if await self.perform_write(responses): self.finish_once(miss_count, original_length) def 
__exit__(self, exc_type, exc_val, exc_tb): self.config.free_resource() logging.info(\"%s", "None: test_response[k] = v elif isinstance(v, dict) or isinstance(v, list): if len(json.dumps(test_response[k])) <", "else: logging.error(\"Give up MySQL writer: %s, After retry: %d times, still fail to", "\";\" await self.config.cursor.execute(ret_sql) await self.config.cursor.connection.commit() return True except Exception as e: try_time +=", "= 0 self.table_checked = False self.key_fields = list() self.auto_increment_keys = set() async def", "%d, %s\" % (try_time, str(e))) await asyncio.sleep(random.uniform(self.config.random_min_sleep, self.config.random_max_sleep)) else: logging.error(\"Give up MySQL writer:", "if val is None: curr_sql += 'NULL,' else: curr_sql += repr(val) + \",\"", "if first_field else \"\\t\\t\") + \"`%s` %s\" % (key, field_type) if key ==", "= True async def create_table(self, responses): test_response = dict() for response in responses[:50]:", "test_response: test_response[k] = v elif test_response[k] is None: test_response[k] = v elif isinstance(v,", "await self.table_check(responses) if await self.perform_write(responses): self.finish_once(miss_count, original_length) def __exit__(self, exc_type, exc_val, exc_tb): self.config.free_resource()", "(\"content\", ) or isinstance(value, dict) or isinstance(value, list): field_type = \"TEXT\" elif isinstance(value,", "len(responses) if self.config.filter: target_responses = list() for i in responses: i = self.config.filter(i)", "field_type = \"BOOLEAN\" elif isinstance(value, int): field_type = \"BIGINT\" elif isinstance(value, float): field_type", "if need_specific_sql: sql_keys = \"(\" for each_sql_key in keys: sql_keys += each_sql_key +", "utf8 occupy 1-8 bytes per character, # so length should be less than", "MySQL Table: %s\" % (str(difference_set), self.config.table)) self.table_checked = True async def create_table(self, responses):", "mysql pool miss_count = 0 original_length = len(responses) 
if self.config.filter: target_responses = list()", "list): if len(json.dumps(test_response[k])) < len(json.dumps(v)): test_response[k] = v elif v is not None", "+= \";\\n\".join(sql_without_auto_increment_keys) ret_sql += \";\" await self.config.cursor.execute(ret_sql) await self.config.cursor.connection.commit() return True except Exception", "logging import traceback from .BaseWriter import BaseWriter class MySQLWriter(BaseWriter): def __init__(self, config): super().__init__()", "v is not None and test_response[k] < v: test_response[k] = v sql =", "= True for key, value in responses[0].items(): if \"Count\" in key: field_type =", "(str(difference_set), self.config.table)) self.table_checked = True async def create_table(self, responses): test_response = dict() for", "self.total_miss_count += miss_count self.success_count += original_length logging.info(\"%s write %d item, filtered %d item\"", "val is None: curr_sql += 'NULL,' else: curr_sql += repr(val) + \",\" curr_sql", "responses[:50]: for k, v in response.items(): if k not in test_response: test_response[k] =", "elif isinstance(value, float): field_type = \"DOUBLE\" # varchar can store at most 65536", "responses): sql = \"REPLACE INTO %s VALUES \" % (self.config.table, ) normal_sql =", "self def finish_once(self, miss_count, original_length): self.total_miss_count += miss_count self.success_count += original_length logging.info(\"%s write", "= len(value) * 4 if length < 256: length = 256 field_type =", "length = 256 field_type = \"VARCHAR(%d)\" % (length, ) sql += (\"\\t\" if", "isinstance(val, dict) or isinstance(val, list): val = json.dumps(val) if val is None: curr_sql", "True for key, value in responses[0].items(): if \"Count\" in key: field_type = \"BIGINT\"", "table: %s\\n%s\", self.config.table, sql) await self.config.cursor.execute(sql) await self.config.connection.commit() logging.info(\"table created\") async def perform_write(self,", "super().__init__() self.config = config self.total_miss_count = 
0 self.success_count = 0 self.table_checked = False", "self.config.max_retry: logging.error(\"retry: %d, %s\" % (try_time, str(e))) await asyncio.sleep(random.uniform(self.config.random_min_sleep, self.config.random_max_sleep)) else: logging.error(\"Give up", "each[field] keys.append(field) if isinstance(val, dict) or isinstance(val, list): val = json.dumps(val) if val", "else: curr_sql += repr(val) + \",\" curr_sql = curr_sql[:-1] + '),\\n' if need_specific_sql:", "test_response[k] = v elif v is not None and test_response[k] < v: test_response[k]", "at most 65536 bytes, utf8 occupy 1-8 bytes per character, # so length", "+= \";\" await self.config.cursor.execute(ret_sql) await self.config.cursor.connection.commit() return True except Exception as e: try_time", "sql + \";\\n\" if sql_without_auto_increment_keys: ret_sql += \";\\n\".join(sql_without_auto_increment_keys) ret_sql += \";\" await self.config.cursor.execute(ret_sql)", "'(' for field in self.key_fields: if field in self.auto_increment_keys and field not in", "import BaseWriter class MySQLWriter(BaseWriter): def __init__(self, config): super().__init__() self.config = config self.total_miss_count =", "if sql_without_auto_increment_keys: ret_sql += \";\\n\".join(sql_without_auto_increment_keys) ret_sql += \";\" await self.config.cursor.execute(ret_sql) await self.config.cursor.connection.commit() return", "results) self.key_fields = list(i[0] for i in results) real_keys = set(responses[0].keys()) difference_set =", "not responses: self.finish_once(miss_count, original_length) return # After filtered, still have responses to write", "self.config.free_resource() logging.info(\"%s write done, total filtered %d item, total write %d item\" %", "results: if \"auto_increment\" in field: self.auto_increment_keys.add(field[0]) fields = set(i[0] for i in results)", "logging.info(\"%s write %d item, filtered %d item\" % (self.config.name, original_length - miss_count, miss_count))", "+= 1 responses = target_responses 
if not responses: self.finish_once(miss_count, original_length) return # After", "try: ret_sql = \"\" if normal_sql: ret_sql += sql + \";\\n\" if sql_without_auto_increment_keys:", "% (try_time, str(e))) await asyncio.sleep(random.uniform(self.config.random_min_sleep, self.config.random_max_sleep)) else: logging.error(\"Give up MySQL writer: %s, After", "%s\" % (self.config.table, )) results = await self.config.cursor.fetchall() for field in results: if", "ret_sql += sql + \";\\n\" if sql_without_auto_increment_keys: ret_sql += \";\\n\".join(sql_without_auto_increment_keys) ret_sql += \";\"", "set() async def write(self, responses): await self.config.get_mysql_pool_cli() # init mysql pool miss_count =", "self.config.cursor.execute(sql) await self.config.connection.commit() logging.info(\"table created\") async def perform_write(self, responses): sql = \"REPLACE INTO", "\"\" if normal_sql: ret_sql += sql + \";\\n\" if sql_without_auto_increment_keys: ret_sql += \";\\n\".join(sql_without_auto_increment_keys)", "miss_count)) async def table_check(self, responses): await self.config.cursor.execute(\"SHOW TABLES LIKE '%s'\" % (self.config.table, ))", "%s not in MySQL Table: %s\" % (str(difference_set), self.config.table)) self.table_checked = True async", "0 while try_time < self.config.max_retry: try: ret_sql = \"\" if normal_sql: ret_sql +=", "sql += tail_sql logging.info(\"Creating table: %s\\n%s\", self.config.table, sql) await self.config.cursor.execute(sql) await self.config.connection.commit() logging.info(\"table", "self.config.cursor.fetchall() for field in results: if \"auto_increment\" in field: self.auto_increment_keys.add(field[0]) fields = set(i[0]", "(self.config.table, sql_keys) + curr_sql[:-2]) else: normal_sql = True sql += curr_sql sql =", "length < 256: length = 256 field_type = \"VARCHAR(%d)\" % (length, ) sql", "tail_sql = \"\"\" \\tPRIMARY KEY (`id`) ) ENGINE=InnoDB DEFAULT CHARSET=%s \"\"\" % (self.config.charset,", "write %d item, filtered %d 
item\" % (self.config.name, original_length - miss_count, miss_count)) async", "= list(i[0] for i in results) real_keys = set(responses[0].keys()) difference_set = real_keys.difference(fields) if", ") sql += (\"\\t\" if first_field else \"\\t\\t\") + \"`%s` %s\" % (key,", "sql[:-2] try_time = 0 while try_time < self.config.max_retry: try: ret_sql = \"\" if", "% (self.config.table, ) normal_sql = False sql_without_auto_increment_keys = list() for each in responses:", "length should be less than 65536 / 8 = 8192 # assume this", "def __enter__(self): return self def finish_once(self, miss_count, original_length): self.total_miss_count += miss_count self.success_count +=", "self.config.cursor.execute(\"DESC %s\" % (self.config.table, )) results = await self.config.cursor.fetchall() for field in results:", "self.finish_once(miss_count, original_length) def __exit__(self, exc_type, exc_val, exc_tb): self.config.free_resource() logging.info(\"%s write done, total filtered", "<reponame>markqiu/idataapi-transform import json import asyncio import random import logging import traceback from .BaseWriter", "or isinstance(value, list): field_type = \"TEXT\" elif isinstance(value, bool): field_type = \"BOOLEAN\" elif", "less than 65536 / 8 = 8192 # assume this field (the shortest", "= '(' for field in self.key_fields: if field in self.auto_increment_keys and field not", "for each_sql_key in keys: sql_keys += each_sql_key + \",\" sql_keys = sql_keys[:-1] +", "await self.config.cursor.fetchall() for field in results: if \"auto_increment\" in field: self.auto_increment_keys.add(field[0]) fields =", "ENGINE=InnoDB DEFAULT CHARSET=%s \"\"\" % (self.config.charset, ) sql += tail_sql logging.info(\"Creating table: %s\\n%s\",", "for field in self.key_fields: if field in self.auto_increment_keys and field not in each:", "- miss_count, miss_count)) async def table_check(self, responses): await self.config.cursor.execute(\"SHOW TABLES LIKE '%s'\" %", "+= repr(val) + \",\" curr_sql = 
curr_sql[:-1] + '),\\n' if need_specific_sql: sql_keys =", "import json import asyncio import random import logging import traceback from .BaseWriter import", "8 = 8192 # assume this field (the shortest length) * 4 <=", "+ curr_sql[:-2]) else: normal_sql = True sql += curr_sql sql = sql[:-2] try_time", "in responses: i = self.config.filter(i) if i: target_responses.append(i) else: miss_count += 1 responses", "responses: i = self.config.filter(i) if i: target_responses.append(i) else: miss_count += 1 responses =", "i in responses: i = self.config.filter(i) if i: target_responses.append(i) else: miss_count += 1", "first_field = True for key, value in responses[0].items(): if \"Count\" in key: field_type", "# varchar can store at most 65536 bytes, utf8 occupy 1-8 bytes per", "length = len(value) * 4 if length < 256: length = 256 field_type", "self.table_checked = False self.key_fields = list() self.auto_increment_keys = set() async def write(self, responses):", "self.config.cursor.connection.commit() return True except Exception as e: try_time += 1 if try_time <", "try_time < self.config.max_retry: logging.error(\"retry: %d, %s\" % (try_time, str(e))) await asyncio.sleep(random.uniform(self.config.random_min_sleep, self.config.random_max_sleep)) else:", "(self.config.table, ) normal_sql = False sql_without_auto_increment_keys = list() for each in responses: need_specific_sql", "miss_count = 0 original_length = len(responses) if self.config.filter: target_responses = list() for i", "= list() curr_sql = '(' for field in self.key_fields: if field in self.auto_increment_keys", "bytes, utf8 occupy 1-8 bytes per character, # so length should be less", "sql_keys += each_sql_key + \",\" sql_keys = sql_keys[:-1] + \")\" sql_without_auto_increment_keys.append(\"REPLACE INTO %s%s", "if length < 256: length = 256 field_type = \"VARCHAR(%d)\" % (length, )", "in results) self.key_fields = list(i[0] for i in results) real_keys = set(responses[0].keys()) difference_set", "not in 
MySQL Table: %s\" % (str(difference_set), self.config.table)) self.table_checked = True async def", "real_keys.difference(fields) if difference_set: # real keys not subset of fields raise ValueError(\"Field %s", "longest length(8192) elif len(value) > 2048: field_type = \"TEXT\" else: length = len(value)", ") normal_sql = False sql_without_auto_increment_keys = list() for each in responses: need_specific_sql =", "field in self.auto_increment_keys and field not in each: need_specific_sql = True continue val", "responses to write if not self.table_checked: await self.table_check(responses) if await self.perform_write(responses): self.finish_once(miss_count, original_length)", "writer: %s, After retry: %d times, still fail to write, \" \"total write", "can store at most 65536 bytes, utf8 occupy 1-8 bytes per character, #", "sql += \" NOT NULL,\\n\" else: sql += \",\\n\" if first_field: first_field =", "else: length = len(value) * 4 if length < 256: length = 256", "self.config = config self.total_miss_count = 0 self.success_count = 0 self.table_checked = False self.key_fields", "test_response[k] is None: test_response[k] = v elif isinstance(v, dict) or isinstance(v, list): if", "+ \",\" sql_keys = sql_keys[:-1] + \")\" sql_without_auto_increment_keys.append(\"REPLACE INTO %s%s VALUES \" %", "+= curr_sql sql = sql[:-2] try_time = 0 while try_time < self.config.max_retry: try:", "ret_sql += \";\" await self.config.cursor.execute(ret_sql) await self.config.cursor.connection.commit() return True except Exception as e:", "shortest length) * 4 <= the longest length(8192) elif len(value) > 2048: field_type", "> 2048: field_type = \"TEXT\" else: length = len(value) * 4 if length", ")) result = await self.config.cursor.fetchone() if result is None: await self.create_table(responses) # check", "for response in responses[:50]: for k, v in response.items(): if k not in", "curr_sql += repr(val) + \",\" curr_sql = curr_sql[:-1] + '),\\n' if need_specific_sql: sql_keys", "to write, 
\" \"total write %d items, total filtered: %d items, reason: %s\"", "i in results) real_keys = set(responses[0].keys()) difference_set = real_keys.difference(fields) if difference_set: # real", "logging.info(\"%s write done, total filtered %d item, total write %d item\" % (self.config.name,", "= \"BIGINT\" elif value is None: field_type = \"TEXT\" elif key in (\"content\",", "(self.config.name, original_length - miss_count, miss_count)) async def table_check(self, responses): await self.config.cursor.execute(\"SHOW TABLES LIKE", "import asyncio import random import logging import traceback from .BaseWriter import BaseWriter class", ") first_field = True for key, value in responses[0].items(): if \"Count\" in key:", "original_length): self.total_miss_count += miss_count self.success_count += original_length logging.info(\"%s write %d item, filtered %d", "= list() for each in responses: need_specific_sql = False keys = list() curr_sql", "exc_type, exc_val, exc_tb): self.config.free_resource() logging.info(\"%s write done, total filtered %d item, total write", "i in results) self.key_fields = list(i[0] for i in results) real_keys = set(responses[0].keys())", "%d item, filtered %d item\" % (self.config.name, original_length - miss_count, miss_count)) async def", ") sql += tail_sql logging.info(\"Creating table: %s\\n%s\", self.config.table, sql) await self.config.cursor.execute(sql) await self.config.connection.commit()", "v sql = \"\"\" CREATE TABLE `%s` ( \"\"\" % (self.config.table, ) first_field", "+= \" NOT NULL,\\n\" else: sql += \",\\n\" if first_field: first_field = False", "VALUES \" % (self.config.table, ) normal_sql = False sql_without_auto_increment_keys = list() for each", "'NULL,' else: curr_sql += repr(val) + \",\" curr_sql = curr_sql[:-1] + '),\\n' if", "in field: self.auto_increment_keys.add(field[0]) fields = set(i[0] for i in results) self.key_fields = list(i[0]", "= sql_keys[:-1] + \")\" sql_without_auto_increment_keys.append(\"REPLACE INTO %s%s 
VALUES \" % (self.config.table, sql_keys) +", "val = each[field] keys.append(field) if isinstance(val, dict) or isinstance(val, list): val = json.dumps(val)", "await self.config.cursor.fetchone() if result is None: await self.create_table(responses) # check field await self.config.cursor.execute(\"DESC", "(key, field_type) if key == \"id\": sql += \" NOT NULL,\\n\" else: sql", "str(e))) await asyncio.sleep(random.uniform(self.config.random_min_sleep, self.config.random_max_sleep)) else: logging.error(\"Give up MySQL writer: %s, After retry: %d", "else: normal_sql = True sql += curr_sql sql = sql[:-2] try_time = 0", "= 8192 # assume this field (the shortest length) * 4 <= the", "not in test_response: test_response[k] = v elif test_response[k] is None: test_response[k] = v", "elif test_response[k] is None: test_response[k] = v elif isinstance(v, dict) or isinstance(v, list):", "field_type) if key == \"id\": sql += \" NOT NULL,\\n\" else: sql +=", "original_length = len(responses) if self.config.filter: target_responses = list() for i in responses: i", "= v sql = \"\"\" CREATE TABLE `%s` ( \"\"\" % (self.config.table, )", "random import logging import traceback from .BaseWriter import BaseWriter class MySQLWriter(BaseWriter): def __init__(self,", "def __init__(self, config): super().__init__() self.config = config self.total_miss_count = 0 self.success_count = 0", "\"\"\" CREATE TABLE `%s` ( \"\"\" % (self.config.table, ) first_field = True for", "in test_response: test_response[k] = v elif test_response[k] is None: test_response[k] = v elif", "filtered, still have responses to write if not self.table_checked: await self.table_check(responses) if await", "% (self.config.charset, ) sql += tail_sql logging.info(\"Creating table: %s\\n%s\", self.config.table, sql) await self.config.cursor.execute(sql)", "= sql[:-2] try_time = 0 while try_time < self.config.max_retry: try: ret_sql = \"\"", "target_responses if not responses: self.finish_once(miss_count, original_length) 
return # After filtered, still have responses", "= v elif isinstance(v, dict) or isinstance(v, list): if len(json.dumps(test_response[k])) < len(json.dumps(v)): test_response[k]", "and test_response[k] < v: test_response[k] = v sql = \"\"\" CREATE TABLE `%s`", "for each in responses: need_specific_sql = False keys = list() curr_sql = '('", "first_field = False tail_sql = \"\"\" \\tPRIMARY KEY (`id`) ) ENGINE=InnoDB DEFAULT CHARSET=%s", "try_time = 0 while try_time < self.config.max_retry: try: ret_sql = \"\" if normal_sql:", "elif isinstance(value, bool): field_type = \"BOOLEAN\" elif isinstance(value, int): field_type = \"BIGINT\" elif", "field_type = \"VARCHAR(%d)\" % (length, ) sql += (\"\\t\" if first_field else \"\\t\\t\")", "repr(val) + \",\" curr_sql = curr_sql[:-1] + '),\\n' if need_specific_sql: sql_keys = \"(\"", "for k, v in response.items(): if k not in test_response: test_response[k] = v", "# check field await self.config.cursor.execute(\"DESC %s\" % (self.config.table, )) results = await self.config.cursor.fetchall()", "\"\"\" \\tPRIMARY KEY (`id`) ) ENGINE=InnoDB DEFAULT CHARSET=%s \"\"\" % (self.config.charset, ) sql", "= self.config.filter(i) if i: target_responses.append(i) else: miss_count += 1 responses = target_responses if", "responses): await self.config.get_mysql_pool_cli() # init mysql pool miss_count = 0 original_length = len(responses)", "json import asyncio import random import logging import traceback from .BaseWriter import BaseWriter", "NULL,\\n\" else: sql += \",\\n\" if first_field: first_field = False tail_sql = \"\"\"", "`%s` ( \"\"\" % (self.config.table, ) first_field = True for key, value in", "< self.config.max_retry: logging.error(\"retry: %d, %s\" % (try_time, str(e))) await asyncio.sleep(random.uniform(self.config.random_min_sleep, self.config.random_max_sleep)) else: logging.error(\"Give", "v elif v is not None and test_response[k] < v: test_response[k] = v", "= set() async def write(self, responses): await 
self.config.get_mysql_pool_cli() # init mysql pool miss_count", "__init__(self, config): super().__init__() self.config = config self.total_miss_count = 0 self.success_count = 0 self.table_checked", "asyncio import random import logging import traceback from .BaseWriter import BaseWriter class MySQLWriter(BaseWriter):", "= \"DOUBLE\" # varchar can store at most 65536 bytes, utf8 occupy 1-8", "first_field else \"\\t\\t\") + \"`%s` %s\" % (key, field_type) if key == \"id\":", "each_sql_key in keys: sql_keys += each_sql_key + \",\" sql_keys = sql_keys[:-1] + \")\"", "isinstance(v, dict) or isinstance(v, list): if len(json.dumps(test_response[k])) < len(json.dumps(v)): test_response[k] = v elif", "occupy 1-8 bytes per character, # so length should be less than 65536", ".BaseWriter import BaseWriter class MySQLWriter(BaseWriter): def __init__(self, config): super().__init__() self.config = config self.total_miss_count", "self.create_table(responses) # check field await self.config.cursor.execute(\"DESC %s\" % (self.config.table, )) results = await", "field await self.config.cursor.execute(\"DESC %s\" % (self.config.table, )) results = await self.config.cursor.fetchall() for field", "await self.config.cursor.execute(sql) await self.config.connection.commit() logging.info(\"table created\") async def perform_write(self, responses): sql = \"REPLACE", "each: need_specific_sql = True continue val = each[field] keys.append(field) if isinstance(val, dict) or", "pool miss_count = 0 original_length = len(responses) if self.config.filter: target_responses = list() for", "is None: curr_sql += 'NULL,' else: curr_sql += repr(val) + \",\" curr_sql =", "list): field_type = \"TEXT\" elif isinstance(value, bool): field_type = \"BOOLEAN\" elif isinstance(value, int):", "traceback from .BaseWriter import BaseWriter class MySQLWriter(BaseWriter): def __init__(self, config): super().__init__() self.config =", "= 0 original_length = len(responses) if self.config.filter: target_responses = 
list() for i in", "isinstance(val, list): val = json.dumps(val) if val is None: curr_sql += 'NULL,' else:", "per character, # so length should be less than 65536 / 8 =", "in MySQL Table: %s\" % (str(difference_set), self.config.table)) self.table_checked = True async def create_table(self,", "than 65536 / 8 = 8192 # assume this field (the shortest length)", "store at most 65536 bytes, utf8 occupy 1-8 bytes per character, # so", "if try_time < self.config.max_retry: logging.error(\"retry: %d, %s\" % (try_time, str(e))) await asyncio.sleep(random.uniform(self.config.random_min_sleep, self.config.random_max_sleep))", "self.key_fields: if field in self.auto_increment_keys and field not in each: need_specific_sql = True", "still have responses to write if not self.table_checked: await self.table_check(responses) if await self.perform_write(responses):", "if first_field: first_field = False tail_sql = \"\"\" \\tPRIMARY KEY (`id`) ) ENGINE=InnoDB", "float): field_type = \"DOUBLE\" # varchar can store at most 65536 bytes, utf8", "sql) await self.config.cursor.execute(sql) await self.config.connection.commit() logging.info(\"table created\") async def perform_write(self, responses): sql =", "\"TEXT\" else: length = len(value) * 4 if length < 256: length =", "TABLES LIKE '%s'\" % (self.config.table, )) result = await self.config.cursor.fetchone() if result is", "def perform_write(self, responses): sql = \"REPLACE INTO %s VALUES \" % (self.config.table, )", "isinstance(value, dict) or isinstance(value, list): field_type = \"TEXT\" elif isinstance(value, bool): field_type =", "%s\" % (try_time, str(e))) await asyncio.sleep(random.uniform(self.config.random_min_sleep, self.config.random_max_sleep)) else: logging.error(\"Give up MySQL writer: %s,", "+= each_sql_key + \",\" sql_keys = sql_keys[:-1] + \")\" sql_without_auto_increment_keys.append(\"REPLACE INTO %s%s VALUES", "sql = \"\"\" CREATE TABLE `%s` ( \"\"\" % (self.config.table, ) first_field =", "field in results: if 
\"auto_increment\" in field: self.auto_increment_keys.add(field[0]) fields = set(i[0] for i", "not subset of fields raise ValueError(\"Field %s not in MySQL Table: %s\" %", "% (key, field_type) if key == \"id\": sql += \" NOT NULL,\\n\" else:", "sql_keys = \"(\" for each_sql_key in keys: sql_keys += each_sql_key + \",\" sql_keys", "curr_sql = curr_sql[:-1] + '),\\n' if need_specific_sql: sql_keys = \"(\" for each_sql_key in", "check field await self.config.cursor.execute(\"DESC %s\" % (self.config.table, )) results = await self.config.cursor.fetchall() for", "total filtered %d item, total write %d item\" % (self.config.name, self.total_miss_count, self.success_count)) def", "curr_sql = '(' for field in self.key_fields: if field in self.auto_increment_keys and field", "%s\" % (key, field_type) if key == \"id\": sql += \" NOT NULL,\\n\"", "= True continue val = each[field] keys.append(field) if isinstance(val, dict) or isinstance(val, list):", "item\" % (self.config.name, original_length - miss_count, miss_count)) async def table_check(self, responses): await self.config.cursor.execute(\"SHOW", "\" % (self.config.table, sql_keys) + curr_sql[:-2]) else: normal_sql = True sql += curr_sql", "character, # so length should be less than 65536 / 8 = 8192", "LIKE '%s'\" % (self.config.table, )) result = await self.config.cursor.fetchone() if result is None:", "subset of fields raise ValueError(\"Field %s not in MySQL Table: %s\" % (str(difference_set),", "key in (\"content\", ) or isinstance(value, dict) or isinstance(value, list): field_type = \"TEXT\"", "logging.error(\"retry: %d, %s\" % (try_time, str(e))) await asyncio.sleep(random.uniform(self.config.random_min_sleep, self.config.random_max_sleep)) else: logging.error(\"Give up MySQL", "( \"\"\" % (self.config.table, ) first_field = True for key, value in responses[0].items():", "= list() self.auto_increment_keys = set() async def write(self, responses): await self.config.get_mysql_pool_cli() # init", "After retry: %d 
times, still fail to write, \" \"total write %d items,", "= \"\"\" \\tPRIMARY KEY (`id`) ) ENGINE=InnoDB DEFAULT CHARSET=%s \"\"\" % (self.config.charset, )", "curr_sql[:-2]) else: normal_sql = True sql += curr_sql sql = sql[:-2] try_time =", "\"id\": sql += \" NOT NULL,\\n\" else: sql += \",\\n\" if first_field: first_field", "total filtered: %d items, reason: %s\" % (self.config.name, self.config.max_retry, self.success_count, self.total_miss_count, str(traceback.format_exc()))) return", "None: curr_sql += 'NULL,' else: curr_sql += repr(val) + \",\" curr_sql = curr_sql[:-1]", "difference_set = real_keys.difference(fields) if difference_set: # real keys not subset of fields raise", "(self.config.table, )) result = await self.config.cursor.fetchone() if result is None: await self.create_table(responses) #", "self.key_fields = list(i[0] for i in results) real_keys = set(responses[0].keys()) difference_set = real_keys.difference(fields)", "0 self.table_checked = False self.key_fields = list() self.auto_increment_keys = set() async def write(self,", "self.config.cursor.fetchone() if result is None: await self.create_table(responses) # check field await self.config.cursor.execute(\"DESC %s\"", "= real_keys.difference(fields) if difference_set: # real keys not subset of fields raise ValueError(\"Field", "True continue val = each[field] keys.append(field) if isinstance(val, dict) or isinstance(val, list): val", "+ '),\\n' if need_specific_sql: sql_keys = \"(\" for each_sql_key in keys: sql_keys +=", "\" NOT NULL,\\n\" else: sql += \",\\n\" if first_field: first_field = False tail_sql", "\"BIGINT\" elif isinstance(value, float): field_type = \"DOUBLE\" # varchar can store at most", "+= sql + \";\\n\" if sql_without_auto_increment_keys: ret_sql += \";\\n\".join(sql_without_auto_increment_keys) ret_sql += \";\" await", "\"`%s` %s\" % (key, field_type) if key == \"id\": sql += \" NOT", "isinstance(value, list): field_type = \"TEXT\" elif isinstance(value, bool): field_type = 
\"BOOLEAN\" elif isinstance(value,", "% (self.config.name, original_length - miss_count, miss_count)) async def table_check(self, responses): await self.config.cursor.execute(\"SHOW TABLES", "if i: target_responses.append(i) else: miss_count += 1 responses = target_responses if not responses:", "TABLE `%s` ( \"\"\" % (self.config.table, ) first_field = True for key, value", "< 256: length = 256 field_type = \"VARCHAR(%d)\" % (length, ) sql +=", "* 4 <= the longest length(8192) elif len(value) > 2048: field_type = \"TEXT\"", "= config self.total_miss_count = 0 self.success_count = 0 self.table_checked = False self.key_fields =", "tail_sql logging.info(\"Creating table: %s\\n%s\", self.config.table, sql) await self.config.cursor.execute(sql) await self.config.connection.commit() logging.info(\"table created\") async", "v: test_response[k] = v sql = \"\"\" CREATE TABLE `%s` ( \"\"\" %", "target_responses = list() for i in responses: i = self.config.filter(i) if i: target_responses.append(i)", "'),\\n' if need_specific_sql: sql_keys = \"(\" for each_sql_key in keys: sql_keys += each_sql_key", "# After filtered, still have responses to write if not self.table_checked: await self.table_check(responses)", "% (self.config.name, self.total_miss_count, self.success_count)) def __enter__(self): return self def finish_once(self, miss_count, original_length): self.total_miss_count", "%d item, total write %d item\" % (self.config.name, self.total_miss_count, self.success_count)) def __enter__(self): return", "the longest length(8192) elif len(value) > 2048: field_type = \"TEXT\" else: length =", "ret_sql = \"\" if normal_sql: ret_sql += sql + \";\\n\" if sql_without_auto_increment_keys: ret_sql", "if isinstance(val, dict) or isinstance(val, list): val = json.dumps(val) if val is None:", "set(i[0] for i in results) self.key_fields = list(i[0] for i in results) real_keys", "= \"BOOLEAN\" elif isinstance(value, int): field_type = \"BIGINT\" elif isinstance(value, float): field_type 
=", "val = json.dumps(val) if val is None: curr_sql += 'NULL,' else: curr_sql +=", "keys not subset of fields raise ValueError(\"Field %s not in MySQL Table: %s\"", "items, total filtered: %d items, reason: %s\" % (self.config.name, self.config.max_retry, self.success_count, self.total_miss_count, str(traceback.format_exc())))", "key, value in responses[0].items(): if \"Count\" in key: field_type = \"BIGINT\" elif value", ") ENGINE=InnoDB DEFAULT CHARSET=%s \"\"\" % (self.config.charset, ) sql += tail_sql logging.info(\"Creating table:", "if result is None: await self.create_table(responses) # check field await self.config.cursor.execute(\"DESC %s\" %", "response.items(): if k not in test_response: test_response[k] = v elif test_response[k] is None:", "async def create_table(self, responses): test_response = dict() for response in responses[:50]: for k,", "list() for each in responses: need_specific_sql = False keys = list() curr_sql =", "\";\\n\".join(sql_without_auto_increment_keys) ret_sql += \";\" await self.config.cursor.execute(ret_sql) await self.config.cursor.connection.commit() return True except Exception as", "responses): await self.config.cursor.execute(\"SHOW TABLES LIKE '%s'\" % (self.config.table, )) result = await self.config.cursor.fetchone()", "\\tPRIMARY KEY (`id`) ) ENGINE=InnoDB DEFAULT CHARSET=%s \"\"\" % (self.config.charset, ) sql +=", "\" % (self.config.table, ) normal_sql = False sql_without_auto_increment_keys = list() for each in", "+= miss_count self.success_count += original_length logging.info(\"%s write %d item, filtered %d item\" %", "await self.config.connection.commit() logging.info(\"table created\") async def perform_write(self, responses): sql = \"REPLACE INTO %s", "list() self.auto_increment_keys = set() async def write(self, responses): await self.config.get_mysql_pool_cli() # init mysql", "is not None and test_response[k] < v: test_response[k] = v sql = \"\"\"", "varchar can store at most 65536 bytes, utf8 occupy 1-8 bytes 
per character,", "< self.config.max_retry: try: ret_sql = \"\" if normal_sql: ret_sql += sql + \";\\n\"", "not in each: need_specific_sql = True continue val = each[field] keys.append(field) if isinstance(val,", "\"\"\" % (self.config.table, ) first_field = True for key, value in responses[0].items(): if", "sql += curr_sql sql = sql[:-2] try_time = 0 while try_time < self.config.max_retry:", "asyncio.sleep(random.uniform(self.config.random_min_sleep, self.config.random_max_sleep)) else: logging.error(\"Give up MySQL writer: %s, After retry: %d times, still", "%d items, total filtered: %d items, reason: %s\" % (self.config.name, self.config.max_retry, self.success_count, self.total_miss_count,", "filtered: %d items, reason: %s\" % (self.config.name, self.config.max_retry, self.success_count, self.total_miss_count, str(traceback.format_exc()))) return False", "= target_responses if not responses: self.finish_once(miss_count, original_length) return # After filtered, still have", "= each[field] keys.append(field) if isinstance(val, dict) or isinstance(val, list): val = json.dumps(val) if", "CHARSET=%s \"\"\" % (self.config.charset, ) sql += tail_sql logging.info(\"Creating table: %s\\n%s\", self.config.table, sql)", "await self.config.cursor.connection.commit() return True except Exception as e: try_time += 1 if try_time", "e: try_time += 1 if try_time < self.config.max_retry: logging.error(\"retry: %d, %s\" % (try_time,", "fields raise ValueError(\"Field %s not in MySQL Table: %s\" % (str(difference_set), self.config.table)) self.table_checked", "< v: test_response[k] = v sql = \"\"\" CREATE TABLE `%s` ( \"\"\"", "self.config.random_max_sleep)) else: logging.error(\"Give up MySQL writer: %s, After retry: %d times, still fail", "False self.key_fields = list() self.auto_increment_keys = set() async def write(self, responses): await self.config.get_mysql_pool_cli()", "'%s'\" % (self.config.table, )) result = await self.config.cursor.fetchone() if result is None: await", 
"await self.create_table(responses) # check field await self.config.cursor.execute(\"DESC %s\" % (self.config.table, )) results =", "\"TEXT\" elif isinstance(value, bool): field_type = \"BOOLEAN\" elif isinstance(value, int): field_type = \"BIGINT\"", "self.success_count = 0 self.table_checked = False self.key_fields = list() self.auto_increment_keys = set() async", "self.config.table, sql) await self.config.cursor.execute(sql) await self.config.connection.commit() logging.info(\"table created\") async def perform_write(self, responses): sql", "self.auto_increment_keys = set() async def write(self, responses): await self.config.get_mysql_pool_cli() # init mysql pool", "curr_sql[:-1] + '),\\n' if need_specific_sql: sql_keys = \"(\" for each_sql_key in keys: sql_keys", "await self.config.cursor.execute(\"SHOW TABLES LIKE '%s'\" % (self.config.table, )) result = await self.config.cursor.fetchone() if", "continue val = each[field] keys.append(field) if isinstance(val, dict) or isinstance(val, list): val =", "elif v is not None and test_response[k] < v: test_response[k] = v sql", "Exception as e: try_time += 1 if try_time < self.config.max_retry: logging.error(\"retry: %d, %s\"", "need_specific_sql = True continue val = each[field] keys.append(field) if isinstance(val, dict) or isinstance(val,", "keys = list() curr_sql = '(' for field in self.key_fields: if field in", "config): super().__init__() self.config = config self.total_miss_count = 0 self.success_count = 0 self.table_checked =", "KEY (`id`) ) ENGINE=InnoDB DEFAULT CHARSET=%s \"\"\" % (self.config.charset, ) sql += tail_sql", "* 4 if length < 256: length = 256 field_type = \"VARCHAR(%d)\" %", "= \"(\" for each_sql_key in keys: sql_keys += each_sql_key + \",\" sql_keys =", "in response.items(): if k not in test_response: test_response[k] = v elif test_response[k] is", "+= tail_sql logging.info(\"Creating table: %s\\n%s\", self.config.table, sql) await self.config.cursor.execute(sql) await 
self.config.connection.commit() logging.info(\"table created\")", "v elif isinstance(v, dict) or isinstance(v, list): if len(json.dumps(test_response[k])) < len(json.dumps(v)): test_response[k] =", "is None: await self.create_table(responses) # check field await self.config.cursor.execute(\"DESC %s\" % (self.config.table, ))", "self.config.cursor.execute(ret_sql) await self.config.cursor.connection.commit() return True except Exception as e: try_time += 1 if", "(the shortest length) * 4 <= the longest length(8192) elif len(value) > 2048:", "import random import logging import traceback from .BaseWriter import BaseWriter class MySQLWriter(BaseWriter): def", "if key == \"id\": sql += \" NOT NULL,\\n\" else: sql += \",\\n\"", "self.table_checked = True async def create_table(self, responses): test_response = dict() for response in", "normal_sql: ret_sql += sql + \";\\n\" if sql_without_auto_increment_keys: ret_sql += \";\\n\".join(sql_without_auto_increment_keys) ret_sql +=", "\"total write %d items, total filtered: %d items, reason: %s\" % (self.config.name, self.config.max_retry,", "test_response[k] = v sql = \"\"\" CREATE TABLE `%s` ( \"\"\" % (self.config.table,", "\"TEXT\" elif key in (\"content\", ) or isinstance(value, dict) or isinstance(value, list): field_type", "False keys = list() curr_sql = '(' for field in self.key_fields: if field", "list(i[0] for i in results) real_keys = set(responses[0].keys()) difference_set = real_keys.difference(fields) if difference_set:", "1-8 bytes per character, # so length should be less than 65536 /", "self.perform_write(responses): self.finish_once(miss_count, original_length) def __exit__(self, exc_type, exc_val, exc_tb): self.config.free_resource() logging.info(\"%s write done, total", "= len(responses) if self.config.filter: target_responses = list() for i in responses: i =", "\"Count\" in key: field_type = \"BIGINT\" elif value is None: field_type = \"TEXT\"", "still fail to write, \" \"total write %d items, total 
filtered: %d items,", "__enter__(self): return self def finish_once(self, miss_count, original_length): self.total_miss_count += miss_count self.success_count += original_length", "miss_count += 1 responses = target_responses if not responses: self.finish_once(miss_count, original_length) return #", "field_type = \"BIGINT\" elif value is None: field_type = \"TEXT\" elif key in", "= False tail_sql = \"\"\" \\tPRIMARY KEY (`id`) ) ENGINE=InnoDB DEFAULT CHARSET=%s \"\"\"", "await self.config.cursor.execute(ret_sql) await self.config.cursor.connection.commit() return True except Exception as e: try_time += 1", "65536 / 8 = 8192 # assume this field (the shortest length) *", "v in response.items(): if k not in test_response: test_response[k] = v elif test_response[k]", "\",\" curr_sql = curr_sql[:-1] + '),\\n' if need_specific_sql: sql_keys = \"(\" for each_sql_key", "self.total_miss_count, self.success_count)) def __enter__(self): return self def finish_once(self, miss_count, original_length): self.total_miss_count += miss_count", "+= (\"\\t\" if first_field else \"\\t\\t\") + \"`%s` %s\" % (key, field_type) if", "\")\" sql_without_auto_increment_keys.append(\"REPLACE INTO %s%s VALUES \" % (self.config.table, sql_keys) + curr_sql[:-2]) else: normal_sql", "%d times, still fail to write, \" \"total write %d items, total filtered:", "for key, value in responses[0].items(): if \"Count\" in key: field_type = \"BIGINT\" elif", "VALUES \" % (self.config.table, sql_keys) + curr_sql[:-2]) else: normal_sql = True sql +=", "# init mysql pool miss_count = 0 original_length = len(responses) if self.config.filter: target_responses", "BaseWriter class MySQLWriter(BaseWriter): def __init__(self, config): super().__init__() self.config = config self.total_miss_count = 0", "% (str(difference_set), self.config.table)) self.table_checked = True async def create_table(self, responses): test_response = dict()", "True async def create_table(self, responses): test_response = dict() for 
response in responses[:50]: for", "responses): test_response = dict() for response in responses[:50]: for k, v in response.items():", "created\") async def perform_write(self, responses): sql = \"REPLACE INTO %s VALUES \" %", "self.success_count += original_length logging.info(\"%s write %d item, filtered %d item\" % (self.config.name, original_length", "k not in test_response: test_response[k] = v elif test_response[k] is None: test_response[k] =", "or isinstance(value, dict) or isinstance(value, list): field_type = \"TEXT\" elif isinstance(value, bool): field_type", "for i in results) real_keys = set(responses[0].keys()) difference_set = real_keys.difference(fields) if difference_set: #", "After filtered, still have responses to write if not self.table_checked: await self.table_check(responses) if", "or isinstance(val, list): val = json.dumps(val) if val is None: curr_sql += 'NULL,'", "% (length, ) sql += (\"\\t\" if first_field else \"\\t\\t\") + \"`%s` %s\"", "k, v in response.items(): if k not in test_response: test_response[k] = v elif", "test_response[k] < v: test_response[k] = v sql = \"\"\" CREATE TABLE `%s` (", "write, \" \"total write %d items, total filtered: %d items, reason: %s\" %", "not self.table_checked: await self.table_check(responses) if await self.perform_write(responses): self.finish_once(miss_count, original_length) def __exit__(self, exc_type, exc_val,", "sql = \"REPLACE INTO %s VALUES \" % (self.config.table, ) normal_sql = False", "curr_sql += 'NULL,' else: curr_sql += repr(val) + \",\" curr_sql = curr_sql[:-1] +", "%s%s VALUES \" % (self.config.table, sql_keys) + curr_sql[:-2]) else: normal_sql = True sql", "v elif test_response[k] is None: test_response[k] = v elif isinstance(v, dict) or isinstance(v,", "4 <= the longest length(8192) elif len(value) > 2048: field_type = \"TEXT\" else:", "should be less than 65536 / 8 = 8192 # assume this field", "i: target_responses.append(i) else: miss_count += 1 responses = target_responses if not 
responses: self.finish_once(miss_count,", "raise ValueError(\"Field %s not in MySQL Table: %s\" % (str(difference_set), self.config.table)) self.table_checked =", "% (self.config.table, sql_keys) + curr_sql[:-2]) else: normal_sql = True sql += curr_sql sql", "responses[0].items(): if \"Count\" in key: field_type = \"BIGINT\" elif value is None: field_type", "0 self.success_count = 0 self.table_checked = False self.key_fields = list() self.auto_increment_keys = set()", "in (\"content\", ) or isinstance(value, dict) or isinstance(value, list): field_type = \"TEXT\" elif", "of fields raise ValueError(\"Field %s not in MySQL Table: %s\" % (str(difference_set), self.config.table))", "== \"id\": sql += \" NOT NULL,\\n\" else: sql += \",\\n\" if first_field:", "%d item\" % (self.config.name, original_length - miss_count, miss_count)) async def table_check(self, responses): await", "if not responses: self.finish_once(miss_count, original_length) return # After filtered, still have responses to", "Table: %s\" % (str(difference_set), self.config.table)) self.table_checked = True async def create_table(self, responses): test_response", "logging.info(\"Creating table: %s\\n%s\", self.config.table, sql) await self.config.cursor.execute(sql) await self.config.connection.commit() logging.info(\"table created\") async def", "value is None: field_type = \"TEXT\" elif key in (\"content\", ) or isinstance(value,", "for i in results) self.key_fields = list(i[0] for i in results) real_keys =", "self.config.filter(i) if i: target_responses.append(i) else: miss_count += 1 responses = target_responses if not", "self.finish_once(miss_count, original_length) return # After filtered, still have responses to write if not", "need_specific_sql: sql_keys = \"(\" for each_sql_key in keys: sql_keys += each_sql_key + \",\"", "8192 # assume this field (the shortest length) * 4 <= the longest", "if k not in test_response: test_response[k] = v elif test_response[k] is None: test_response[k]", "+ 
\"`%s` %s\" % (key, field_type) if key == \"id\": sql += \"", "test_response[k] = v elif test_response[k] is None: test_response[k] = v elif isinstance(v, dict)", "in each: need_specific_sql = True continue val = each[field] keys.append(field) if isinstance(val, dict)", "%s, After retry: %d times, still fail to write, \" \"total write %d", "create_table(self, responses): test_response = dict() for response in responses[:50]: for k, v in", "return True except Exception as e: try_time += 1 if try_time < self.config.max_retry:", "self.config.filter: target_responses = list() for i in responses: i = self.config.filter(i) if i:", "item\" % (self.config.name, self.total_miss_count, self.success_count)) def __enter__(self): return self def finish_once(self, miss_count, original_length):", "# so length should be less than 65536 / 8 = 8192 #", "(self.config.charset, ) sql += tail_sql logging.info(\"Creating table: %s\\n%s\", self.config.table, sql) await self.config.cursor.execute(sql) await", "ValueError(\"Field %s not in MySQL Table: %s\" % (str(difference_set), self.config.table)) self.table_checked = True", "+= original_length logging.info(\"%s write %d item, filtered %d item\" % (self.config.name, original_length -", "65536 bytes, utf8 occupy 1-8 bytes per character, # so length should be", "times, still fail to write, \" \"total write %d items, total filtered: %d", "retry: %d times, still fail to write, \" \"total write %d items, total", "%s\" % (str(difference_set), self.config.table)) self.table_checked = True async def create_table(self, responses): test_response =", "key == \"id\": sql += \" NOT NULL,\\n\" else: sql += \",\\n\" if", "perform_write(self, responses): sql = \"REPLACE INTO %s VALUES \" % (self.config.table, ) normal_sql", "miss_count, miss_count)) async def table_check(self, responses): await self.config.cursor.execute(\"SHOW TABLES LIKE '%s'\" % (self.config.table,", "False sql_without_auto_increment_keys = list() for each in responses: 
need_specific_sql = False keys =", "field_type = \"DOUBLE\" # varchar can store at most 65536 bytes, utf8 occupy", "if normal_sql: ret_sql += sql + \";\\n\" if sql_without_auto_increment_keys: ret_sql += \";\\n\".join(sql_without_auto_increment_keys) ret_sql", "1 responses = target_responses if not responses: self.finish_once(miss_count, original_length) return # After filtered,", "length) * 4 <= the longest length(8192) elif len(value) > 2048: field_type =", "= list() for i in responses: i = self.config.filter(i) if i: target_responses.append(i) else:", "responses = target_responses if not responses: self.finish_once(miss_count, original_length) return # After filtered, still", "self.config.get_mysql_pool_cli() # init mysql pool miss_count = 0 original_length = len(responses) if self.config.filter:", "# assume this field (the shortest length) * 4 <= the longest length(8192)", "(self.config.table, ) first_field = True for key, value in responses[0].items(): if \"Count\" in", "results) real_keys = set(responses[0].keys()) difference_set = real_keys.difference(fields) if difference_set: # real keys not", "in self.key_fields: if field in self.auto_increment_keys and field not in each: need_specific_sql =", "+ \";\\n\" if sql_without_auto_increment_keys: ret_sql += \";\\n\".join(sql_without_auto_increment_keys) ret_sql += \";\" await self.config.cursor.execute(ret_sql) await", "4 if length < 256: length = 256 field_type = \"VARCHAR(%d)\" % (length,", "MySQL writer: %s, After retry: %d times, still fail to write, \" \"total", "write %d items, total filtered: %d items, reason: %s\" % (self.config.name, self.config.max_retry, self.success_count,", "256 field_type = \"VARCHAR(%d)\" % (length, ) sql += (\"\\t\" if first_field else", "or isinstance(v, list): if len(json.dumps(test_response[k])) < len(json.dumps(v)): test_response[k] = v elif v is", "= await self.config.cursor.fetchone() if result is None: await self.create_table(responses) # check field await", "in 
responses[:50]: for k, v in response.items(): if k not in test_response: test_response[k]", "(self.config.name, self.total_miss_count, self.success_count)) def __enter__(self): return self def finish_once(self, miss_count, original_length): self.total_miss_count +=", "field_type = \"BIGINT\" elif isinstance(value, float): field_type = \"DOUBLE\" # varchar can store", "/ 8 = 8192 # assume this field (the shortest length) * 4", "\"VARCHAR(%d)\" % (length, ) sql += (\"\\t\" if first_field else \"\\t\\t\") + \"`%s`", "sql_keys = sql_keys[:-1] + \")\" sql_without_auto_increment_keys.append(\"REPLACE INTO %s%s VALUES \" % (self.config.table, sql_keys)", "\"auto_increment\" in field: self.auto_increment_keys.add(field[0]) fields = set(i[0] for i in results) self.key_fields =", "length(8192) elif len(value) > 2048: field_type = \"TEXT\" else: length = len(value) *", "not None and test_response[k] < v: test_response[k] = v sql = \"\"\" CREATE", "# real keys not subset of fields raise ValueError(\"Field %s not in MySQL", "normal_sql = False sql_without_auto_increment_keys = list() for each in responses: need_specific_sql = False", "if await self.perform_write(responses): self.finish_once(miss_count, original_length) def __exit__(self, exc_type, exc_val, exc_tb): self.config.free_resource() logging.info(\"%s write", "keys.append(field) if isinstance(val, dict) or isinstance(val, list): val = json.dumps(val) if val is", "exc_tb): self.config.free_resource() logging.info(\"%s write done, total filtered %d item, total write %d item\"", "in self.auto_increment_keys and field not in each: need_specific_sql = True continue val =", "normal_sql = True sql += curr_sql sql = sql[:-2] try_time = 0 while", ")) results = await self.config.cursor.fetchall() for field in results: if \"auto_increment\" in field:", "1 if try_time < self.config.max_retry: logging.error(\"retry: %d, %s\" % (try_time, str(e))) await asyncio.sleep(random.uniform(self.config.random_min_sleep,", 
"sql_without_auto_increment_keys.append(\"REPLACE INTO %s%s VALUES \" % (self.config.table, sql_keys) + curr_sql[:-2]) else: normal_sql =", "value in responses[0].items(): if \"Count\" in key: field_type = \"BIGINT\" elif value is", "fail to write, \" \"total write %d items, total filtered: %d items, reason:", "\"\\t\\t\") + \"`%s` %s\" % (key, field_type) if key == \"id\": sql +=", "for field in results: if \"auto_increment\" in field: self.auto_increment_keys.add(field[0]) fields = set(i[0] for", "sql += (\"\\t\" if first_field else \"\\t\\t\") + \"`%s` %s\" % (key, field_type)", "return self def finish_once(self, miss_count, original_length): self.total_miss_count += miss_count self.success_count += original_length logging.info(\"%s", "\"DOUBLE\" # varchar can store at most 65536 bytes, utf8 occupy 1-8 bytes", "(self.config.table, )) results = await self.config.cursor.fetchall() for field in results: if \"auto_increment\" in", "elif len(value) > 2048: field_type = \"TEXT\" else: length = len(value) * 4", "miss_count self.success_count += original_length logging.info(\"%s write %d item, filtered %d item\" % (self.config.name,", "= True sql += curr_sql sql = sql[:-2] try_time = 0 while try_time", "this field (the shortest length) * 4 <= the longest length(8192) elif len(value)", "% (self.config.table, )) results = await self.config.cursor.fetchall() for field in results: if \"auto_increment\"", "MySQLWriter(BaseWriter): def __init__(self, config): super().__init__() self.config = config self.total_miss_count = 0 self.success_count =", "= False sql_without_auto_increment_keys = list() for each in responses: need_specific_sql = False keys", "is None: test_response[k] = v elif isinstance(v, dict) or isinstance(v, list): if len(json.dumps(test_response[k]))", "NOT NULL,\\n\" else: sql += \",\\n\" if first_field: first_field = False tail_sql =", "in results: if \"auto_increment\" in field: self.auto_increment_keys.add(field[0]) fields = set(i[0] for i in", "if not 
self.table_checked: await self.table_check(responses) if await self.perform_write(responses): self.finish_once(miss_count, original_length) def __exit__(self, exc_type,", "need_specific_sql = False keys = list() curr_sql = '(' for field in self.key_fields:", "\"(\" for each_sql_key in keys: sql_keys += each_sql_key + \",\" sql_keys = sql_keys[:-1]", "in responses[0].items(): if \"Count\" in key: field_type = \"BIGINT\" elif value is None:", "return # After filtered, still have responses to write if not self.table_checked: await", "response in responses[:50]: for k, v in response.items(): if k not in test_response:", "miss_count, original_length): self.total_miss_count += miss_count self.success_count += original_length logging.info(\"%s write %d item, filtered", "len(json.dumps(v)): test_response[k] = v elif v is not None and test_response[k] < v:", "isinstance(value, float): field_type = \"DOUBLE\" # varchar can store at most 65536 bytes,", "= \"TEXT\" else: length = len(value) * 4 if length < 256: length", "self.config.connection.commit() logging.info(\"table created\") async def perform_write(self, responses): sql = \"REPLACE INTO %s VALUES", "async def write(self, responses): await self.config.get_mysql_pool_cli() # init mysql pool miss_count = 0", "sql_keys[:-1] + \")\" sql_without_auto_increment_keys.append(\"REPLACE INTO %s%s VALUES \" % (self.config.table, sql_keys) + curr_sql[:-2])", "for i in responses: i = self.config.filter(i) if i: target_responses.append(i) else: miss_count +=", "= \"BIGINT\" elif isinstance(value, float): field_type = \"DOUBLE\" # varchar can store at", "def write(self, responses): await self.config.get_mysql_pool_cli() # init mysql pool miss_count = 0 original_length", "await asyncio.sleep(random.uniform(self.config.random_min_sleep, self.config.random_max_sleep)) else: logging.error(\"Give up MySQL writer: %s, After retry: %d times,", "= 256 field_type = \"VARCHAR(%d)\" % (length, ) sql += (\"\\t\" if first_field", 
"target_responses.append(i) else: miss_count += 1 responses = target_responses if not responses: self.finish_once(miss_count, original_length)", "int): field_type = \"BIGINT\" elif isinstance(value, float): field_type = \"DOUBLE\" # varchar can", "\"BIGINT\" elif value is None: field_type = \"TEXT\" elif key in (\"content\", )", "field not in each: need_specific_sql = True continue val = each[field] keys.append(field) if", "= json.dumps(val) if val is None: curr_sql += 'NULL,' else: curr_sql += repr(val)", "sql_without_auto_increment_keys: ret_sql += \";\\n\".join(sql_without_auto_increment_keys) ret_sql += \";\" await self.config.cursor.execute(ret_sql) await self.config.cursor.connection.commit() return True", "% (self.config.table, )) result = await self.config.cursor.fetchone() if result is None: await self.create_table(responses)", "elif isinstance(value, int): field_type = \"BIGINT\" elif isinstance(value, float): field_type = \"DOUBLE\" #", "if len(json.dumps(test_response[k])) < len(json.dumps(v)): test_response[k] = v elif v is not None and", "original_length - miss_count, miss_count)) async def table_check(self, responses): await self.config.cursor.execute(\"SHOW TABLES LIKE '%s'\"", "\"REPLACE INTO %s VALUES \" % (self.config.table, ) normal_sql = False sql_without_auto_increment_keys =", "= v elif test_response[k] is None: test_response[k] = v elif isinstance(v, dict) or", "each_sql_key + \",\" sql_keys = sql_keys[:-1] + \")\" sql_without_auto_increment_keys.append(\"REPLACE INTO %s%s VALUES \"", "+ \")\" sql_without_auto_increment_keys.append(\"REPLACE INTO %s%s VALUES \" % (self.config.table, sql_keys) + curr_sql[:-2]) else:", "try_time += 1 if try_time < self.config.max_retry: logging.error(\"retry: %d, %s\" % (try_time, str(e)))", "original_length) return # After filtered, still have responses to write if not self.table_checked:", "= False keys = list() curr_sql = '(' for field in self.key_fields: if", "bytes per character, # so length should be 
less than 65536 / 8", "curr_sql sql = sql[:-2] try_time = 0 while try_time < self.config.max_retry: try: ret_sql" ]
[ "dataset.CassavaDataset(train_df, device=DEVICE) valid_dataset = dataset.CassavaDataset(valid_df, device=DEVICE) train_dataloader = DataLoader(train_dataset, batch_size=BATCH_SIZE, num_workers=4, shuffle=True) vaid_dataloader", "batch in enumerate(vaid_dataloader): img = batch[0] label = batch[1] if val_labels is None:", "import numpy as np from sklearn.metrics import accuracy_score import torch from torch.utils.data import", "= np.zeros((df.shape[0])) train_results = [] for i in range(folds): valid_idx = df[df.kfold ==", "i, 'lr': optimizer.state_dict()['params_groups'][0]['lr'], 'model_state_dict': model.state_dict(), 'optimizer_state_dict': optimizer.state_dict() }, f'./model/baseline/val_loss {train_results[i].val_loss}.pth') if __name__ ==", "model(img) # print(f'outputs \\n {outputs}') loss = criterion(outputs, label.squeeze(-1)) loss.backward() t_loss += loss.item()", "DenseCrossEntropy() train_fold_results = [] for epoch in range(EPOCHS): model.train() t_loss = 0 for", "= DenseCrossEntropy() train_fold_results = [] for epoch in range(EPOCHS): model.train() t_loss = 0", "from torch.utils.data import DataLoader import torch.optim as optim from model import CassavaModel from", "== i].index val_preds, train_fold_results = train_one_fold(i, model, optimizer) oof_preds[valid_idx] = val_preds.numpy() train_results +=", "DataLoader(valid_dataset, batch_size=BATCH_SIZE, num_workers=4, shuffle=True) device = torch.device(DEVICE) criterion = DenseCrossEntropy() train_fold_results = []", "import CassavaModel from loss import DenseCrossEntropy import dataset from config import * def", "from loss import DenseCrossEntropy import dataset from config import * def train_one_fold(fold, model,", "train_df = df[df.kfold != fold].reset_index(drop=True) valid_df = df[df.kfold == fold].reset_index(drop=True) train_dataset = dataset.CassavaDataset(train_df,", "= dataset.CassavaDataset(train_df, device=DEVICE) valid_dataset = dataset.CassavaDataset(valid_df, device=DEVICE) 
train_dataloader = DataLoader(train_dataset, batch_size=BATCH_SIZE, num_workers=4, shuffle=True)", "df[df.kfold != fold].reset_index(drop=True) valid_df = df[df.kfold == fold].reset_index(drop=True) train_dataset = dataset.CassavaDataset(train_df, device=DEVICE) valid_dataset", "import DenseCrossEntropy import dataset from config import * def train_one_fold(fold, model, optimizer): df", "is None: val_preds = preds else: val_preds = torch.cat((val_preds, preds), dim=0) val_preds =", "as np from sklearn.metrics import accuracy_score import torch from torch.utils.data import DataLoader import", "train_fold_results = [] for epoch in range(EPOCHS): model.train() t_loss = 0 for step,", "= df[df.kfold != fold].reset_index(drop=True) valid_df = df[df.kfold == fold].reset_index(drop=True) train_dataset = dataset.CassavaDataset(train_df, device=DEVICE)", "val_preds.numpy() train_results += train_fold_results torch.save({ 'fold': i, 'lr': optimizer.state_dict()['params_groups'][0]['lr'], 'model_state_dict': model.state_dict(), 'optimizer_state_dict': optimizer.state_dict()", "/ len(vaid_dataloader) }) return val_preds, train_fold_results def k_fold_train(folds): model = CassavaModel() model.to(DEVICE) plist", "dataset from config import * def train_one_fold(fold, model, optimizer): df = pd.read_csv('./input/train_ohe.csv') train_df", "numpy as np from sklearn.metrics import accuracy_score import torch from torch.utils.data import DataLoader", "= img.to(DEVICE, dtype=torch.float) label = label.to(DEVICE, dtype=torch.float) outputs = model(img) # print(f'outputs \\n", "= 0 val_preds = None val_labels = None for step, batch in enumerate(vaid_dataloader):", "pd.read_csv('./input/train_ohe.csv') oof_preds = np.zeros((df.shape[0])) train_results = [] for i in range(folds): valid_idx =", "= [] for i in range(folds): valid_idx = df[df.kfold == i].index val_preds, train_fold_results", "img = batch[0] label = batch[1] img = img.to(DEVICE, dtype=torch.float) label = label.to(DEVICE,", 
"t_loss += loss.item() optimizer.step() optimizer.zero_grad() model.eval() val_loss = 0 val_preds = None val_labels", "accuracy_score import torch from torch.utils.data import DataLoader import torch.optim as optim from model", "optimizer.state_dict()['params_groups'][0]['lr'], 'model_state_dict': model.state_dict(), 'optimizer_state_dict': optimizer.state_dict() }, f'./model/baseline/val_loss {train_results[i].val_loss}.pth') if __name__ == '__main__': k_fold_train(5)", "loss = criterion(outputs, label.squeeze(-1)) val_loss += loss.item() preds = torch.softmax(outputs, dim=1).data.cuda() if val_preds", "label = label.to(DEVICE, dtype=torch.float) with torch.no_grad(): outputs = model(img) loss = criterion(outputs, label.squeeze(-1))", "num_workers=4, shuffle=True) device = torch.device(DEVICE) criterion = DenseCrossEntropy() train_fold_results = [] for epoch", "'lr':5e-5}] optimizer = optim.Adam(plist) df = pd.read_csv('./input/train_ohe.csv') oof_preds = np.zeros((df.shape[0])) train_results = []", "train_dataset = dataset.CassavaDataset(train_df, device=DEVICE) valid_dataset = dataset.CassavaDataset(valid_df, device=DEVICE) train_dataloader = DataLoader(train_dataset, batch_size=BATCH_SIZE, num_workers=4,", "def k_fold_train(folds): model = CassavaModel() model.to(DEVICE) plist = [{'params':model.parameters(), 'lr':5e-5}] optimizer = optim.Adam(plist)", "valid_dataset = dataset.CassavaDataset(valid_df, device=DEVICE) train_dataloader = DataLoader(train_dataset, batch_size=BATCH_SIZE, num_workers=4, shuffle=True) vaid_dataloader = DataLoader(valid_dataset,", "= [] for epoch in range(EPOCHS): model.train() t_loss = 0 for step, batch", "label.to(DEVICE, dtype=torch.float) outputs = model(img) # print(f'outputs \\n {outputs}') loss = criterion(outputs, label.squeeze(-1))", "img.to(DEVICE, dtype=torch.float) label = label.to(DEVICE, dtype=torch.float) with torch.no_grad(): outputs = model(img) loss =", "0 for step, batch in enumerate(train_dataloader): img = batch[0] 
label = batch[1] img", "= [{'params':model.parameters(), 'lr':5e-5}] optimizer = optim.Adam(plist) df = pd.read_csv('./input/train_ohe.csv') oof_preds = np.zeros((df.shape[0])) train_results", "val_preds is None: val_preds = preds else: val_preds = torch.cat((val_preds, preds), dim=0) val_preds", "df[df.kfold == i].index val_preds, train_fold_results = train_one_fold(i, model, optimizer) oof_preds[valid_idx] = val_preds.numpy() train_results", "print(f'EPOCH : {epoch}, train_loss: {t_loss}, valid_loss: {val_loss}') train_fold_results.append({ 'fold': fold, 'epoch': epoch, 'train_loss':", ": {epoch}, train_loss: {t_loss}, valid_loss: {val_loss}') train_fold_results.append({ 'fold': fold, 'epoch': epoch, 'train_loss': t_loss", "torch.cat((val_preds, preds), dim=0) val_preds = torch.argmax(val_preds, dim=1) print(f'EPOCH : {epoch}, train_loss: {t_loss}, valid_loss:", "label = batch[1] img = img.to(DEVICE, dtype=torch.float) label = label.to(DEVICE, dtype=torch.float) outputs =", "= None val_labels = None for step, batch in enumerate(vaid_dataloader): img = batch[0]", "with torch.no_grad(): outputs = model(img) loss = criterion(outputs, label.squeeze(-1)) val_loss += loss.item() preds", "[] for i in range(folds): valid_idx = df[df.kfold == i].index val_preds, train_fold_results =", "import DataLoader import torch.optim as optim from model import CassavaModel from loss import", "= dataset.CassavaDataset(valid_df, device=DEVICE) train_dataloader = DataLoader(train_dataset, batch_size=BATCH_SIZE, num_workers=4, shuffle=True) vaid_dataloader = DataLoader(valid_dataset, batch_size=BATCH_SIZE,", "= torch.cat((val_preds, preds), dim=0) val_preds = torch.argmax(val_preds, dim=1) print(f'EPOCH : {epoch}, train_loss: {t_loss},", "device=DEVICE) valid_dataset = dataset.CassavaDataset(valid_df, device=DEVICE) train_dataloader = DataLoader(train_dataset, batch_size=BATCH_SIZE, num_workers=4, shuffle=True) vaid_dataloader =", "pandas as pd import numpy as np from sklearn.metrics 
import accuracy_score import torch", "loss import DenseCrossEntropy import dataset from config import * def train_one_fold(fold, model, optimizer):", "loss = criterion(outputs, label.squeeze(-1)) loss.backward() t_loss += loss.item() optimizer.step() optimizer.zero_grad() model.eval() val_loss =", "num_workers=4, shuffle=True) vaid_dataloader = DataLoader(valid_dataset, batch_size=BATCH_SIZE, num_workers=4, shuffle=True) device = torch.device(DEVICE) criterion =", "model import CassavaModel from loss import DenseCrossEntropy import dataset from config import *", "= None for step, batch in enumerate(vaid_dataloader): img = batch[0] label = batch[1]", "preds), dim=0) val_preds = torch.argmax(val_preds, dim=1) print(f'EPOCH : {epoch}, train_loss: {t_loss}, valid_loss: {val_loss}')", "outputs = model(img) # print(f'outputs \\n {outputs}') loss = criterion(outputs, label.squeeze(-1)) loss.backward() t_loss", "DenseCrossEntropy import dataset from config import * def train_one_fold(fold, model, optimizer): df =", "= batch[0] label = batch[1] if val_labels is None: val_labels = label.clone().squeeze(-1) else:", "dtype=torch.float) with torch.no_grad(): outputs = model(img) loss = criterion(outputs, label.squeeze(-1)) val_loss += loss.item()", "= torch.device(DEVICE) criterion = DenseCrossEntropy() train_fold_results = [] for epoch in range(EPOCHS): model.train()", "loss.item() preds = torch.softmax(outputs, dim=1).data.cuda() if val_preds is None: val_preds = preds else:", "img = batch[0] label = batch[1] if val_labels is None: val_labels = label.clone().squeeze(-1)", "fold, 'epoch': epoch, 'train_loss': t_loss / len(train_dataloader), 'valid_loss': val_loss / len(vaid_dataloader) }) return", "is None: val_labels = label.clone().squeeze(-1) else: val_labels = torch.cat((val_labels, label.squeeze(-1)), dim=0) img =", "= train_one_fold(i, model, optimizer) oof_preds[valid_idx] = val_preds.numpy() train_results += train_fold_results torch.save({ 'fold': i,", "'train_loss': 
t_loss / len(train_dataloader), 'valid_loss': val_loss / len(vaid_dataloader) }) return val_preds, train_fold_results def", "from model import CassavaModel from loss import DenseCrossEntropy import dataset from config import", "from sklearn.metrics import accuracy_score import torch from torch.utils.data import DataLoader import torch.optim as", "= pd.read_csv('./input/train_ohe.csv') train_df = df[df.kfold != fold].reset_index(drop=True) valid_df = df[df.kfold == fold].reset_index(drop=True) train_dataset", "batch_size=BATCH_SIZE, num_workers=4, shuffle=True) device = torch.device(DEVICE) criterion = DenseCrossEntropy() train_fold_results = [] for", "step, batch in enumerate(train_dataloader): img = batch[0] label = batch[1] img = img.to(DEVICE,", "model, optimizer) oof_preds[valid_idx] = val_preds.numpy() train_results += train_fold_results torch.save({ 'fold': i, 'lr': optimizer.state_dict()['params_groups'][0]['lr'],", "= df[df.kfold == fold].reset_index(drop=True) train_dataset = dataset.CassavaDataset(train_df, device=DEVICE) valid_dataset = dataset.CassavaDataset(valid_df, device=DEVICE) train_dataloader", "for epoch in range(EPOCHS): model.train() t_loss = 0 for step, batch in enumerate(train_dataloader):", "for step, batch in enumerate(train_dataloader): img = batch[0] label = batch[1] img =", "dtype=torch.float) label = label.to(DEVICE, dtype=torch.float) outputs = model(img) # print(f'outputs \\n {outputs}') loss", "= model(img) # print(f'outputs \\n {outputs}') loss = criterion(outputs, label.squeeze(-1)) loss.backward() t_loss +=", "# print(f'outputs \\n {outputs}') loss = criterion(outputs, label.squeeze(-1)) loss.backward() t_loss += loss.item() optimizer.step()", "+= loss.item() optimizer.step() optimizer.zero_grad() model.eval() val_loss = 0 val_preds = None val_labels =", "optimizer) oof_preds[valid_idx] = val_preds.numpy() train_results += train_fold_results torch.save({ 'fold': i, 'lr': optimizer.state_dict()['params_groups'][0]['lr'], 
'model_state_dict':", "print(f'outputs \\n {outputs}') loss = criterion(outputs, label.squeeze(-1)) loss.backward() t_loss += loss.item() optimizer.step() optimizer.zero_grad()", "valid_loss: {val_loss}') train_fold_results.append({ 'fold': fold, 'epoch': epoch, 'train_loss': t_loss / len(train_dataloader), 'valid_loss': val_loss", "model.train() t_loss = 0 for step, batch in enumerate(train_dataloader): img = batch[0] label", "None val_labels = None for step, batch in enumerate(vaid_dataloader): img = batch[0] label", "img = img.to(DEVICE, dtype=torch.float) label = label.to(DEVICE, dtype=torch.float) outputs = model(img) # print(f'outputs", "= batch[0] label = batch[1] img = img.to(DEVICE, dtype=torch.float) label = label.to(DEVICE, dtype=torch.float)", "'valid_loss': val_loss / len(vaid_dataloader) }) return val_preds, train_fold_results def k_fold_train(folds): model = CassavaModel()", "batch[0] label = batch[1] img = img.to(DEVICE, dtype=torch.float) label = label.to(DEVICE, dtype=torch.float) outputs", "optimizer.step() optimizer.zero_grad() model.eval() val_loss = 0 val_preds = None val_labels = None for", "batch in enumerate(train_dataloader): img = batch[0] label = batch[1] img = img.to(DEVICE, dtype=torch.float)", "optimizer = optim.Adam(plist) df = pd.read_csv('./input/train_ohe.csv') oof_preds = np.zeros((df.shape[0])) train_results = [] for", "model(img) loss = criterion(outputs, label.squeeze(-1)) val_loss += loss.item() preds = torch.softmax(outputs, dim=1).data.cuda() if", "criterion = DenseCrossEntropy() train_fold_results = [] for epoch in range(EPOCHS): model.train() t_loss =", "= batch[1] if val_labels is None: val_labels = label.clone().squeeze(-1) else: val_labels = torch.cat((val_labels,", "model.eval() val_loss = 0 val_preds = None val_labels = None for step, batch", "else: val_preds = torch.cat((val_preds, preds), dim=0) val_preds = torch.argmax(val_preds, dim=1) print(f'EPOCH : {epoch},", "{t_loss}, valid_loss: {val_loss}') 
train_fold_results.append({ 'fold': fold, 'epoch': epoch, 'train_loss': t_loss / len(train_dataloader), 'valid_loss':", "k_fold_train(folds): model = CassavaModel() model.to(DEVICE) plist = [{'params':model.parameters(), 'lr':5e-5}] optimizer = optim.Adam(plist) df", "torch.cat((val_labels, label.squeeze(-1)), dim=0) img = img.to(DEVICE, dtype=torch.float) label = label.to(DEVICE, dtype=torch.float) with torch.no_grad():", "dtype=torch.float) label = label.to(DEVICE, dtype=torch.float) with torch.no_grad(): outputs = model(img) loss = criterion(outputs,", "batch_size=BATCH_SIZE, num_workers=4, shuffle=True) vaid_dataloader = DataLoader(valid_dataset, batch_size=BATCH_SIZE, num_workers=4, shuffle=True) device = torch.device(DEVICE) criterion", "val_preds = torch.argmax(val_preds, dim=1) print(f'EPOCH : {epoch}, train_loss: {t_loss}, valid_loss: {val_loss}') train_fold_results.append({ 'fold':", "= CassavaModel() model.to(DEVICE) plist = [{'params':model.parameters(), 'lr':5e-5}] optimizer = optim.Adam(plist) df = pd.read_csv('./input/train_ohe.csv')", "np from sklearn.metrics import accuracy_score import torch from torch.utils.data import DataLoader import torch.optim", "= criterion(outputs, label.squeeze(-1)) loss.backward() t_loss += loss.item() optimizer.step() optimizer.zero_grad() model.eval() val_loss = 0", "optim from model import CassavaModel from loss import DenseCrossEntropy import dataset from config", "{outputs}') loss = criterion(outputs, label.squeeze(-1)) loss.backward() t_loss += loss.item() optimizer.step() optimizer.zero_grad() model.eval() val_loss", "label.clone().squeeze(-1) else: val_labels = torch.cat((val_labels, label.squeeze(-1)), dim=0) img = img.to(DEVICE, dtype=torch.float) label =", "val_preds = preds else: val_preds = torch.cat((val_preds, preds), dim=0) val_preds = torch.argmax(val_preds, dim=1)", "device = torch.device(DEVICE) criterion = DenseCrossEntropy() train_fold_results = [] for epoch in range(EPOCHS):", "= val_preds.numpy() 
train_results += train_fold_results torch.save({ 'fold': i, 'lr': optimizer.state_dict()['params_groups'][0]['lr'], 'model_state_dict': model.state_dict(), 'optimizer_state_dict':", "torch.save({ 'fold': i, 'lr': optimizer.state_dict()['params_groups'][0]['lr'], 'model_state_dict': model.state_dict(), 'optimizer_state_dict': optimizer.state_dict() }, f'./model/baseline/val_loss {train_results[i].val_loss}.pth') if", "loss.item() optimizer.step() optimizer.zero_grad() model.eval() val_loss = 0 val_preds = None val_labels = None", "oof_preds[valid_idx] = val_preds.numpy() train_results += train_fold_results torch.save({ 'fold': i, 'lr': optimizer.state_dict()['params_groups'][0]['lr'], 'model_state_dict': model.state_dict(),", "range(folds): valid_idx = df[df.kfold == i].index val_preds, train_fold_results = train_one_fold(i, model, optimizer) oof_preds[valid_idx]", "train_fold_results def k_fold_train(folds): model = CassavaModel() model.to(DEVICE) plist = [{'params':model.parameters(), 'lr':5e-5}] optimizer =", "val_labels is None: val_labels = label.clone().squeeze(-1) else: val_labels = torch.cat((val_labels, label.squeeze(-1)), dim=0) img", "df[df.kfold == fold].reset_index(drop=True) train_dataset = dataset.CassavaDataset(train_df, device=DEVICE) valid_dataset = dataset.CassavaDataset(valid_df, device=DEVICE) train_dataloader =", "'epoch': epoch, 'train_loss': t_loss / len(train_dataloader), 'valid_loss': val_loss / len(vaid_dataloader) }) return val_preds,", "import pandas as pd import numpy as np from sklearn.metrics import accuracy_score import", "label.squeeze(-1)) loss.backward() t_loss += loss.item() optimizer.step() optimizer.zero_grad() model.eval() val_loss = 0 val_preds =", "df = pd.read_csv('./input/train_ohe.csv') oof_preds = np.zeros((df.shape[0])) train_results = [] for i in range(folds):", "torch from torch.utils.data import DataLoader import torch.optim as optim from model import CassavaModel", "train_fold_results torch.save({ 'fold': i, 'lr': 
optimizer.state_dict()['params_groups'][0]['lr'], 'model_state_dict': model.state_dict(), 'optimizer_state_dict': optimizer.state_dict() }, f'./model/baseline/val_loss {train_results[i].val_loss}.pth')", "model.to(DEVICE) plist = [{'params':model.parameters(), 'lr':5e-5}] optimizer = optim.Adam(plist) df = pd.read_csv('./input/train_ohe.csv') oof_preds =", "vaid_dataloader = DataLoader(valid_dataset, batch_size=BATCH_SIZE, num_workers=4, shuffle=True) device = torch.device(DEVICE) criterion = DenseCrossEntropy() train_fold_results", "pd.read_csv('./input/train_ohe.csv') train_df = df[df.kfold != fold].reset_index(drop=True) valid_df = df[df.kfold == fold].reset_index(drop=True) train_dataset =", "range(EPOCHS): model.train() t_loss = 0 for step, batch in enumerate(train_dataloader): img = batch[0]", "torch.softmax(outputs, dim=1).data.cuda() if val_preds is None: val_preds = preds else: val_preds = torch.cat((val_preds,", "label.squeeze(-1)), dim=0) img = img.to(DEVICE, dtype=torch.float) label = label.to(DEVICE, dtype=torch.float) with torch.no_grad(): outputs", "import accuracy_score import torch from torch.utils.data import DataLoader import torch.optim as optim from", "in range(EPOCHS): model.train() t_loss = 0 for step, batch in enumerate(train_dataloader): img =", "val_preds, train_fold_results = train_one_fold(i, model, optimizer) oof_preds[valid_idx] = val_preds.numpy() train_results += train_fold_results torch.save({", "valid_df = df[df.kfold == fold].reset_index(drop=True) train_dataset = dataset.CassavaDataset(train_df, device=DEVICE) valid_dataset = dataset.CassavaDataset(valid_df, device=DEVICE)", "import torch.optim as optim from model import CassavaModel from loss import DenseCrossEntropy import", "= model(img) loss = criterion(outputs, label.squeeze(-1)) val_loss += loss.item() preds = torch.softmax(outputs, dim=1).data.cuda()", "val_labels = None for step, batch in enumerate(vaid_dataloader): img = batch[0] label =", "label.squeeze(-1)) val_loss += 
loss.item() preds = torch.softmax(outputs, dim=1).data.cuda() if val_preds is None: val_preds", "dim=1) print(f'EPOCH : {epoch}, train_loss: {t_loss}, valid_loss: {val_loss}') train_fold_results.append({ 'fold': fold, 'epoch': epoch,", "loss.backward() t_loss += loss.item() optimizer.step() optimizer.zero_grad() model.eval() val_loss = 0 val_preds = None", "if val_labels is None: val_labels = label.clone().squeeze(-1) else: val_labels = torch.cat((val_labels, label.squeeze(-1)), dim=0)", "plist = [{'params':model.parameters(), 'lr':5e-5}] optimizer = optim.Adam(plist) df = pd.read_csv('./input/train_ohe.csv') oof_preds = np.zeros((df.shape[0]))", "config import * def train_one_fold(fold, model, optimizer): df = pd.read_csv('./input/train_ohe.csv') train_df = df[df.kfold", "= df[df.kfold == i].index val_preds, train_fold_results = train_one_fold(i, model, optimizer) oof_preds[valid_idx] = val_preds.numpy()", "optim.Adam(plist) df = pd.read_csv('./input/train_ohe.csv') oof_preds = np.zeros((df.shape[0])) train_results = [] for i in", "train_results += train_fold_results torch.save({ 'fold': i, 'lr': optimizer.state_dict()['params_groups'][0]['lr'], 'model_state_dict': model.state_dict(), 'optimizer_state_dict': optimizer.state_dict() },", "oof_preds = np.zeros((df.shape[0])) train_results = [] for i in range(folds): valid_idx = df[df.kfold", "in enumerate(train_dataloader): img = batch[0] label = batch[1] img = img.to(DEVICE, dtype=torch.float) label", "from config import * def train_one_fold(fold, model, optimizer): df = pd.read_csv('./input/train_ohe.csv') train_df =", "val_labels = label.clone().squeeze(-1) else: val_labels = torch.cat((val_labels, label.squeeze(-1)), dim=0) img = img.to(DEVICE, dtype=torch.float)", "{epoch}, train_loss: {t_loss}, valid_loss: {val_loss}') train_fold_results.append({ 'fold': fold, 'epoch': epoch, 'train_loss': t_loss /", "fold].reset_index(drop=True) valid_df = df[df.kfold == fold].reset_index(drop=True) train_dataset = 
dataset.CassavaDataset(train_df, device=DEVICE) valid_dataset = dataset.CassavaDataset(valid_df,", "valid_idx = df[df.kfold == i].index val_preds, train_fold_results = train_one_fold(i, model, optimizer) oof_preds[valid_idx] =", "device=DEVICE) train_dataloader = DataLoader(train_dataset, batch_size=BATCH_SIZE, num_workers=4, shuffle=True) vaid_dataloader = DataLoader(valid_dataset, batch_size=BATCH_SIZE, num_workers=4, shuffle=True)", "\\n {outputs}') loss = criterion(outputs, label.squeeze(-1)) loss.backward() t_loss += loss.item() optimizer.step() optimizer.zero_grad() model.eval()", "torch.utils.data import DataLoader import torch.optim as optim from model import CassavaModel from loss", "'fold': fold, 'epoch': epoch, 'train_loss': t_loss / len(train_dataloader), 'valid_loss': val_loss / len(vaid_dataloader) })", "criterion(outputs, label.squeeze(-1)) loss.backward() t_loss += loss.item() optimizer.step() optimizer.zero_grad() model.eval() val_loss = 0 val_preds", "model = CassavaModel() model.to(DEVICE) plist = [{'params':model.parameters(), 'lr':5e-5}] optimizer = optim.Adam(plist) df =", "model, optimizer): df = pd.read_csv('./input/train_ohe.csv') train_df = df[df.kfold != fold].reset_index(drop=True) valid_df = df[df.kfold", "dim=0) img = img.to(DEVICE, dtype=torch.float) label = label.to(DEVICE, dtype=torch.float) with torch.no_grad(): outputs =", "as optim from model import CassavaModel from loss import DenseCrossEntropy import dataset from", "= DataLoader(train_dataset, batch_size=BATCH_SIZE, num_workers=4, shuffle=True) vaid_dataloader = DataLoader(valid_dataset, batch_size=BATCH_SIZE, num_workers=4, shuffle=True) device =", "= label.to(DEVICE, dtype=torch.float) outputs = model(img) # print(f'outputs \\n {outputs}') loss = criterion(outputs,", "i in range(folds): valid_idx = df[df.kfold == i].index val_preds, train_fold_results = train_one_fold(i, model,", "t_loss = 0 for step, batch in enumerate(train_dataloader): img = batch[0] label =", 
"CassavaModel from loss import DenseCrossEntropy import dataset from config import * def train_one_fold(fold,", "enumerate(train_dataloader): img = batch[0] label = batch[1] img = img.to(DEVICE, dtype=torch.float) label =", "None: val_labels = label.clone().squeeze(-1) else: val_labels = torch.cat((val_labels, label.squeeze(-1)), dim=0) img = img.to(DEVICE,", "== fold].reset_index(drop=True) train_dataset = dataset.CassavaDataset(train_df, device=DEVICE) valid_dataset = dataset.CassavaDataset(valid_df, device=DEVICE) train_dataloader = DataLoader(train_dataset,", "batch[1] img = img.to(DEVICE, dtype=torch.float) label = label.to(DEVICE, dtype=torch.float) outputs = model(img) #", "DataLoader(train_dataset, batch_size=BATCH_SIZE, num_workers=4, shuffle=True) vaid_dataloader = DataLoader(valid_dataset, batch_size=BATCH_SIZE, num_workers=4, shuffle=True) device = torch.device(DEVICE)", "val_loss = 0 val_preds = None val_labels = None for step, batch in", "torch.no_grad(): outputs = model(img) loss = criterion(outputs, label.squeeze(-1)) val_loss += loss.item() preds =", "= pd.read_csv('./input/train_ohe.csv') oof_preds = np.zeros((df.shape[0])) train_results = [] for i in range(folds): valid_idx", "sklearn.metrics import accuracy_score import torch from torch.utils.data import DataLoader import torch.optim as optim", "}) return val_preds, train_fold_results def k_fold_train(folds): model = CassavaModel() model.to(DEVICE) plist = [{'params':model.parameters(),", "img = img.to(DEVICE, dtype=torch.float) label = label.to(DEVICE, dtype=torch.float) with torch.no_grad(): outputs = model(img)", "= criterion(outputs, label.squeeze(-1)) val_loss += loss.item() preds = torch.softmax(outputs, dim=1).data.cuda() if val_preds is", "val_loss += loss.item() preds = torch.softmax(outputs, dim=1).data.cuda() if val_preds is None: val_preds =", "'fold': i, 'lr': optimizer.state_dict()['params_groups'][0]['lr'], 'model_state_dict': model.state_dict(), 'optimizer_state_dict': 
optimizer.state_dict() }, f'./model/baseline/val_loss {train_results[i].val_loss}.pth') if __name__", "i].index val_preds, train_fold_results = train_one_fold(i, model, optimizer) oof_preds[valid_idx] = val_preds.numpy() train_results += train_fold_results", "'lr': optimizer.state_dict()['params_groups'][0]['lr'], 'model_state_dict': model.state_dict(), 'optimizer_state_dict': optimizer.state_dict() }, f'./model/baseline/val_loss {train_results[i].val_loss}.pth') if __name__ == '__main__':", "t_loss / len(train_dataloader), 'valid_loss': val_loss / len(vaid_dataloader) }) return val_preds, train_fold_results def k_fold_train(folds):", "= 0 for step, batch in enumerate(train_dataloader): img = batch[0] label = batch[1]", "len(vaid_dataloader) }) return val_preds, train_fold_results def k_fold_train(folds): model = CassavaModel() model.to(DEVICE) plist =", "val_preds = None val_labels = None for step, batch in enumerate(vaid_dataloader): img =", "None: val_preds = preds else: val_preds = torch.cat((val_preds, preds), dim=0) val_preds = torch.argmax(val_preds,", "train_dataloader = DataLoader(train_dataset, batch_size=BATCH_SIZE, num_workers=4, shuffle=True) vaid_dataloader = DataLoader(valid_dataset, batch_size=BATCH_SIZE, num_workers=4, shuffle=True) device", "dataset.CassavaDataset(valid_df, device=DEVICE) train_dataloader = DataLoader(train_dataset, batch_size=BATCH_SIZE, num_workers=4, shuffle=True) vaid_dataloader = DataLoader(valid_dataset, batch_size=BATCH_SIZE, num_workers=4,", "val_loss / len(vaid_dataloader) }) return val_preds, train_fold_results def k_fold_train(folds): model = CassavaModel() model.to(DEVICE)", "torch.optim as optim from model import CassavaModel from loss import DenseCrossEntropy import dataset", "val_preds, train_fold_results def k_fold_train(folds): model = CassavaModel() model.to(DEVICE) plist = [{'params':model.parameters(), 'lr':5e-5}] optimizer", "preds = torch.softmax(outputs, dim=1).data.cuda() if val_preds is None: val_preds = 
preds else: val_preds", "train_results = [] for i in range(folds): valid_idx = df[df.kfold == i].index val_preds,", "epoch, 'train_loss': t_loss / len(train_dataloader), 'valid_loss': val_loss / len(vaid_dataloader) }) return val_preds, train_fold_results", "+= train_fold_results torch.save({ 'fold': i, 'lr': optimizer.state_dict()['params_groups'][0]['lr'], 'model_state_dict': model.state_dict(), 'optimizer_state_dict': optimizer.state_dict() }, f'./model/baseline/val_loss", "CassavaModel() model.to(DEVICE) plist = [{'params':model.parameters(), 'lr':5e-5}] optimizer = optim.Adam(plist) df = pd.read_csv('./input/train_ohe.csv') oof_preds", "import torch from torch.utils.data import DataLoader import torch.optim as optim from model import", "pd import numpy as np from sklearn.metrics import accuracy_score import torch from torch.utils.data", "optimizer): df = pd.read_csv('./input/train_ohe.csv') train_df = df[df.kfold != fold].reset_index(drop=True) valid_df = df[df.kfold ==", "torch.device(DEVICE) criterion = DenseCrossEntropy() train_fold_results = [] for epoch in range(EPOCHS): model.train() t_loss", "else: val_labels = torch.cat((val_labels, label.squeeze(-1)), dim=0) img = img.to(DEVICE, dtype=torch.float) label = label.to(DEVICE,", "label = label.to(DEVICE, dtype=torch.float) outputs = model(img) # print(f'outputs \\n {outputs}') loss =", "train_fold_results.append({ 'fold': fold, 'epoch': epoch, 'train_loss': t_loss / len(train_dataloader), 'valid_loss': val_loss / len(vaid_dataloader)", "= DataLoader(valid_dataset, batch_size=BATCH_SIZE, num_workers=4, shuffle=True) device = torch.device(DEVICE) criterion = DenseCrossEntropy() train_fold_results =", "dtype=torch.float) outputs = model(img) # print(f'outputs \\n {outputs}') loss = criterion(outputs, label.squeeze(-1)) loss.backward()", "dim=1).data.cuda() if val_preds is None: val_preds = preds else: val_preds = torch.cat((val_preds, preds),", "shuffle=True) device = torch.device(DEVICE) criterion = 
DenseCrossEntropy() train_fold_results = [] for epoch in", "preds else: val_preds = torch.cat((val_preds, preds), dim=0) val_preds = torch.argmax(val_preds, dim=1) print(f'EPOCH :", "step, batch in enumerate(vaid_dataloader): img = batch[0] label = batch[1] if val_labels is", "in range(folds): valid_idx = df[df.kfold == i].index val_preds, train_fold_results = train_one_fold(i, model, optimizer)", "df = pd.read_csv('./input/train_ohe.csv') train_df = df[df.kfold != fold].reset_index(drop=True) valid_df = df[df.kfold == fold].reset_index(drop=True)", "if val_preds is None: val_preds = preds else: val_preds = torch.cat((val_preds, preds), dim=0)", "= img.to(DEVICE, dtype=torch.float) label = label.to(DEVICE, dtype=torch.float) with torch.no_grad(): outputs = model(img) loss", "{val_loss}') train_fold_results.append({ 'fold': fold, 'epoch': epoch, 'train_loss': t_loss / len(train_dataloader), 'valid_loss': val_loss /", "for step, batch in enumerate(vaid_dataloader): img = batch[0] label = batch[1] if val_labels", "for i in range(folds): valid_idx = df[df.kfold == i].index val_preds, train_fold_results = train_one_fold(i,", "fold].reset_index(drop=True) train_dataset = dataset.CassavaDataset(train_df, device=DEVICE) valid_dataset = dataset.CassavaDataset(valid_df, device=DEVICE) train_dataloader = DataLoader(train_dataset, batch_size=BATCH_SIZE,", "batch[0] label = batch[1] if val_labels is None: val_labels = label.clone().squeeze(-1) else: val_labels", "val_labels = torch.cat((val_labels, label.squeeze(-1)), dim=0) img = img.to(DEVICE, dtype=torch.float) label = label.to(DEVICE, dtype=torch.float)", "* def train_one_fold(fold, model, optimizer): df = pd.read_csv('./input/train_ohe.csv') train_df = df[df.kfold != fold].reset_index(drop=True)", "shuffle=True) vaid_dataloader = DataLoader(valid_dataset, batch_size=BATCH_SIZE, num_workers=4, shuffle=True) device = torch.device(DEVICE) criterion = DenseCrossEntropy()", "train_one_fold(fold, model, optimizer): df = 
pd.read_csv('./input/train_ohe.csv') train_df = df[df.kfold != fold].reset_index(drop=True) valid_df =", "+= loss.item() preds = torch.softmax(outputs, dim=1).data.cuda() if val_preds is None: val_preds = preds", "criterion(outputs, label.squeeze(-1)) val_loss += loss.item() preds = torch.softmax(outputs, dim=1).data.cuda() if val_preds is None:", "None for step, batch in enumerate(vaid_dataloader): img = batch[0] label = batch[1] if", "img.to(DEVICE, dtype=torch.float) label = label.to(DEVICE, dtype=torch.float) outputs = model(img) # print(f'outputs \\n {outputs}')", "= label.clone().squeeze(-1) else: val_labels = torch.cat((val_labels, label.squeeze(-1)), dim=0) img = img.to(DEVICE, dtype=torch.float) label", "= torch.cat((val_labels, label.squeeze(-1)), dim=0) img = img.to(DEVICE, dtype=torch.float) label = label.to(DEVICE, dtype=torch.float) with", "DataLoader import torch.optim as optim from model import CassavaModel from loss import DenseCrossEntropy", "import dataset from config import * def train_one_fold(fold, model, optimizer): df = pd.read_csv('./input/train_ohe.csv')", "def train_one_fold(fold, model, optimizer): df = pd.read_csv('./input/train_ohe.csv') train_df = df[df.kfold != fold].reset_index(drop=True) valid_df", "= batch[1] img = img.to(DEVICE, dtype=torch.float) label = label.to(DEVICE, dtype=torch.float) outputs = model(img)", "= preds else: val_preds = torch.cat((val_preds, preds), dim=0) val_preds = torch.argmax(val_preds, dim=1) print(f'EPOCH", "= optim.Adam(plist) df = pd.read_csv('./input/train_ohe.csv') oof_preds = np.zeros((df.shape[0])) train_results = [] for i", "train_fold_results = train_one_fold(i, model, optimizer) oof_preds[valid_idx] = val_preds.numpy() train_results += train_fold_results torch.save({ 'fold':", "label = batch[1] if val_labels is None: val_labels = label.clone().squeeze(-1) else: val_labels =", "= torch.softmax(outputs, dim=1).data.cuda() if val_preds is None: val_preds = preds else: val_preds =", "batch[1] if 
val_labels is None: val_labels = label.clone().squeeze(-1) else: val_labels = torch.cat((val_labels, label.squeeze(-1)),", "/ len(train_dataloader), 'valid_loss': val_loss / len(vaid_dataloader) }) return val_preds, train_fold_results def k_fold_train(folds): model", "!= fold].reset_index(drop=True) valid_df = df[df.kfold == fold].reset_index(drop=True) train_dataset = dataset.CassavaDataset(train_df, device=DEVICE) valid_dataset =", "0 val_preds = None val_labels = None for step, batch in enumerate(vaid_dataloader): img", "outputs = model(img) loss = criterion(outputs, label.squeeze(-1)) val_loss += loss.item() preds = torch.softmax(outputs,", "= torch.argmax(val_preds, dim=1) print(f'EPOCH : {epoch}, train_loss: {t_loss}, valid_loss: {val_loss}') train_fold_results.append({ 'fold': fold,", "label.to(DEVICE, dtype=torch.float) with torch.no_grad(): outputs = model(img) loss = criterion(outputs, label.squeeze(-1)) val_loss +=", "[] for epoch in range(EPOCHS): model.train() t_loss = 0 for step, batch in", "epoch in range(EPOCHS): model.train() t_loss = 0 for step, batch in enumerate(train_dataloader): img", "val_preds = torch.cat((val_preds, preds), dim=0) val_preds = torch.argmax(val_preds, dim=1) print(f'EPOCH : {epoch}, train_loss:", "torch.argmax(val_preds, dim=1) print(f'EPOCH : {epoch}, train_loss: {t_loss}, valid_loss: {val_loss}') train_fold_results.append({ 'fold': fold, 'epoch':", "[{'params':model.parameters(), 'lr':5e-5}] optimizer = optim.Adam(plist) df = pd.read_csv('./input/train_ohe.csv') oof_preds = np.zeros((df.shape[0])) train_results =", "= label.to(DEVICE, dtype=torch.float) with torch.no_grad(): outputs = model(img) loss = criterion(outputs, label.squeeze(-1)) val_loss", "np.zeros((df.shape[0])) train_results = [] for i in range(folds): valid_idx = df[df.kfold == i].index", "train_one_fold(i, model, optimizer) oof_preds[valid_idx] = val_preds.numpy() train_results += train_fold_results torch.save({ 'fold': i, 'lr':", "as pd import numpy as np 
from sklearn.metrics import accuracy_score import torch from", "import * def train_one_fold(fold, model, optimizer): df = pd.read_csv('./input/train_ohe.csv') train_df = df[df.kfold !=", "dim=0) val_preds = torch.argmax(val_preds, dim=1) print(f'EPOCH : {epoch}, train_loss: {t_loss}, valid_loss: {val_loss}') train_fold_results.append({", "optimizer.zero_grad() model.eval() val_loss = 0 val_preds = None val_labels = None for step,", "enumerate(vaid_dataloader): img = batch[0] label = batch[1] if val_labels is None: val_labels =", "return val_preds, train_fold_results def k_fold_train(folds): model = CassavaModel() model.to(DEVICE) plist = [{'params':model.parameters(), 'lr':5e-5}]", "len(train_dataloader), 'valid_loss': val_loss / len(vaid_dataloader) }) return val_preds, train_fold_results def k_fold_train(folds): model =", "in enumerate(vaid_dataloader): img = batch[0] label = batch[1] if val_labels is None: val_labels", "train_loss: {t_loss}, valid_loss: {val_loss}') train_fold_results.append({ 'fold': fold, 'epoch': epoch, 'train_loss': t_loss / len(train_dataloader)," ]
[ "= gdb.parse_and_eval(arg) self.print_list(head) def print_list(self, head): malformed = False seen = [] print", "seen.append(node) while node != head.address: print \"%x ->\" % node, node = node['next']", "self).__init__(\"print-list\", gdb.COMMAND_DATA, gdb.COMPLETE_SYMBOL) def invoke(self, arg, from_tty): head = gdb.parse_and_eval(arg) self.print_list(head) def print_list(self,", "\"\"\"Prints out an F4OS linked list in a pretty format\"\"\" def __init__(self): super(Print_List,", "invoke(self, arg, from_tty): head = gdb.parse_and_eval(arg) self.print_list(head) def print_list(self, head): malformed = False", "head['next'] seen.append(node) while node != head.address: print \"%x ->\" % node, node =", "node in seen: malformed = True break seen.append(node) print \"%x\" % node if", "gdb.COMPLETE_SYMBOL) def invoke(self, arg, from_tty): head = gdb.parse_and_eval(arg) self.print_list(head) def print_list(self, head): malformed", "if node in seen: malformed = True break seen.append(node) print \"%x\" % node", "\"%x ->\" % head.address, node = head['next'] seen.append(node) while node != head.address: print", "= [] print \"%x ->\" % head.address, node = head['next'] seen.append(node) while node", "def __init__(self): super(Print_List, self).__init__(\"print-list\", gdb.COMMAND_DATA, gdb.COMPLETE_SYMBOL) def invoke(self, arg, from_tty): head = gdb.parse_and_eval(arg)", "print \"%x ->\" % node, node = node['next'] if node in seen: malformed", "class Print_List(gdb.Command): \"\"\"Prints out an F4OS linked list in a pretty format\"\"\" def", "self.print_list(head) def print_list(self, head): malformed = False seen = [] print \"%x ->\"", "->\" % node, node = node['next'] if node in seen: malformed = True", "= node['next'] if node in seen: malformed = True break seen.append(node) print \"%x\"", "print_list(self, head): malformed = False seen = [] print \"%x ->\" % head.address,", "% head.address, node = head['next'] seen.append(node) while node != head.address: print 
\"%x ->\"", "head.address, node = head['next'] seen.append(node) while node != head.address: print \"%x ->\" %", "node, node = node['next'] if node in seen: malformed = True break seen.append(node)", "node = node['next'] if node in seen: malformed = True break seen.append(node) print", "<gh_stars>10-100 import gdb class Print_List(gdb.Command): \"\"\"Prints out an F4OS linked list in a", "F4OS linked list in a pretty format\"\"\" def __init__(self): super(Print_List, self).__init__(\"print-list\", gdb.COMMAND_DATA, gdb.COMPLETE_SYMBOL)", "!= head.address: print \"%x ->\" % node, node = node['next'] if node in", "break seen.append(node) print \"%x\" % node if malformed: print \"(Loop detected. Malformed list?)\"", "[] print \"%x ->\" % head.address, node = head['next'] seen.append(node) while node !=", "__init__(self): super(Print_List, self).__init__(\"print-list\", gdb.COMMAND_DATA, gdb.COMPLETE_SYMBOL) def invoke(self, arg, from_tty): head = gdb.parse_and_eval(arg) self.print_list(head)", "from_tty): head = gdb.parse_and_eval(arg) self.print_list(head) def print_list(self, head): malformed = False seen =", "head = gdb.parse_and_eval(arg) self.print_list(head) def print_list(self, head): malformed = False seen = []", "print \"%x ->\" % head.address, node = head['next'] seen.append(node) while node != head.address:", "super(Print_List, self).__init__(\"print-list\", gdb.COMMAND_DATA, gdb.COMPLETE_SYMBOL) def invoke(self, arg, from_tty): head = gdb.parse_and_eval(arg) self.print_list(head) def", "format\"\"\" def __init__(self): super(Print_List, self).__init__(\"print-list\", gdb.COMMAND_DATA, gdb.COMPLETE_SYMBOL) def invoke(self, arg, from_tty): head =", "= False seen = [] print \"%x ->\" % head.address, node = head['next']", "Print_List(gdb.Command): \"\"\"Prints out an F4OS linked list in a pretty format\"\"\" def __init__(self):", "linked list in a pretty format\"\"\" def __init__(self): super(Print_List, self).__init__(\"print-list\", gdb.COMMAND_DATA, 
gdb.COMPLETE_SYMBOL) def", "\"%x ->\" % node, node = node['next'] if node in seen: malformed =", "arg, from_tty): head = gdb.parse_and_eval(arg) self.print_list(head) def print_list(self, head): malformed = False seen", "in seen: malformed = True break seen.append(node) print \"%x\" % node if malformed:", "seen: malformed = True break seen.append(node) print \"%x\" % node if malformed: print", "import gdb class Print_List(gdb.Command): \"\"\"Prints out an F4OS linked list in a pretty", "node != head.address: print \"%x ->\" % node, node = node['next'] if node", "node['next'] if node in seen: malformed = True break seen.append(node) print \"%x\" %", "% node, node = node['next'] if node in seen: malformed = True break", "malformed = True break seen.append(node) print \"%x\" % node if malformed: print \"(Loop", "malformed = False seen = [] print \"%x ->\" % head.address, node =", "seen.append(node) print \"%x\" % node if malformed: print \"(Loop detected. Malformed list?)\" Print_List()", "->\" % head.address, node = head['next'] seen.append(node) while node != head.address: print \"%x", "= True break seen.append(node) print \"%x\" % node if malformed: print \"(Loop detected.", "head.address: print \"%x ->\" % node, node = node['next'] if node in seen:", "head): malformed = False seen = [] print \"%x ->\" % head.address, node", "def print_list(self, head): malformed = False seen = [] print \"%x ->\" %", "gdb.parse_and_eval(arg) self.print_list(head) def print_list(self, head): malformed = False seen = [] print \"%x", "list in a pretty format\"\"\" def __init__(self): super(Print_List, self).__init__(\"print-list\", gdb.COMMAND_DATA, gdb.COMPLETE_SYMBOL) def invoke(self,", "node = head['next'] seen.append(node) while node != head.address: print \"%x ->\" % node,", "in a pretty format\"\"\" def __init__(self): super(Print_List, self).__init__(\"print-list\", gdb.COMMAND_DATA, gdb.COMPLETE_SYMBOL) def invoke(self, arg,", "an F4OS linked list in a pretty format\"\"\" def 
__init__(self): super(Print_List, self).__init__(\"print-list\", gdb.COMMAND_DATA,", "a pretty format\"\"\" def __init__(self): super(Print_List, self).__init__(\"print-list\", gdb.COMMAND_DATA, gdb.COMPLETE_SYMBOL) def invoke(self, arg, from_tty):", "True break seen.append(node) print \"%x\" % node if malformed: print \"(Loop detected. Malformed", "gdb.COMMAND_DATA, gdb.COMPLETE_SYMBOL) def invoke(self, arg, from_tty): head = gdb.parse_and_eval(arg) self.print_list(head) def print_list(self, head):", "def invoke(self, arg, from_tty): head = gdb.parse_and_eval(arg) self.print_list(head) def print_list(self, head): malformed =", "seen = [] print \"%x ->\" % head.address, node = head['next'] seen.append(node) while", "= head['next'] seen.append(node) while node != head.address: print \"%x ->\" % node, node", "while node != head.address: print \"%x ->\" % node, node = node['next'] if", "False seen = [] print \"%x ->\" % head.address, node = head['next'] seen.append(node)", "out an F4OS linked list in a pretty format\"\"\" def __init__(self): super(Print_List, self).__init__(\"print-list\",", "pretty format\"\"\" def __init__(self): super(Print_List, self).__init__(\"print-list\", gdb.COMMAND_DATA, gdb.COMPLETE_SYMBOL) def invoke(self, arg, from_tty): head", "gdb class Print_List(gdb.Command): \"\"\"Prints out an F4OS linked list in a pretty format\"\"\"" ]
[ "identifier_field self.target_field = target_field def fields_excluded_from_features(self): id_target = [self.identifier_field, self.target_field] return id_target +", "base_feature_fields_categorical(self): fields = sorted(self.params['base_categorical_n_levels_dict'].keys()) return self._exclude_non_features(fields) def base_feature_fields(self): return self.base_feature_fields_numerical() + self.base_feature_fields_categorical() def", "def encoded_feature_fields(self): return self.encoded_feature_fields_numerical() + self.encoded_feature_fields_categorical() def omitted_feature_fields_for_input(self): encoded = self.encoded_feature_fields() return [field", "arg in args: results.update(arg) return results class FeatureSetBase: \"\"\" Generic interface for feature", "out in derived class self.logger = logging.getLogger(__name__) self.params = None self.info = None", "base_features(self, processed_row): return {k: processed_row[k] for k in self.base_feature_fields()} def derived_features_categorical(self, processed_row): #", "self.derived_features_categorical(processed_row) return _combine_dicts(num, cat) def features(self, processed_row): base = self.base_features(processed_row) derv = self.derived_features(processed_row)", "def omitted_feature_fields_for_input(self): encoded = self.encoded_feature_fields() return [field for field in encoded if field", "k in self.base_feature_fields()} def derived_features_categorical(self, processed_row): # TODO: override assert isinstance(processed_row, dict) return", "self.base_features(processed_row) derv = self.derived_features(processed_row) return _combine_dicts(base, derv) def ml_fields(self): categorical_n_levels_dict = self.params['base_categorical_n_levels_dict'].copy() categorical_n_levels_dict.update(self.params['derived_categorical_n_levels_dict'])", "return [field for field in encoded if field not in self.base_feature_fields()] # feature", "processed_row): # TODO: override assert isinstance(processed_row, dict) 
return {} def derived_features(self, processed_row): num", "__init__(self, identifier_field, target_field): # fields to be filled out in derived class self.logger", "for feature sets \"\"\" def __init__(self, identifier_field, target_field): # fields to be filled", "processed_row): # TODO: override assert isinstance(processed_row, dict) return {} def derived_features_numerical(self, processed_row): #", "self.base_feature_fields()] # feature transformations def base_features_numerical(self, processed_row): return {k: processed_row[k] for k in", "= self.base_features(processed_row) derv = self.derived_features(processed_row) return _combine_dicts(base, derv) def ml_fields(self): categorical_n_levels_dict = self.params['base_categorical_n_levels_dict'].copy()", "_exclude(fields, self.fields_excluded_from_features()) def base_feature_fields_numerical(self): fields = self.params['base_fields_numerical'] return self._exclude_non_features(fields) def base_feature_fields_categorical(self): fields =", "self._exclude_non_features(fields) def base_feature_fields_categorical(self): fields = sorted(self.params['base_categorical_n_levels_dict'].keys()) return self._exclude_non_features(fields) def base_feature_fields(self): return self.base_feature_fields_numerical() +", "return self.base_feature_fields_numerical() + self.derived_feature_fields_numerical() def available_feature_fields_categorical(self): return self.base_feature_fields_categorical() + self.derived_feature_fields_categorical() def encoded_feature_fields_numerical(self): return", "self.fields_excluded_from_features()) def base_feature_fields_numerical(self): fields = self.params['base_fields_numerical'] return self._exclude_non_features(fields) def base_feature_fields_categorical(self): fields = sorted(self.params['base_categorical_n_levels_dict'].keys())", "self.params['base_categorical_n_levels_dict'].copy() categorical_n_levels_dict.update(self.params['derived_categorical_n_levels_dict']) cat_encoded = {k: v for 
k, v in categorical_n_levels_dict.items() if k", "def derived_feature_fields(self): return self.derived_feature_fields_numerical() + self.derived_feature_fields_categorical() def available_feature_fields_numerical(self): return self.base_feature_fields_numerical() + self.derived_feature_fields_numerical() def", "derived_features_numerical(self, processed_row): # TODO: override assert isinstance(processed_row, dict) return {} def derived_features(self, processed_row):", "encoded = self.encoded_feature_fields() return [field for field in encoded if field not in", "results.update(arg) return results class FeatureSetBase: \"\"\" Generic interface for feature sets \"\"\" def", "not in self.base_feature_fields()] # feature transformations def base_features_numerical(self, processed_row): return {k: processed_row[k] for", "= {} for arg in args: results.update(arg) return results class FeatureSetBase: \"\"\" Generic", "def _exclude(fields, excluded): return [field for field in fields if field not in", "return self._exclude_non_features(fields) def base_feature_fields(self): return self.base_feature_fields_numerical() + self.base_feature_fields_categorical() def derived_feature_fields_numerical(self): return self.params['derived_fields_numerical'] def", "set(cat_encoded.keys()).intersection(numeric_fields) if intersection: self.logger.info('categorical') self.logger.info(cat_encoded) self.logger.info('numerical') self.logger.info(numeric_fields) self.logger.info('intersection') self.logger.info(intersection) raise ValueError('categorical and numeric", "return {k: processed_row[k] for k in self.base_feature_fields_categorical()} def base_features(self, processed_row): return {k: processed_row[k]", "categorical_n_levels_dict.update(self.params['derived_categorical_n_levels_dict']) cat_encoded = {k: v for k, v in categorical_n_levels_dict.items() if k in", "return self._exclude_non_features(fields) def base_feature_fields_categorical(self): fields = 
sorted(self.params['base_categorical_n_levels_dict'].keys()) return self._exclude_non_features(fields) def base_feature_fields(self): return self.base_feature_fields_numerical()", "self.base_feature_fields_numerical()} def base_features_categorical(self, processed_row): return {k: processed_row[k] for k in self.base_feature_fields_categorical()} def base_features(self,", "self.target_field] return id_target + self.params['extra_information_fields'] def _exclude_non_features(self, fields): return _exclude(fields, self.fields_excluded_from_features()) def base_feature_fields_numerical(self):", "in fields if field not in excluded] def _combine_dicts(*args): results = {} for", "def available_feature_fields_categorical(self): return self.base_feature_fields_categorical() + self.derived_feature_fields_categorical() def encoded_feature_fields_numerical(self): return _exclude(self.available_feature_fields_numerical(), self.params['encoder_excluded_fields']) def encoded_feature_fields_categorical(self):", "self.base_feature_fields_categorical() + self.derived_feature_fields_categorical() def encoded_feature_fields_numerical(self): return _exclude(self.available_feature_fields_numerical(), self.params['encoder_excluded_fields']) def encoded_feature_fields_categorical(self): return _exclude(self.available_feature_fields_categorical(), self.params['encoder_excluded_fields'])", "features(self, processed_row): base = self.base_features(processed_row) derv = self.derived_features(processed_row) return _combine_dicts(base, derv) def ml_fields(self):", "self.target_field = target_field def fields_excluded_from_features(self): id_target = [self.identifier_field, self.target_field] return id_target + self.params['extra_information_fields']", "return self.base_feature_fields_categorical() + self.derived_feature_fields_categorical() def encoded_feature_fields_numerical(self): return _exclude(self.available_feature_fields_numerical(), self.params['encoder_excluded_fields']) def 
encoded_feature_fields_categorical(self): return _exclude(self.available_feature_fields_categorical(),", "self.logger.info('intersection') self.logger.info(intersection) raise ValueError('categorical and numeric overlap') return {'categorical': cat_encoded, 'numerical': numeric_fields, 'target_name':", "derv) def ml_fields(self): categorical_n_levels_dict = self.params['base_categorical_n_levels_dict'].copy() categorical_n_levels_dict.update(self.params['derived_categorical_n_levels_dict']) cat_encoded = {k: v for k,", "not in excluded] def _combine_dicts(*args): results = {} for arg in args: results.update(arg)", "\"\"\" Generic interface for feature sets \"\"\" def __init__(self, identifier_field, target_field): # fields", "self.base_feature_fields()} def derived_features_categorical(self, processed_row): # TODO: override assert isinstance(processed_row, dict) return {} def", "= self.derived_features_categorical(processed_row) return _combine_dicts(num, cat) def features(self, processed_row): base = self.base_features(processed_row) derv =", "# fields to be filled out in derived class self.logger = logging.getLogger(__name__) self.params", "filled out in derived class self.logger = logging.getLogger(__name__) self.params = None self.info =", "_combine_dicts(num, cat) def features(self, processed_row): base = self.base_features(processed_row) derv = self.derived_features(processed_row) return _combine_dicts(base,", "= None self.identifier_field = identifier_field self.target_field = target_field def fields_excluded_from_features(self): id_target = [self.identifier_field,", "{k: processed_row[k] for k in self.base_feature_fields_categorical()} def base_features(self, processed_row): return {k: processed_row[k] for", "k in self.base_feature_fields_categorical()} def base_features(self, processed_row): return {k: processed_row[k] for k in self.base_feature_fields()}", "class FeatureSetBase: \"\"\" Generic interface for feature sets \"\"\" def __init__(self, 
identifier_field, target_field):", "return sorted(self.params['derived_categorical_n_levels_dict'].keys()) def derived_feature_fields(self): return self.derived_feature_fields_numerical() + self.derived_feature_fields_categorical() def available_feature_fields_numerical(self): return self.base_feature_fields_numerical() +", "available_feature_fields_numerical(self): return self.base_feature_fields_numerical() + self.derived_feature_fields_numerical() def available_feature_fields_categorical(self): return self.base_feature_fields_categorical() + self.derived_feature_fields_categorical() def encoded_feature_fields_numerical(self):", "_exclude(self.available_feature_fields_categorical(), self.params['encoder_excluded_fields']) def encoded_feature_fields(self): return self.encoded_feature_fields_numerical() + self.encoded_feature_fields_categorical() def omitted_feature_fields_for_input(self): encoded = self.encoded_feature_fields()", "self.identifier_field = identifier_field self.target_field = target_field def fields_excluded_from_features(self): id_target = [self.identifier_field, self.target_field] return", "v in categorical_n_levels_dict.items() if k in self.encoded_feature_fields_categorical()} numeric_fields = self.encoded_feature_fields_numerical() intersection = set(cat_encoded.keys()).intersection(numeric_fields)", "None self.identifier_field = identifier_field self.target_field = target_field def fields_excluded_from_features(self): id_target = [self.identifier_field, self.target_field]", "+ self.params['extra_information_fields'] def _exclude_non_features(self, fields): return _exclude(fields, self.fields_excluded_from_features()) def base_feature_fields_numerical(self): fields = self.params['base_fields_numerical']", "= self.derived_features_numerical(processed_row) cat = self.derived_features_categorical(processed_row) return _combine_dicts(num, cat) def features(self, processed_row): base =", "_combine_dicts(base, derv) def ml_fields(self): 
categorical_n_levels_dict = self.params['base_categorical_n_levels_dict'].copy() categorical_n_levels_dict.update(self.params['derived_categorical_n_levels_dict']) cat_encoded = {k: v for", "isinstance(processed_row, dict) return {} def derived_features(self, processed_row): num = self.derived_features_numerical(processed_row) cat = self.derived_features_categorical(processed_row)", "for k, v in categorical_n_levels_dict.items() if k in self.encoded_feature_fields_categorical()} numeric_fields = self.encoded_feature_fields_numerical() intersection", "return {k: processed_row[k] for k in self.base_feature_fields()} def derived_features_categorical(self, processed_row): # TODO: override", "in self.base_feature_fields_categorical()} def base_features(self, processed_row): return {k: processed_row[k] for k in self.base_feature_fields()} def", "id_target = [self.identifier_field, self.target_field] return id_target + self.params['extra_information_fields'] def _exclude_non_features(self, fields): return _exclude(fields,", "ml_fields(self): categorical_n_levels_dict = self.params['base_categorical_n_levels_dict'].copy() categorical_n_levels_dict.update(self.params['derived_categorical_n_levels_dict']) cat_encoded = {k: v for k, v in", "_exclude_non_features(self, fields): return _exclude(fields, self.fields_excluded_from_features()) def base_feature_fields_numerical(self): fields = self.params['base_fields_numerical'] return self._exclude_non_features(fields) def", "for field in fields if field not in excluded] def _combine_dicts(*args): results =", "None self.info = None self.identifier_field = identifier_field self.target_field = target_field def fields_excluded_from_features(self): id_target", "Generic interface for feature sets \"\"\" def __init__(self, identifier_field, target_field): # fields to", "self.params['derived_fields_numerical'] def derived_feature_fields_categorical(self): return sorted(self.params['derived_categorical_n_levels_dict'].keys()) def 
derived_feature_fields(self): return self.derived_feature_fields_numerical() + self.derived_feature_fields_categorical() def available_feature_fields_numerical(self):", "processed_row): return {k: processed_row[k] for k in self.base_feature_fields()} def derived_features_categorical(self, processed_row): # TODO:", "override assert isinstance(processed_row, dict) return {} def derived_features_numerical(self, processed_row): # TODO: override assert", "= target_field def fields_excluded_from_features(self): id_target = [self.identifier_field, self.target_field] return id_target + self.params['extra_information_fields'] def", "= self.encoded_feature_fields_numerical() intersection = set(cat_encoded.keys()).intersection(numeric_fields) if intersection: self.logger.info('categorical') self.logger.info(cat_encoded) self.logger.info('numerical') self.logger.info(numeric_fields) self.logger.info('intersection') self.logger.info(intersection)", "{k: processed_row[k] for k in self.base_feature_fields()} def derived_features_categorical(self, processed_row): # TODO: override assert", "self.params['extra_information_fields'] def _exclude_non_features(self, fields): return _exclude(fields, self.fields_excluded_from_features()) def base_feature_fields_numerical(self): fields = self.params['base_fields_numerical'] return", "cat_encoded = {k: v for k, v in categorical_n_levels_dict.items() if k in self.encoded_feature_fields_categorical()}", "[self.identifier_field, self.target_field] return id_target + self.params['extra_information_fields'] def _exclude_non_features(self, fields): return _exclude(fields, self.fields_excluded_from_features()) def", "fields): return _exclude(fields, self.fields_excluded_from_features()) def base_feature_fields_numerical(self): fields = self.params['base_fields_numerical'] return self._exclude_non_features(fields) def base_feature_fields_categorical(self):", "return {} def derived_features(self, processed_row): num = 
self.derived_features_numerical(processed_row) cat = self.derived_features_categorical(processed_row) return _combine_dicts(num,", "derived class self.logger = logging.getLogger(__name__) self.params = None self.info = None self.identifier_field =", "derived_feature_fields_numerical(self): return self.params['derived_fields_numerical'] def derived_feature_fields_categorical(self): return sorted(self.params['derived_categorical_n_levels_dict'].keys()) def derived_feature_fields(self): return self.derived_feature_fields_numerical() + self.derived_feature_fields_categorical()", "derived_feature_fields(self): return self.derived_feature_fields_numerical() + self.derived_feature_fields_categorical() def available_feature_fields_numerical(self): return self.base_feature_fields_numerical() + self.derived_feature_fields_numerical() def available_feature_fields_categorical(self):", "def encoded_feature_fields_categorical(self): return _exclude(self.available_feature_fields_categorical(), self.params['encoder_excluded_fields']) def encoded_feature_fields(self): return self.encoded_feature_fields_numerical() + self.encoded_feature_fields_categorical() def omitted_feature_fields_for_input(self):", "self.base_feature_fields_numerical() + self.derived_feature_fields_numerical() def available_feature_fields_categorical(self): return self.base_feature_fields_categorical() + self.derived_feature_fields_categorical() def encoded_feature_fields_numerical(self): return _exclude(self.available_feature_fields_numerical(),", "= self.encoded_feature_fields() return [field for field in encoded if field not in self.base_feature_fields()]", "self.encoded_feature_fields_numerical() intersection = set(cat_encoded.keys()).intersection(numeric_fields) if intersection: self.logger.info('categorical') self.logger.info(cat_encoded) self.logger.info('numerical') self.logger.info(numeric_fields) self.logger.info('intersection') self.logger.info(intersection) raise", 
"omitted_feature_fields_for_input(self): encoded = self.encoded_feature_fields() return [field for field in encoded if field not", "def _exclude_non_features(self, fields): return _exclude(fields, self.fields_excluded_from_features()) def base_feature_fields_numerical(self): fields = self.params['base_fields_numerical'] return self._exclude_non_features(fields)", "{k: processed_row[k] for k in self.base_feature_fields_numerical()} def base_features_categorical(self, processed_row): return {k: processed_row[k] for", "def derived_features(self, processed_row): num = self.derived_features_numerical(processed_row) cat = self.derived_features_categorical(processed_row) return _combine_dicts(num, cat) def", "self.derived_feature_fields_categorical() def available_feature_fields_numerical(self): return self.base_feature_fields_numerical() + self.derived_feature_fields_numerical() def available_feature_fields_categorical(self): return self.base_feature_fields_categorical() + self.derived_feature_fields_categorical()", "base_features_categorical(self, processed_row): return {k: processed_row[k] for k in self.base_feature_fields_categorical()} def base_features(self, processed_row): return", "= logging.getLogger(__name__) self.params = None self.info = None self.identifier_field = identifier_field self.target_field =", "assert isinstance(processed_row, dict) return {} def derived_features_numerical(self, processed_row): # TODO: override assert isinstance(processed_row,", "self.encoded_feature_fields() return [field for field in encoded if field not in self.base_feature_fields()] #", "intersection = set(cat_encoded.keys()).intersection(numeric_fields) if intersection: self.logger.info('categorical') self.logger.info(cat_encoded) self.logger.info('numerical') self.logger.info(numeric_fields) self.logger.info('intersection') self.logger.info(intersection) raise ValueError('categorical", "= {k: v for k, v in categorical_n_levels_dict.items() if k in 
self.encoded_feature_fields_categorical()} numeric_fields", "return _exclude(self.available_feature_fields_numerical(), self.params['encoder_excluded_fields']) def encoded_feature_fields_categorical(self): return _exclude(self.available_feature_fields_categorical(), self.params['encoder_excluded_fields']) def encoded_feature_fields(self): return self.encoded_feature_fields_numerical() +", "+ self.encoded_feature_fields_categorical() def omitted_feature_fields_for_input(self): encoded = self.encoded_feature_fields() return [field for field in encoded", "isinstance(processed_row, dict) return {} def derived_features_numerical(self, processed_row): # TODO: override assert isinstance(processed_row, dict)", "def base_features_numerical(self, processed_row): return {k: processed_row[k] for k in self.base_feature_fields_numerical()} def base_features_categorical(self, processed_row):", "fields = self.params['base_fields_numerical'] return self._exclude_non_features(fields) def base_feature_fields_categorical(self): fields = sorted(self.params['base_categorical_n_levels_dict'].keys()) return self._exclude_non_features(fields) def", "for k in self.base_feature_fields_numerical()} def base_features_categorical(self, processed_row): return {k: processed_row[k] for k in", "identifier_field, target_field): # fields to be filled out in derived class self.logger =", "_exclude(self.available_feature_fields_numerical(), self.params['encoder_excluded_fields']) def encoded_feature_fields_categorical(self): return _exclude(self.available_feature_fields_categorical(), self.params['encoder_excluded_fields']) def encoded_feature_fields(self): return self.encoded_feature_fields_numerical() + self.encoded_feature_fields_categorical()", "import logging def _exclude(fields, excluded): return [field for field in fields if field", "processed_row[k] for k in self.base_feature_fields()} def derived_features_categorical(self, processed_row): # TODO: override assert isinstance(processed_row,", 
"cat) def features(self, processed_row): base = self.base_features(processed_row) derv = self.derived_features(processed_row) return _combine_dicts(base, derv)", "results class FeatureSetBase: \"\"\" Generic interface for feature sets \"\"\" def __init__(self, identifier_field,", "available_feature_fields_categorical(self): return self.base_feature_fields_categorical() + self.derived_feature_fields_categorical() def encoded_feature_fields_numerical(self): return _exclude(self.available_feature_fields_numerical(), self.params['encoder_excluded_fields']) def encoded_feature_fields_categorical(self): return", "return {k: processed_row[k] for k in self.base_feature_fields_numerical()} def base_features_categorical(self, processed_row): return {k: processed_row[k]", "\"\"\" def __init__(self, identifier_field, target_field): # fields to be filled out in derived", "return self.params['derived_fields_numerical'] def derived_feature_fields_categorical(self): return sorted(self.params['derived_categorical_n_levels_dict'].keys()) def derived_feature_fields(self): return self.derived_feature_fields_numerical() + self.derived_feature_fields_categorical() def", "if field not in self.base_feature_fields()] # feature transformations def base_features_numerical(self, processed_row): return {k:", "in categorical_n_levels_dict.items() if k in self.encoded_feature_fields_categorical()} numeric_fields = self.encoded_feature_fields_numerical() intersection = set(cat_encoded.keys()).intersection(numeric_fields) if", "fields if field not in excluded] def _combine_dicts(*args): results = {} for arg", "self.params = None self.info = None self.identifier_field = identifier_field self.target_field = target_field def", "in encoded if field not in self.base_feature_fields()] # feature transformations def base_features_numerical(self, processed_row):", "numeric_fields = self.encoded_feature_fields_numerical() intersection = set(cat_encoded.keys()).intersection(numeric_fields) if intersection: 
self.logger.info('categorical') self.logger.info(cat_encoded) self.logger.info('numerical') self.logger.info(numeric_fields) self.logger.info('intersection')", "# TODO: override assert isinstance(processed_row, dict) return {} def derived_features(self, processed_row): num =", "self.base_feature_fields_categorical() def derived_feature_fields_numerical(self): return self.params['derived_fields_numerical'] def derived_feature_fields_categorical(self): return sorted(self.params['derived_categorical_n_levels_dict'].keys()) def derived_feature_fields(self): return self.derived_feature_fields_numerical()", "encoded_feature_fields_numerical(self): return _exclude(self.available_feature_fields_numerical(), self.params['encoder_excluded_fields']) def encoded_feature_fields_categorical(self): return _exclude(self.available_feature_fields_categorical(), self.params['encoder_excluded_fields']) def encoded_feature_fields(self): return self.encoded_feature_fields_numerical()", "be filled out in derived class self.logger = logging.getLogger(__name__) self.params = None self.info", "self.logger.info(numeric_fields) self.logger.info('intersection') self.logger.info(intersection) raise ValueError('categorical and numeric overlap') return {'categorical': cat_encoded, 'numerical': numeric_fields,", "return _exclude(self.available_feature_fields_categorical(), self.params['encoder_excluded_fields']) def encoded_feature_fields(self): return self.encoded_feature_fields_numerical() + self.encoded_feature_fields_categorical() def omitted_feature_fields_for_input(self): encoded =", "return _combine_dicts(base, derv) def ml_fields(self): categorical_n_levels_dict = self.params['base_categorical_n_levels_dict'].copy() categorical_n_levels_dict.update(self.params['derived_categorical_n_levels_dict']) cat_encoded = {k: v", "self.base_feature_fields_numerical() + self.base_feature_fields_categorical() def derived_feature_fields_numerical(self): return self.params['derived_fields_numerical'] def 
derived_feature_fields_categorical(self): return sorted(self.params['derived_categorical_n_levels_dict'].keys()) def derived_feature_fields(self):", "return self.encoded_feature_fields_numerical() + self.encoded_feature_fields_categorical() def omitted_feature_fields_for_input(self): encoded = self.encoded_feature_fields() return [field for field", "# feature transformations def base_features_numerical(self, processed_row): return {k: processed_row[k] for k in self.base_feature_fields_numerical()}", "+ self.derived_feature_fields_categorical() def encoded_feature_fields_numerical(self): return _exclude(self.available_feature_fields_numerical(), self.params['encoder_excluded_fields']) def encoded_feature_fields_categorical(self): return _exclude(self.available_feature_fields_categorical(), self.params['encoder_excluded_fields']) def", "field in fields if field not in excluded] def _combine_dicts(*args): results = {}", "def derived_features_numerical(self, processed_row): # TODO: override assert isinstance(processed_row, dict) return {} def derived_features(self,", "categorical_n_levels_dict = self.params['base_categorical_n_levels_dict'].copy() categorical_n_levels_dict.update(self.params['derived_categorical_n_levels_dict']) cat_encoded = {k: v for k, v in categorical_n_levels_dict.items()", "def __init__(self, identifier_field, target_field): # fields to be filled out in derived class", "self.info = None self.identifier_field = identifier_field self.target_field = target_field def fields_excluded_from_features(self): id_target =", "target_field def fields_excluded_from_features(self): id_target = [self.identifier_field, self.target_field] return id_target + self.params['extra_information_fields'] def _exclude_non_features(self,", "interface for feature sets \"\"\" def __init__(self, identifier_field, target_field): # fields to be", "= self.params['base_categorical_n_levels_dict'].copy() 
categorical_n_levels_dict.update(self.params['derived_categorical_n_levels_dict']) cat_encoded = {k: v for k, v in categorical_n_levels_dict.items() if", "def derived_feature_fields_numerical(self): return self.params['derived_fields_numerical'] def derived_feature_fields_categorical(self): return sorted(self.params['derived_categorical_n_levels_dict'].keys()) def derived_feature_fields(self): return self.derived_feature_fields_numerical() +", "fields to be filled out in derived class self.logger = logging.getLogger(__name__) self.params =", "derv = self.derived_features(processed_row) return _combine_dicts(base, derv) def ml_fields(self): categorical_n_levels_dict = self.params['base_categorical_n_levels_dict'].copy() categorical_n_levels_dict.update(self.params['derived_categorical_n_levels_dict']) cat_encoded", "def base_feature_fields(self): return self.base_feature_fields_numerical() + self.base_feature_fields_categorical() def derived_feature_fields_numerical(self): return self.params['derived_fields_numerical'] def derived_feature_fields_categorical(self): return", "def features(self, processed_row): base = self.base_features(processed_row) derv = self.derived_features(processed_row) return _combine_dicts(base, derv) def", "fields_excluded_from_features(self): id_target = [self.identifier_field, self.target_field] return id_target + self.params['extra_information_fields'] def _exclude_non_features(self, fields): return", "return _exclude(fields, self.fields_excluded_from_features()) def base_feature_fields_numerical(self): fields = self.params['base_fields_numerical'] return self._exclude_non_features(fields) def base_feature_fields_categorical(self): fields", "processed_row): return {k: processed_row[k] for k in self.base_feature_fields_numerical()} def base_features_categorical(self, processed_row): return {k:", "def derived_features_categorical(self, processed_row): # TODO: override assert isinstance(processed_row, dict) return {} def 
derived_features_numerical(self,", "dict) return {} def derived_features_numerical(self, processed_row): # TODO: override assert isinstance(processed_row, dict) return", "self.derived_features_numerical(processed_row) cat = self.derived_features_categorical(processed_row) return _combine_dicts(num, cat) def features(self, processed_row): base = self.base_features(processed_row)", "processed_row[k] for k in self.base_feature_fields_numerical()} def base_features_categorical(self, processed_row): return {k: processed_row[k] for k", "transformations def base_features_numerical(self, processed_row): return {k: processed_row[k] for k in self.base_feature_fields_numerical()} def base_features_categorical(self,", "override assert isinstance(processed_row, dict) return {} def derived_features(self, processed_row): num = self.derived_features_numerical(processed_row) cat", "base_features_numerical(self, processed_row): return {k: processed_row[k] for k in self.base_feature_fields_numerical()} def base_features_categorical(self, processed_row): return", "if intersection: self.logger.info('categorical') self.logger.info(cat_encoded) self.logger.info('numerical') self.logger.info(numeric_fields) self.logger.info('intersection') self.logger.info(intersection) raise ValueError('categorical and numeric overlap')", "return self.derived_feature_fields_numerical() + self.derived_feature_fields_categorical() def available_feature_fields_numerical(self): return self.base_feature_fields_numerical() + self.derived_feature_fields_numerical() def available_feature_fields_categorical(self): return", "{k: v for k, v in categorical_n_levels_dict.items() if k in self.encoded_feature_fields_categorical()} numeric_fields =", "in self.base_feature_fields()} def derived_features_categorical(self, processed_row): # TODO: override assert isinstance(processed_row, dict) return {}", "return [field for field in fields if field not in excluded] def _combine_dicts(*args):", "in excluded] def 
_combine_dicts(*args): results = {} for arg in args: results.update(arg) return", "fields = sorted(self.params['base_categorical_n_levels_dict'].keys()) return self._exclude_non_features(fields) def base_feature_fields(self): return self.base_feature_fields_numerical() + self.base_feature_fields_categorical() def derived_feature_fields_numerical(self):", "in self.base_feature_fields_numerical()} def base_features_categorical(self, processed_row): return {k: processed_row[k] for k in self.base_feature_fields_categorical()} def", "self.derived_feature_fields_numerical() + self.derived_feature_fields_categorical() def available_feature_fields_numerical(self): return self.base_feature_fields_numerical() + self.derived_feature_fields_numerical() def available_feature_fields_categorical(self): return self.base_feature_fields_categorical()", "+ self.derived_feature_fields_numerical() def available_feature_fields_categorical(self): return self.base_feature_fields_categorical() + self.derived_feature_fields_categorical() def encoded_feature_fields_numerical(self): return _exclude(self.available_feature_fields_numerical(), self.params['encoder_excluded_fields'])", "processed_row[k] for k in self.base_feature_fields_categorical()} def base_features(self, processed_row): return {k: processed_row[k] for k", "def ml_fields(self): categorical_n_levels_dict = self.params['base_categorical_n_levels_dict'].copy() categorical_n_levels_dict.update(self.params['derived_categorical_n_levels_dict']) cat_encoded = {k: v for k, v", "= sorted(self.params['base_categorical_n_levels_dict'].keys()) return self._exclude_non_features(fields) def base_feature_fields(self): return self.base_feature_fields_numerical() + self.base_feature_fields_categorical() def derived_feature_fields_numerical(self): return", "def base_features_categorical(self, processed_row): return {k: processed_row[k] for k in self.base_feature_fields_categorical()} def base_features(self, processed_row):", "assert 
isinstance(processed_row, dict) return {} def derived_features(self, processed_row): num = self.derived_features_numerical(processed_row) cat =", "for k in self.base_feature_fields()} def derived_features_categorical(self, processed_row): # TODO: override assert isinstance(processed_row, dict)", "_combine_dicts(*args): results = {} for arg in args: results.update(arg) return results class FeatureSetBase:", "processed_row): base = self.base_features(processed_row) derv = self.derived_features(processed_row) return _combine_dicts(base, derv) def ml_fields(self): categorical_n_levels_dict", "self._exclude_non_features(fields) def base_feature_fields(self): return self.base_feature_fields_numerical() + self.base_feature_fields_categorical() def derived_feature_fields_numerical(self): return self.params['derived_fields_numerical'] def derived_feature_fields_categorical(self):", "encoded_feature_fields_categorical(self): return _exclude(self.available_feature_fields_categorical(), self.params['encoder_excluded_fields']) def encoded_feature_fields(self): return self.encoded_feature_fields_numerical() + self.encoded_feature_fields_categorical() def omitted_feature_fields_for_input(self): encoded", "in self.encoded_feature_fields_categorical()} numeric_fields = self.encoded_feature_fields_numerical() intersection = set(cat_encoded.keys()).intersection(numeric_fields) if intersection: self.logger.info('categorical') self.logger.info(cat_encoded) self.logger.info('numerical')", "target_field): # fields to be filled out in derived class self.logger = logging.getLogger(__name__)", "sets \"\"\" def __init__(self, identifier_field, target_field): # fields to be filled out in", "logging def _exclude(fields, excluded): return [field for field in fields if field not", "def base_feature_fields_numerical(self): fields = self.params['base_fields_numerical'] return self._exclude_non_features(fields) def base_feature_fields_categorical(self): fields = 
sorted(self.params['base_categorical_n_levels_dict'].keys()) return", "FeatureSetBase: \"\"\" Generic interface for feature sets \"\"\" def __init__(self, identifier_field, target_field): #", "base = self.base_features(processed_row) derv = self.derived_features(processed_row) return _combine_dicts(base, derv) def ml_fields(self): categorical_n_levels_dict =", "for arg in args: results.update(arg) return results class FeatureSetBase: \"\"\" Generic interface for", "if field not in excluded] def _combine_dicts(*args): results = {} for arg in", "sorted(self.params['base_categorical_n_levels_dict'].keys()) return self._exclude_non_features(fields) def base_feature_fields(self): return self.base_feature_fields_numerical() + self.base_feature_fields_categorical() def derived_feature_fields_numerical(self): return self.params['derived_fields_numerical']", "sorted(self.params['derived_categorical_n_levels_dict'].keys()) def derived_feature_fields(self): return self.derived_feature_fields_numerical() + self.derived_feature_fields_categorical() def available_feature_fields_numerical(self): return self.base_feature_fields_numerical() + self.derived_feature_fields_numerical()", "categorical_n_levels_dict.items() if k in self.encoded_feature_fields_categorical()} numeric_fields = self.encoded_feature_fields_numerical() intersection = set(cat_encoded.keys()).intersection(numeric_fields) if intersection:", "feature sets \"\"\" def __init__(self, identifier_field, target_field): # fields to be filled out", "derived_features(self, processed_row): num = self.derived_features_numerical(processed_row) cat = self.derived_features_categorical(processed_row) return _combine_dicts(num, cat) def features(self,", "{} for arg in args: results.update(arg) return results class FeatureSetBase: \"\"\" Generic interface", "base_feature_fields_numerical(self): fields = self.params['base_fields_numerical'] return self._exclude_non_features(fields) def base_feature_fields_categorical(self): fields 
= sorted(self.params['base_categorical_n_levels_dict'].keys()) return self._exclude_non_features(fields)", "self.encoded_feature_fields_numerical() + self.encoded_feature_fields_categorical() def omitted_feature_fields_for_input(self): encoded = self.encoded_feature_fields() return [field for field in", "TODO: override assert isinstance(processed_row, dict) return {} def derived_features(self, processed_row): num = self.derived_features_numerical(processed_row)", "<reponame>camila-contreras/CD4ML-Scenarios<gh_stars>100-1000 import logging def _exclude(fields, excluded): return [field for field in fields if", "self.base_feature_fields_categorical()} def base_features(self, processed_row): return {k: processed_row[k] for k in self.base_feature_fields()} def derived_features_categorical(self,", "num = self.derived_features_numerical(processed_row) cat = self.derived_features_categorical(processed_row) return _combine_dicts(num, cat) def features(self, processed_row): base", "{} def derived_features(self, processed_row): num = self.derived_features_numerical(processed_row) cat = self.derived_features_categorical(processed_row) return _combine_dicts(num, cat)", "excluded): return [field for field in fields if field not in excluded] def", "def derived_feature_fields_categorical(self): return sorted(self.params['derived_categorical_n_levels_dict'].keys()) def derived_feature_fields(self): return self.derived_feature_fields_numerical() + self.derived_feature_fields_categorical() def available_feature_fields_numerical(self): return", "{} def derived_features_numerical(self, processed_row): # TODO: override assert isinstance(processed_row, dict) return {} def", "encoded_feature_fields(self): return self.encoded_feature_fields_numerical() + self.encoded_feature_fields_categorical() def omitted_feature_fields_for_input(self): encoded = self.encoded_feature_fields() return [field for", "class self.logger = logging.getLogger(__name__) self.params = None self.info = None 
self.identifier_field = identifier_field", "def encoded_feature_fields_numerical(self): return _exclude(self.available_feature_fields_numerical(), self.params['encoder_excluded_fields']) def encoded_feature_fields_categorical(self): return _exclude(self.available_feature_fields_categorical(), self.params['encoder_excluded_fields']) def encoded_feature_fields(self): return", "logging.getLogger(__name__) self.params = None self.info = None self.identifier_field = identifier_field self.target_field = target_field", "= set(cat_encoded.keys()).intersection(numeric_fields) if intersection: self.logger.info('categorical') self.logger.info(cat_encoded) self.logger.info('numerical') self.logger.info(numeric_fields) self.logger.info('intersection') self.logger.info(intersection) raise ValueError('categorical and", "self.encoded_feature_fields_categorical() def omitted_feature_fields_for_input(self): encoded = self.encoded_feature_fields() return [field for field in encoded if", "_exclude(fields, excluded): return [field for field in fields if field not in excluded]", "results = {} for arg in args: results.update(arg) return results class FeatureSetBase: \"\"\"", "derived_feature_fields_categorical(self): return sorted(self.params['derived_categorical_n_levels_dict'].keys()) def derived_feature_fields(self): return self.derived_feature_fields_numerical() + self.derived_feature_fields_categorical() def available_feature_fields_numerical(self): return self.base_feature_fields_numerical()", "dict) return {} def derived_features(self, processed_row): num = self.derived_features_numerical(processed_row) cat = self.derived_features_categorical(processed_row) return", "self.derived_features(processed_row) return _combine_dicts(base, derv) def ml_fields(self): categorical_n_levels_dict = self.params['base_categorical_n_levels_dict'].copy() categorical_n_levels_dict.update(self.params['derived_categorical_n_levels_dict']) cat_encoded = {k:", "self.logger.info('numerical') 
self.logger.info(numeric_fields) self.logger.info('intersection') self.logger.info(intersection) raise ValueError('categorical and numeric overlap') return {'categorical': cat_encoded, 'numerical':", "excluded] def _combine_dicts(*args): results = {} for arg in args: results.update(arg) return results", "def _combine_dicts(*args): results = {} for arg in args: results.update(arg) return results class", "field not in self.base_feature_fields()] # feature transformations def base_features_numerical(self, processed_row): return {k: processed_row[k]", "processed_row): return {k: processed_row[k] for k in self.base_feature_fields_categorical()} def base_features(self, processed_row): return {k:", "derived_features_categorical(self, processed_row): # TODO: override assert isinstance(processed_row, dict) return {} def derived_features_numerical(self, processed_row):", "self.logger.info('categorical') self.logger.info(cat_encoded) self.logger.info('numerical') self.logger.info(numeric_fields) self.logger.info('intersection') self.logger.info(intersection) raise ValueError('categorical and numeric overlap') return {'categorical':", "[field for field in encoded if field not in self.base_feature_fields()] # feature transformations", "field not in excluded] def _combine_dicts(*args): results = {} for arg in args:", "in args: results.update(arg) return results class FeatureSetBase: \"\"\" Generic interface for feature sets", "feature transformations def base_features_numerical(self, processed_row): return {k: processed_row[k] for k in self.base_feature_fields_numerical()} def", "in derived class self.logger = logging.getLogger(__name__) self.params = None self.info = None self.identifier_field", "in self.base_feature_fields()] # feature transformations def base_features_numerical(self, processed_row): return {k: processed_row[k] for k", "v for k, v in categorical_n_levels_dict.items() if k in self.encoded_feature_fields_categorical()} numeric_fields = 
self.encoded_feature_fields_numerical()", "return self.base_feature_fields_numerical() + self.base_feature_fields_categorical() def derived_feature_fields_numerical(self): return self.params['derived_fields_numerical'] def derived_feature_fields_categorical(self): return sorted(self.params['derived_categorical_n_levels_dict'].keys()) def", "return results class FeatureSetBase: \"\"\" Generic interface for feature sets \"\"\" def __init__(self,", "def fields_excluded_from_features(self): id_target = [self.identifier_field, self.target_field] return id_target + self.params['extra_information_fields'] def _exclude_non_features(self, fields):", "+ self.derived_feature_fields_categorical() def available_feature_fields_numerical(self): return self.base_feature_fields_numerical() + self.derived_feature_fields_numerical() def available_feature_fields_categorical(self): return self.base_feature_fields_categorical() +", "cat = self.derived_features_categorical(processed_row) return _combine_dicts(num, cat) def features(self, processed_row): base = self.base_features(processed_row) derv", "id_target + self.params['extra_information_fields'] def _exclude_non_features(self, fields): return _exclude(fields, self.fields_excluded_from_features()) def base_feature_fields_numerical(self): fields =", "base_feature_fields(self): return self.base_feature_fields_numerical() + self.base_feature_fields_categorical() def derived_feature_fields_numerical(self): return self.params['derived_fields_numerical'] def derived_feature_fields_categorical(self): return sorted(self.params['derived_categorical_n_levels_dict'].keys())", "[field for field in fields if field not in excluded] def _combine_dicts(*args): results", "self.encoded_feature_fields_categorical()} numeric_fields = self.encoded_feature_fields_numerical() intersection = set(cat_encoded.keys()).intersection(numeric_fields) if intersection: self.logger.info('categorical') self.logger.info(cat_encoded) self.logger.info('numerical') 
self.logger.info(numeric_fields)", "k, v in categorical_n_levels_dict.items() if k in self.encoded_feature_fields_categorical()} numeric_fields = self.encoded_feature_fields_numerical() intersection =", "self.logger.info(cat_encoded) self.logger.info('numerical') self.logger.info(numeric_fields) self.logger.info('intersection') self.logger.info(intersection) raise ValueError('categorical and numeric overlap') return {'categorical': cat_encoded,", "+ self.base_feature_fields_categorical() def derived_feature_fields_numerical(self): return self.params['derived_fields_numerical'] def derived_feature_fields_categorical(self): return sorted(self.params['derived_categorical_n_levels_dict'].keys()) def derived_feature_fields(self): return", "self.params['base_fields_numerical'] return self._exclude_non_features(fields) def base_feature_fields_categorical(self): fields = sorted(self.params['base_categorical_n_levels_dict'].keys()) return self._exclude_non_features(fields) def base_feature_fields(self): return", "self.derived_feature_fields_categorical() def encoded_feature_fields_numerical(self): return _exclude(self.available_feature_fields_numerical(), self.params['encoder_excluded_fields']) def encoded_feature_fields_categorical(self): return _exclude(self.available_feature_fields_categorical(), self.params['encoder_excluded_fields']) def encoded_feature_fields(self):", "= None self.info = None self.identifier_field = identifier_field self.target_field = target_field def fields_excluded_from_features(self):", "return {} def derived_features_numerical(self, processed_row): # TODO: override assert isinstance(processed_row, dict) return {}", "if k in self.encoded_feature_fields_categorical()} numeric_fields = self.encoded_feature_fields_numerical() intersection = set(cat_encoded.keys()).intersection(numeric_fields) if intersection: self.logger.info('categorical')", "return _combine_dicts(num, cat) def features(self, processed_row): base = self.base_features(processed_row) 
derv = self.derived_features(processed_row) return", "for field in encoded if field not in self.base_feature_fields()] # feature transformations def", "args: results.update(arg) return results class FeatureSetBase: \"\"\" Generic interface for feature sets \"\"\"", "k in self.encoded_feature_fields_categorical()} numeric_fields = self.encoded_feature_fields_numerical() intersection = set(cat_encoded.keys()).intersection(numeric_fields) if intersection: self.logger.info('categorical') self.logger.info(cat_encoded)", "self.derived_feature_fields_numerical() def available_feature_fields_categorical(self): return self.base_feature_fields_categorical() + self.derived_feature_fields_categorical() def encoded_feature_fields_numerical(self): return _exclude(self.available_feature_fields_numerical(), self.params['encoder_excluded_fields']) def", "TODO: override assert isinstance(processed_row, dict) return {} def derived_features_numerical(self, processed_row): # TODO: override", "= self.params['base_fields_numerical'] return self._exclude_non_features(fields) def base_feature_fields_categorical(self): fields = sorted(self.params['base_categorical_n_levels_dict'].keys()) return self._exclude_non_features(fields) def base_feature_fields(self):", "self.logger = logging.getLogger(__name__) self.params = None self.info = None self.identifier_field = identifier_field self.target_field", "def base_feature_fields_categorical(self): fields = sorted(self.params['base_categorical_n_levels_dict'].keys()) return self._exclude_non_features(fields) def base_feature_fields(self): return self.base_feature_fields_numerical() + self.base_feature_fields_categorical()", "intersection: self.logger.info('categorical') self.logger.info(cat_encoded) self.logger.info('numerical') self.logger.info(numeric_fields) self.logger.info('intersection') self.logger.info(intersection) raise ValueError('categorical and numeric overlap') return", "return id_target + self.params['extra_information_fields'] def 
_exclude_non_features(self, fields): return _exclude(fields, self.fields_excluded_from_features()) def base_feature_fields_numerical(self): fields", "= [self.identifier_field, self.target_field] return id_target + self.params['extra_information_fields'] def _exclude_non_features(self, fields): return _exclude(fields, self.fields_excluded_from_features())", "processed_row): num = self.derived_features_numerical(processed_row) cat = self.derived_features_categorical(processed_row) return _combine_dicts(num, cat) def features(self, processed_row):", "= self.derived_features(processed_row) return _combine_dicts(base, derv) def ml_fields(self): categorical_n_levels_dict = self.params['base_categorical_n_levels_dict'].copy() categorical_n_levels_dict.update(self.params['derived_categorical_n_levels_dict']) cat_encoded =", "for k in self.base_feature_fields_categorical()} def base_features(self, processed_row): return {k: processed_row[k] for k in", "k in self.base_feature_fields_numerical()} def base_features_categorical(self, processed_row): return {k: processed_row[k] for k in self.base_feature_fields_categorical()}", "self.params['encoder_excluded_fields']) def encoded_feature_fields(self): return self.encoded_feature_fields_numerical() + self.encoded_feature_fields_categorical() def omitted_feature_fields_for_input(self): encoded = self.encoded_feature_fields() return", "def base_features(self, processed_row): return {k: processed_row[k] for k in self.base_feature_fields()} def derived_features_categorical(self, processed_row):", "# TODO: override assert isinstance(processed_row, dict) return {} def derived_features_numerical(self, processed_row): # TODO:", "= identifier_field self.target_field = target_field def fields_excluded_from_features(self): id_target = [self.identifier_field, self.target_field] return id_target", "field in encoded if field not in self.base_feature_fields()] # feature transformations def base_features_numerical(self,", "encoded if field not 
in self.base_feature_fields()] # feature transformations def base_features_numerical(self, processed_row): return", "to be filled out in derived class self.logger = logging.getLogger(__name__) self.params = None", "self.logger.info(intersection) raise ValueError('categorical and numeric overlap') return {'categorical': cat_encoded, 'numerical': numeric_fields, 'target_name': self.target_field}", "def available_feature_fields_numerical(self): return self.base_feature_fields_numerical() + self.derived_feature_fields_numerical() def available_feature_fields_categorical(self): return self.base_feature_fields_categorical() + self.derived_feature_fields_categorical() def", "self.params['encoder_excluded_fields']) def encoded_feature_fields_categorical(self): return _exclude(self.available_feature_fields_categorical(), self.params['encoder_excluded_fields']) def encoded_feature_fields(self): return self.encoded_feature_fields_numerical() + self.encoded_feature_fields_categorical() def" ]
[ "value) write_config() def add_profile(profile): if config.has_section(profile): click.echo('Section [{}] already exists!!'.format(profile)) return config.add_section(profile) write_config()", "= configparser.ConfigParser() def get_config_path(): homedir = os.environ.get('HOME', None) if not homedir: click.echo('Home Directory", "logger.debug('Config File Location: {}'.format(config_file)) if not os.path.exists(config_file): click.echo('ERROR: No Config file present') try:", "os import sys import click import utils.logger as logger from utils.const import CONFIG_FILE_PATH", "write_config() def write_config(): with open(config_file, 'w') as configfile: config.write(configfile) def get_env(profile, key): if", "= os.environ.get('HOME', None) if not homedir: click.echo('Home Directory Not found!! Set Environment `HOME`", "CONFIG_FILE_PATH) logger.debug('Config File Location: {}'.format(config_file)) if not os.path.exists(config_file): click.echo('ERROR: No Config file present')", "config file') logger.debug('Creating config file') file = open(config_file, 'w') file.write('[{}]'.format('global')) file.close() logger.debug(config.read(config_file)) return", "exists!!'.format(profile)) return config.add_section(profile) write_config() def write_config(): with open(config_file, 'w') as configfile: config.write(configfile) def", "config.write(configfile) def get_env(profile, key): if has_env(profile, key): return config.get(profile, key) logger.debug('Not found in", "current profile') click.echo('Value not found in {profile} use `cgccli config` command'.format(profile=profile)) exit() def", "if has_env(profile, key): return config.get(profile, key) logger.debug('Not found in current profile') click.echo('Value not", "import sys import click import utils.logger as logger from utils.const import CONFIG_FILE_PATH if", "not found in {profile} use `cgccli config` command'.format(profile=profile)) exit() def has_env(profile, key): if", "if profile: logger.debug('Searching in 
profile : {}'.format(profile)) logger.debug('Searching key {}'.format(key)) return config.has_option(profile, key)", "key, value) write_config() def add_profile(profile): if config.has_section(profile): click.echo('Section [{}] already exists!!'.format(profile)) return config.add_section(profile)", "click.echo('Value not found in {profile} use `cgccli config` command'.format(profile=profile)) exit() def has_env(profile, key):", "'w') as configfile: config.write(configfile) def get_env(profile, key): if has_env(profile, key): return config.get(profile, key)", "file') logger.debug('Creating config file') file = open(config_file, 'w') file.write('[{}]'.format('global')) file.close() logger.debug(config.read(config_file)) return config_file", "not config.has_section(profile): config.add_section(profile) config.set(profile, key, value) write_config() def add_profile(profile): if config.has_section(profile): click.echo('Section [{}]", "logger.debug('Home Directory: {}'.format(homedir)) config_file = os.path.join(homedir + CONFIG_FILE_PATH) logger.debug('Config File Location: {}'.format(config_file)) if", "def has_env(profile, key): if profile: logger.debug('Searching in profile : {}'.format(profile)) logger.debug('Searching key {}'.format(key))", "try: os.makedirs(os.path.dirname(config_file)) except OSError as exc: # Guard against race condition click.echo('Directory found!", "def get_config_path(): homedir = os.environ.get('HOME', None) if not homedir: click.echo('Home Directory Not found!!", "') exit() logger.debug('Home Directory: {}'.format(homedir)) config_file = os.path.join(homedir + CONFIG_FILE_PATH) logger.debug('Config File Location:", "present') try: os.makedirs(os.path.dirname(config_file)) except OSError as exc: # Guard against race condition click.echo('Directory", "not config file') logger.debug('Creating config file') file = open(config_file, 'w') file.write('[{}]'.format('global')) file.close() logger.debug(config.read(config_file))", "configparser 
config = configparser.ConfigParser() def get_config_path(): homedir = os.environ.get('HOME', None) if not homedir:", "exc: # Guard against race condition click.echo('Directory found! but not config file') logger.debug('Creating", "key): if has_env(profile, key): return config.get(profile, key) logger.debug('Not found in current profile') click.echo('Value", "logger.debug(config.read(config_file)) return config_file config_file = get_config_path() def set_env(profile, key, value): if not config.has_section(profile):", "exit() def has_env(profile, key): if profile: logger.debug('Searching in profile : {}'.format(profile)) logger.debug('Searching key", "Guard against race condition click.echo('Directory found! but not config file') logger.debug('Creating config file')", "use `cgccli config` command'.format(profile=profile)) exit() def has_env(profile, key): if profile: logger.debug('Searching in profile", "has_env(profile, key): if profile: logger.debug('Searching in profile : {}'.format(profile)) logger.debug('Searching key {}'.format(key)) return", "open(config_file, 'w') as configfile: config.write(configfile) def get_env(profile, key): if has_env(profile, key): return config.get(profile,", "logger.debug('Not found in current profile') click.echo('Value not found in {profile} use `cgccli config`", "config.has_section(profile): click.echo('Section [{}] already exists!!'.format(profile)) return config.add_section(profile) write_config() def write_config(): with open(config_file, 'w')", "in profile : {}'.format(profile)) logger.debug('Searching key {}'.format(key)) return config.has_option(profile, key) return False def", "as logger from utils.const import CONFIG_FILE_PATH if sys.version_info[0] == 2: import ConfigParser as", "config.has_section(profile): config.add_section(profile) config.set(profile, key, value) write_config() def add_profile(profile): if config.has_section(profile): click.echo('Section [{}] already", "profile') click.echo('Value not found in 
{profile} use `cgccli config` command'.format(profile=profile)) exit() def has_env(profile,", "profile : {}'.format(profile)) logger.debug('Searching key {}'.format(key)) return config.has_option(profile, key) return False def get_profiles():", "= os.path.join(homedir + CONFIG_FILE_PATH) logger.debug('Config File Location: {}'.format(config_file)) if not os.path.exists(config_file): click.echo('ERROR: No", "set_env(profile, key, value): if not config.has_section(profile): config.add_section(profile) config.set(profile, key, value) write_config() def add_profile(profile):", "Environment `HOME` ') exit() logger.debug('Home Directory: {}'.format(homedir)) config_file = os.path.join(homedir + CONFIG_FILE_PATH) logger.debug('Config", "in {profile} use `cgccli config` command'.format(profile=profile)) exit() def has_env(profile, key): if profile: logger.debug('Searching", "Location: {}'.format(config_file)) if not os.path.exists(config_file): click.echo('ERROR: No Config file present') try: os.makedirs(os.path.dirname(config_file)) except", "if not os.path.exists(config_file): click.echo('ERROR: No Config file present') try: os.makedirs(os.path.dirname(config_file)) except OSError as", "if config.has_section(profile): click.echo('Section [{}] already exists!!'.format(profile)) return config.add_section(profile) write_config() def write_config(): with open(config_file,", "config` command'.format(profile=profile)) exit() def has_env(profile, key): if profile: logger.debug('Searching in profile : {}'.format(profile))", "Set Environment `HOME` ') exit() logger.debug('Home Directory: {}'.format(homedir)) config_file = os.path.join(homedir + CONFIG_FILE_PATH)", "click.echo('Directory found! 
but not config file') logger.debug('Creating config file') file = open(config_file, 'w')", "File Location: {}'.format(config_file)) if not os.path.exists(config_file): click.echo('ERROR: No Config file present') try: os.makedirs(os.path.dirname(config_file))", "homedir: click.echo('Home Directory Not found!! Set Environment `HOME` ') exit() logger.debug('Home Directory: {}'.format(homedir))", "configparser.ConfigParser() def get_config_path(): homedir = os.environ.get('HOME', None) if not homedir: click.echo('Home Directory Not", "if not homedir: click.echo('Home Directory Not found!! Set Environment `HOME` ') exit() logger.debug('Home", "file') file = open(config_file, 'w') file.write('[{}]'.format('global')) file.close() logger.debug(config.read(config_file)) return config_file config_file = get_config_path()", "from utils.const import CONFIG_FILE_PATH if sys.version_info[0] == 2: import ConfigParser as configparser else:", "import configparser config = configparser.ConfigParser() def get_config_path(): homedir = os.environ.get('HOME', None) if not", "Not found!! 
Set Environment `HOME` ') exit() logger.debug('Home Directory: {}'.format(homedir)) config_file = os.path.join(homedir", "import click import utils.logger as logger from utils.const import CONFIG_FILE_PATH if sys.version_info[0] ==", "def write_config(): with open(config_file, 'w') as configfile: config.write(configfile) def get_env(profile, key): if has_env(profile,", "+ CONFIG_FILE_PATH) logger.debug('Config File Location: {}'.format(config_file)) if not os.path.exists(config_file): click.echo('ERROR: No Config file", "= open(config_file, 'w') file.write('[{}]'.format('global')) file.close() logger.debug(config.read(config_file)) return config_file config_file = get_config_path() def set_env(profile,", "profile: logger.debug('Searching in profile : {}'.format(profile)) logger.debug('Searching key {}'.format(key)) return config.has_option(profile, key) return", "{}'.format(homedir)) config_file = os.path.join(homedir + CONFIG_FILE_PATH) logger.debug('Config File Location: {}'.format(config_file)) if not os.path.exists(config_file):", "key, value): if not config.has_section(profile): config.add_section(profile) config.set(profile, key, value) write_config() def add_profile(profile): if", "logger.debug('Creating config file') file = open(config_file, 'w') file.write('[{}]'.format('global')) file.close() logger.debug(config.read(config_file)) return config_file config_file", "No Config file present') try: os.makedirs(os.path.dirname(config_file)) except OSError as exc: # Guard against", "file = open(config_file, 'w') file.write('[{}]'.format('global')) file.close() logger.debug(config.read(config_file)) return config_file config_file = get_config_path() def", "config.set(profile, key, value) write_config() def add_profile(profile): if config.has_section(profile): click.echo('Section [{}] already exists!!'.format(profile)) return", "def get_env(profile, key): if has_env(profile, key): return config.get(profile, key) logger.debug('Not found in current", "`HOME` ') 
exit() logger.debug('Home Directory: {}'.format(homedir)) config_file = os.path.join(homedir + CONFIG_FILE_PATH) logger.debug('Config File", ": {}'.format(profile)) logger.debug('Searching key {}'.format(key)) return config.has_option(profile, key) return False def get_profiles(): return", "logger from utils.const import CONFIG_FILE_PATH if sys.version_info[0] == 2: import ConfigParser as configparser", "against race condition click.echo('Directory found! but not config file') logger.debug('Creating config file') file", "configparser else: import configparser config = configparser.ConfigParser() def get_config_path(): homedir = os.environ.get('HOME', None)", "return config.add_section(profile) write_config() def write_config(): with open(config_file, 'w') as configfile: config.write(configfile) def get_env(profile,", "but not config file') logger.debug('Creating config file') file = open(config_file, 'w') file.write('[{}]'.format('global')) file.close()", "'w') file.write('[{}]'.format('global')) file.close() logger.debug(config.read(config_file)) return config_file config_file = get_config_path() def set_env(profile, key, value):", "return config.get(profile, key) logger.debug('Not found in current profile') click.echo('Value not found in {profile}", "found in {profile} use `cgccli config` command'.format(profile=profile)) exit() def has_env(profile, key): if profile:", "Directory Not found!! Set Environment `HOME` ') exit() logger.debug('Home Directory: {}'.format(homedir)) config_file =", "write_config(): with open(config_file, 'w') as configfile: config.write(configfile) def get_env(profile, key): if has_env(profile, key):", "OSError as exc: # Guard against race condition click.echo('Directory found! 
but not config", "config = configparser.ConfigParser() def get_config_path(): homedir = os.environ.get('HOME', None) if not homedir: click.echo('Home", "get_config_path() def set_env(profile, key, value): if not config.has_section(profile): config.add_section(profile) config.set(profile, key, value) write_config()", "def set_env(profile, key, value): if not config.has_section(profile): config.add_section(profile) config.set(profile, key, value) write_config() def", "homedir = os.environ.get('HOME', None) if not homedir: click.echo('Home Directory Not found!! Set Environment", "config file') file = open(config_file, 'w') file.write('[{}]'.format('global')) file.close() logger.debug(config.read(config_file)) return config_file config_file =", "key): return config.get(profile, key) logger.debug('Not found in current profile') click.echo('Value not found in", "click.echo('Section [{}] already exists!!'.format(profile)) return config.add_section(profile) write_config() def write_config(): with open(config_file, 'w') as", "add_profile(profile): if config.has_section(profile): click.echo('Section [{}] already exists!!'.format(profile)) return config.add_section(profile) write_config() def write_config(): with", "logger.debug('Searching in profile : {}'.format(profile)) logger.debug('Searching key {}'.format(key)) return config.has_option(profile, key) return False", "= get_config_path() def set_env(profile, key, value): if not config.has_section(profile): config.add_section(profile) config.set(profile, key, value)", "already exists!!'.format(profile)) return config.add_section(profile) write_config() def write_config(): with open(config_file, 'w') as configfile: config.write(configfile)", "not os.path.exists(config_file): click.echo('ERROR: No Config file present') try: os.makedirs(os.path.dirname(config_file)) except OSError as exc:", "found!! 
Set Environment `HOME` ') exit() logger.debug('Home Directory: {}'.format(homedir)) config_file = os.path.join(homedir +", "click.echo('Home Directory Not found!! Set Environment `HOME` ') exit() logger.debug('Home Directory: {}'.format(homedir)) config_file", "get_config_path(): homedir = os.environ.get('HOME', None) if not homedir: click.echo('Home Directory Not found!! Set", "open(config_file, 'w') file.write('[{}]'.format('global')) file.close() logger.debug(config.read(config_file)) return config_file config_file = get_config_path() def set_env(profile, key,", "config.add_section(profile) config.set(profile, key, value) write_config() def add_profile(profile): if config.has_section(profile): click.echo('Section [{}] already exists!!'.format(profile))", "[{}] already exists!!'.format(profile)) return config.add_section(profile) write_config() def write_config(): with open(config_file, 'w') as configfile:", "utils.const import CONFIG_FILE_PATH if sys.version_info[0] == 2: import ConfigParser as configparser else: import", "return config_file config_file = get_config_path() def set_env(profile, key, value): if not config.has_section(profile): config.add_section(profile)", "key) logger.debug('Not found in current profile') click.echo('Value not found in {profile} use `cgccli", "if sys.version_info[0] == 2: import ConfigParser as configparser else: import configparser config =", "CONFIG_FILE_PATH if sys.version_info[0] == 2: import ConfigParser as configparser else: import configparser config", "`cgccli config` command'.format(profile=profile)) exit() def has_env(profile, key): if profile: logger.debug('Searching in profile :", "if not config.has_section(profile): config.add_section(profile) config.set(profile, key, value) write_config() def add_profile(profile): if config.has_section(profile): click.echo('Section", "configfile: config.write(configfile) def get_env(profile, key): if has_env(profile, key): return config.get(profile, key) logger.debug('Not found", 
"click.echo('ERROR: No Config file present') try: os.makedirs(os.path.dirname(config_file)) except OSError as exc: # Guard", "race condition click.echo('Directory found! but not config file') logger.debug('Creating config file') file =", "{}'.format(config_file)) if not os.path.exists(config_file): click.echo('ERROR: No Config file present') try: os.makedirs(os.path.dirname(config_file)) except OSError", "== 2: import ConfigParser as configparser else: import configparser config = configparser.ConfigParser() def", "config.add_section(profile) write_config() def write_config(): with open(config_file, 'w') as configfile: config.write(configfile) def get_env(profile, key):", "config_file = get_config_path() def set_env(profile, key, value): if not config.has_section(profile): config.add_section(profile) config.set(profile, key,", "import CONFIG_FILE_PATH if sys.version_info[0] == 2: import ConfigParser as configparser else: import configparser", "2: import ConfigParser as configparser else: import configparser config = configparser.ConfigParser() def get_config_path():", "sys import click import utils.logger as logger from utils.const import CONFIG_FILE_PATH if sys.version_info[0]", "Directory: {}'.format(homedir)) config_file = os.path.join(homedir + CONFIG_FILE_PATH) logger.debug('Config File Location: {}'.format(config_file)) if not", "not homedir: click.echo('Home Directory Not found!! Set Environment `HOME` ') exit() logger.debug('Home Directory:", "file.close() logger.debug(config.read(config_file)) return config_file config_file = get_config_path() def set_env(profile, key, value): if not", "{profile} use `cgccli config` command'.format(profile=profile)) exit() def has_env(profile, key): if profile: logger.debug('Searching in", "import ConfigParser as configparser else: import configparser config = configparser.ConfigParser() def get_config_path(): homedir", "as exc: # Guard against race condition click.echo('Directory found! 
but not config file')", "os.path.exists(config_file): click.echo('ERROR: No Config file present') try: os.makedirs(os.path.dirname(config_file)) except OSError as exc: #", "found! but not config file') logger.debug('Creating config file') file = open(config_file, 'w') file.write('[{}]'.format('global'))", "# Guard against race condition click.echo('Directory found! but not config file') logger.debug('Creating config", "sys.version_info[0] == 2: import ConfigParser as configparser else: import configparser config = configparser.ConfigParser()", "exit() logger.debug('Home Directory: {}'.format(homedir)) config_file = os.path.join(homedir + CONFIG_FILE_PATH) logger.debug('Config File Location: {}'.format(config_file))", "None) if not homedir: click.echo('Home Directory Not found!! Set Environment `HOME` ') exit()", "config_file = os.path.join(homedir + CONFIG_FILE_PATH) logger.debug('Config File Location: {}'.format(config_file)) if not os.path.exists(config_file): click.echo('ERROR:", "ConfigParser as configparser else: import configparser config = configparser.ConfigParser() def get_config_path(): homedir =", "os.environ.get('HOME', None) if not homedir: click.echo('Home Directory Not found!! Set Environment `HOME` ')", "os.makedirs(os.path.dirname(config_file)) except OSError as exc: # Guard against race condition click.echo('Directory found! 
but", "with open(config_file, 'w') as configfile: config.write(configfile) def get_env(profile, key): if has_env(profile, key): return", "found in current profile') click.echo('Value not found in {profile} use `cgccli config` command'.format(profile=profile))", "key): if profile: logger.debug('Searching in profile : {}'.format(profile)) logger.debug('Searching key {}'.format(key)) return config.has_option(profile,", "else: import configparser config = configparser.ConfigParser() def get_config_path(): homedir = os.environ.get('HOME', None) if", "get_env(profile, key): if has_env(profile, key): return config.get(profile, key) logger.debug('Not found in current profile')", "config.get(profile, key) logger.debug('Not found in current profile') click.echo('Value not found in {profile} use", "<reponame>jelenko5/cgccli import os import sys import click import utils.logger as logger from utils.const", "Config file present') try: os.makedirs(os.path.dirname(config_file)) except OSError as exc: # Guard against race", "value): if not config.has_section(profile): config.add_section(profile) config.set(profile, key, value) write_config() def add_profile(profile): if config.has_section(profile):", "os.path.join(homedir + CONFIG_FILE_PATH) logger.debug('Config File Location: {}'.format(config_file)) if not os.path.exists(config_file): click.echo('ERROR: No Config", "as configparser else: import configparser config = configparser.ConfigParser() def get_config_path(): homedir = os.environ.get('HOME',", "has_env(profile, key): return config.get(profile, key) logger.debug('Not found in current profile') click.echo('Value not found", "command'.format(profile=profile)) exit() def has_env(profile, key): if profile: logger.debug('Searching in profile : {}'.format(profile)) logger.debug('Searching", "file present') try: os.makedirs(os.path.dirname(config_file)) except OSError as exc: # Guard against race condition", "click import utils.logger as logger from utils.const import 
CONFIG_FILE_PATH if sys.version_info[0] == 2:", "file.write('[{}]'.format('global')) file.close() logger.debug(config.read(config_file)) return config_file config_file = get_config_path() def set_env(profile, key, value): if", "as configfile: config.write(configfile) def get_env(profile, key): if has_env(profile, key): return config.get(profile, key) logger.debug('Not", "except OSError as exc: # Guard against race condition click.echo('Directory found! but not", "condition click.echo('Directory found! but not config file') logger.debug('Creating config file') file = open(config_file,", "import utils.logger as logger from utils.const import CONFIG_FILE_PATH if sys.version_info[0] == 2: import", "config_file config_file = get_config_path() def set_env(profile, key, value): if not config.has_section(profile): config.add_section(profile) config.set(profile,", "in current profile') click.echo('Value not found in {profile} use `cgccli config` command'.format(profile=profile)) exit()", "import os import sys import click import utils.logger as logger from utils.const import", "def add_profile(profile): if config.has_section(profile): click.echo('Section [{}] already exists!!'.format(profile)) return config.add_section(profile) write_config() def write_config():", "write_config() def add_profile(profile): if config.has_section(profile): click.echo('Section [{}] already exists!!'.format(profile)) return config.add_section(profile) write_config() def", "utils.logger as logger from utils.const import CONFIG_FILE_PATH if sys.version_info[0] == 2: import ConfigParser", "{}'.format(profile)) logger.debug('Searching key {}'.format(key)) return config.has_option(profile, key) return False def get_profiles(): return config.sections()" ]
[ "writing, software # distributed under the License is distributed on an \"AS IS\"", "# pylint: disable=useless-super-delegation class TestFQEOperator(fqe_operator.FqeOperator): \"\"\" This class is just to make sure", "1, 1]]) assert round(abs(0.0 + 0.0j - test.contract(wfn, wfn)), 7) == 0 assert", "KIND, either express or implied. # See the License for the specific language", "import wavefunction def test_operator(): \"\"\"Testing abstract FqeOperator class using a dummy class\"\"\" #", "Unless required by applicable law or agreed to in writing, software # distributed", "- test.contract(wfn, wfn)), 7) == 0 assert \"fqe-operator\" == test.representation() assert 0 ==", "using a dummy class\"\"\" # pylint: disable=useless-super-delegation class TestFQEOperator(fqe_operator.FqeOperator): \"\"\" This class is", "You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #", "the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR", "# See the License for the specific language governing permissions and # limitations", "super().representation() def rank(self) -> int: return super().rank() test = TestFQEOperator() wfn = wavefunction.Wavefunction([[1,", "pylint: disable=useless-super-delegation class TestFQEOperator(fqe_operator.FqeOperator): \"\"\" This class is just to make sure the", "License. # You may obtain a copy of the License at # #", "class is tested. 
\"\"\" def contract( self, brastate: \"wavefunction.Wavefunction\", ketstate: \"wavefunction.Wavefunction\", ) ->", "complex: return super().contract(brastate, ketstate) def representation(self) -> str: return super().representation() def rank(self) ->", "fqe.fqe_ops import fqe_operator from fqe import wavefunction def test_operator(): \"\"\"Testing abstract FqeOperator class", "contract( self, brastate: \"wavefunction.Wavefunction\", ketstate: \"wavefunction.Wavefunction\", ) -> complex: return super().contract(brastate, ketstate) def", "Copyright 2020 Google LLC # Licensed under the Apache License, Version 2.0 (the", "law or agreed to in writing, software # distributed under the License is", "test_operator(): \"\"\"Testing abstract FqeOperator class using a dummy class\"\"\" # pylint: disable=useless-super-delegation class", "the License for the specific language governing permissions and # limitations under the", "compliance with the License. # You may obtain a copy of the License", "the specific language governing permissions and # limitations under the License. \"\"\"Tests for", "FqeOperator class using a dummy class\"\"\" # pylint: disable=useless-super-delegation class TestFQEOperator(fqe_operator.FqeOperator): \"\"\" This", "ketstate) def representation(self) -> str: return super().representation() def rank(self) -> int: return super().rank()", "wavefunction def test_operator(): \"\"\"Testing abstract FqeOperator class using a dummy class\"\"\" # pylint:", "super().contract(brastate, ketstate) def representation(self) -> str: return super().representation() def rank(self) -> int: return", "on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,", "this file except in compliance with the License. 
# You may obtain a", "assert round(abs(0.0 + 0.0j - test.contract(wfn, wfn)), 7) == 0 assert \"fqe-operator\" ==", "+ 0.0j - test.contract(wfn, wfn)), 7) == 0 assert \"fqe-operator\" == test.representation() assert", "the Apache License, Version 2.0 (the \"License\"); # you may not use this", "you may not use this file except in compliance with the License. #", "under the License. \"\"\"Tests for FqeOperator.\"\"\" from fqe.fqe_ops import fqe_operator from fqe import", "of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable", "tested. \"\"\" def contract( self, brastate: \"wavefunction.Wavefunction\", ketstate: \"wavefunction.Wavefunction\", ) -> complex: return", "specific language governing permissions and # limitations under the License. \"\"\"Tests for FqeOperator.\"\"\"", "limitations under the License. \"\"\"Tests for FqeOperator.\"\"\" from fqe.fqe_ops import fqe_operator from fqe", "-> str: return super().representation() def rank(self) -> int: return super().rank() test = TestFQEOperator()", "ANY KIND, either express or implied. # See the License for the specific", "just to make sure the abstract FqeOperator class is tested. \"\"\" def contract(", "wfn = wavefunction.Wavefunction([[1, 1, 1]]) assert round(abs(0.0 + 0.0j - test.contract(wfn, wfn)), 7)", "from fqe import wavefunction def test_operator(): \"\"\"Testing abstract FqeOperator class using a dummy", "make sure the abstract FqeOperator class is tested. \"\"\" def contract( self, brastate:", "\"\"\" This class is just to make sure the abstract FqeOperator class is", "return super().representation() def rank(self) -> int: return super().rank() test = TestFQEOperator() wfn =", "in compliance with the License. # You may obtain a copy of the", "License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
#", "for the specific language governing permissions and # limitations under the License. \"\"\"Tests", "use this file except in compliance with the License. # You may obtain", "at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed", "not use this file except in compliance with the License. # You may", "def contract( self, brastate: \"wavefunction.Wavefunction\", ketstate: \"wavefunction.Wavefunction\", ) -> complex: return super().contract(brastate, ketstate)", "representation(self) -> str: return super().representation() def rank(self) -> int: return super().rank() test =", "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See", "class using a dummy class\"\"\" # pylint: disable=useless-super-delegation class TestFQEOperator(fqe_operator.FqeOperator): \"\"\" This class", "the License. \"\"\"Tests for FqeOperator.\"\"\" from fqe.fqe_ops import fqe_operator from fqe import wavefunction", "See the License for the specific language governing permissions and # limitations under", "BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "LLC # Licensed under the Apache License, Version 2.0 (the \"License\"); # you", "License, Version 2.0 (the \"License\"); # you may not use this file except", "test.contract(wfn, wfn)), 7) == 0 assert \"fqe-operator\" == test.representation() assert 0 == test.rank()", "# Licensed under the Apache License, Version 2.0 (the \"License\"); # you may", "IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required", "distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY", "# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in", "to make sure the abstract FqeOperator class is tested. 
\"\"\" def contract( self,", "fqe_operator from fqe import wavefunction def test_operator(): \"\"\"Testing abstract FqeOperator class using a", "the abstract FqeOperator class is tested. \"\"\" def contract( self, brastate: \"wavefunction.Wavefunction\", ketstate:", "OF ANY KIND, either express or implied. # See the License for the", "is just to make sure the abstract FqeOperator class is tested. \"\"\" def", "2.0 (the \"License\"); # you may not use this file except in compliance", "def test_operator(): \"\"\"Testing abstract FqeOperator class using a dummy class\"\"\" # pylint: disable=useless-super-delegation", "TestFQEOperator() wfn = wavefunction.Wavefunction([[1, 1, 1]]) assert round(abs(0.0 + 0.0j - test.contract(wfn, wfn)),", "1]]) assert round(abs(0.0 + 0.0j - test.contract(wfn, wfn)), 7) == 0 assert \"fqe-operator\"", "round(abs(0.0 + 0.0j - test.contract(wfn, wfn)), 7) == 0 assert \"fqe-operator\" == test.representation()", "# you may not use this file except in compliance with the License.", "agreed to in writing, software # distributed under the License is distributed on", "permissions and # limitations under the License. \"\"\"Tests for FqeOperator.\"\"\" from fqe.fqe_ops import", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the", "super().rank() test = TestFQEOperator() wfn = wavefunction.Wavefunction([[1, 1, 1]]) assert round(abs(0.0 + 0.0j", "abstract FqeOperator class using a dummy class\"\"\" # pylint: disable=useless-super-delegation class TestFQEOperator(fqe_operator.FqeOperator): \"\"\"", "is tested. 
\"\"\" def contract( self, brastate: \"wavefunction.Wavefunction\", ketstate: \"wavefunction.Wavefunction\", ) -> complex:", "(the \"License\"); # you may not use this file except in compliance with", "fqe import wavefunction def test_operator(): \"\"\"Testing abstract FqeOperator class using a dummy class\"\"\"", "# # Unless required by applicable law or agreed to in writing, software", "for FqeOperator.\"\"\" from fqe.fqe_ops import fqe_operator from fqe import wavefunction def test_operator(): \"\"\"Testing", "express or implied. # See the License for the specific language governing permissions", "Version 2.0 (the \"License\"); # you may not use this file except in", "# Unless required by applicable law or agreed to in writing, software #", "disable=useless-super-delegation class TestFQEOperator(fqe_operator.FqeOperator): \"\"\" This class is just to make sure the abstract", "except in compliance with the License. # You may obtain a copy of", "FqeOperator class is tested. \"\"\" def contract( self, brastate: \"wavefunction.Wavefunction\", ketstate: \"wavefunction.Wavefunction\", )", "by applicable law or agreed to in writing, software # distributed under the", "ketstate: \"wavefunction.Wavefunction\", ) -> complex: return super().contract(brastate, ketstate) def representation(self) -> str: return", "sure the abstract FqeOperator class is tested. \"\"\" def contract( self, brastate: \"wavefunction.Wavefunction\",", "class TestFQEOperator(fqe_operator.FqeOperator): \"\"\" This class is just to make sure the abstract FqeOperator", "copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by", "either express or implied. 
# See the License for the specific language governing", "software # distributed under the License is distributed on an \"AS IS\" BASIS,", "FqeOperator.\"\"\" from fqe.fqe_ops import fqe_operator from fqe import wavefunction def test_operator(): \"\"\"Testing abstract", "# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0", "may not use this file except in compliance with the License. # You", "License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS", "class\"\"\" # pylint: disable=useless-super-delegation class TestFQEOperator(fqe_operator.FqeOperator): \"\"\" This class is just to make", "Licensed under the Apache License, Version 2.0 (the \"License\"); # you may not", "an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either", "# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to", "0.0j - test.contract(wfn, wfn)), 7) == 0 assert \"fqe-operator\" == test.representation() assert 0", "\"wavefunction.Wavefunction\", ) -> complex: return super().contract(brastate, ketstate) def representation(self) -> str: return super().representation()", "int: return super().rank() test = TestFQEOperator() wfn = wavefunction.Wavefunction([[1, 1, 1]]) assert round(abs(0.0", "file except in compliance with the License. # You may obtain a copy", ") -> complex: return super().contract(brastate, ketstate) def representation(self) -> str: return super().representation() def", "abstract FqeOperator class is tested. 
\"\"\" def contract( self, brastate: \"wavefunction.Wavefunction\", ketstate: \"wavefunction.Wavefunction\",", "brastate: \"wavefunction.Wavefunction\", ketstate: \"wavefunction.Wavefunction\", ) -> complex: return super().contract(brastate, ketstate) def representation(self) ->", "-> int: return super().rank() test = TestFQEOperator() wfn = wavefunction.Wavefunction([[1, 1, 1]]) assert", "-> complex: return super().contract(brastate, ketstate) def representation(self) -> str: return super().representation() def rank(self)", "under the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES", "License for the specific language governing permissions and # limitations under the License.", "dummy class\"\"\" # pylint: disable=useless-super-delegation class TestFQEOperator(fqe_operator.FqeOperator): \"\"\" This class is just to", "the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law", "TestFQEOperator(fqe_operator.FqeOperator): \"\"\" This class is just to make sure the abstract FqeOperator class", "\"wavefunction.Wavefunction\", ketstate: \"wavefunction.Wavefunction\", ) -> complex: return super().contract(brastate, ketstate) def representation(self) -> str:", "from fqe.fqe_ops import fqe_operator from fqe import wavefunction def test_operator(): \"\"\"Testing abstract FqeOperator", "the License. 
# You may obtain a copy of the License at #", "import fqe_operator from fqe import wavefunction def test_operator(): \"\"\"Testing abstract FqeOperator class using", "\"\"\"Testing abstract FqeOperator class using a dummy class\"\"\" # pylint: disable=useless-super-delegation class TestFQEOperator(fqe_operator.FqeOperator):", "to in writing, software # distributed under the License is distributed on an", "\"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express", "\"\"\"Tests for FqeOperator.\"\"\" from fqe.fqe_ops import fqe_operator from fqe import wavefunction def test_operator():", "# distributed under the License is distributed on an \"AS IS\" BASIS, #", "License. \"\"\"Tests for FqeOperator.\"\"\" from fqe.fqe_ops import fqe_operator from fqe import wavefunction def", "implied. # See the License for the specific language governing permissions and #", "= wavefunction.Wavefunction([[1, 1, 1]]) assert round(abs(0.0 + 0.0j - test.contract(wfn, wfn)), 7) ==", "\"License\"); # you may not use this file except in compliance with the", "is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF", "2020 Google LLC # Licensed under the Apache License, Version 2.0 (the \"License\");", "obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless", "language governing permissions and # limitations under the License. \"\"\"Tests for FqeOperator.\"\"\" from", "required by applicable law or agreed to in writing, software # distributed under", "# limitations under the License. \"\"\"Tests for FqeOperator.\"\"\" from fqe.fqe_ops import fqe_operator from", "and # limitations under the License. 
\"\"\"Tests for FqeOperator.\"\"\" from fqe.fqe_ops import fqe_operator", "rank(self) -> int: return super().rank() test = TestFQEOperator() wfn = wavefunction.Wavefunction([[1, 1, 1]])", "wavefunction.Wavefunction([[1, 1, 1]]) assert round(abs(0.0 + 0.0j - test.contract(wfn, wfn)), 7) == 0", "class is just to make sure the abstract FqeOperator class is tested. \"\"\"", "governing permissions and # limitations under the License. \"\"\"Tests for FqeOperator.\"\"\" from fqe.fqe_ops", "applicable law or agreed to in writing, software # distributed under the License", "return super().contract(brastate, ketstate) def representation(self) -> str: return super().representation() def rank(self) -> int:", "This class is just to make sure the abstract FqeOperator class is tested.", "str: return super().representation() def rank(self) -> int: return super().rank() test = TestFQEOperator() wfn", "\"\"\" def contract( self, brastate: \"wavefunction.Wavefunction\", ketstate: \"wavefunction.Wavefunction\", ) -> complex: return super().contract(brastate,", "Google LLC # Licensed under the Apache License, Version 2.0 (the \"License\"); #", "or agreed to in writing, software # distributed under the License is distributed", "return super().rank() test = TestFQEOperator() wfn = wavefunction.Wavefunction([[1, 1, 1]]) assert round(abs(0.0 +", "def representation(self) -> str: return super().representation() def rank(self) -> int: return super().rank() test", "or implied. # See the License for the specific language governing permissions and", "= TestFQEOperator() wfn = wavefunction.Wavefunction([[1, 1, 1]]) assert round(abs(0.0 + 0.0j - test.contract(wfn,", "a dummy class\"\"\" # pylint: disable=useless-super-delegation class TestFQEOperator(fqe_operator.FqeOperator): \"\"\" This class is just", "distributed under the License is distributed on an \"AS IS\" BASIS, # WITHOUT", "CONDITIONS OF ANY KIND, either express or implied. 
# See the License for", "Apache License, Version 2.0 (the \"License\"); # you may not use this file", "# Copyright 2020 Google LLC # Licensed under the Apache License, Version 2.0", "OR CONDITIONS OF ANY KIND, either express or implied. # See the License", "may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #", "self, brastate: \"wavefunction.Wavefunction\", ketstate: \"wavefunction.Wavefunction\", ) -> complex: return super().contract(brastate, ketstate) def representation(self)", "with the License. # You may obtain a copy of the License at", "http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing,", "in writing, software # distributed under the License is distributed on an \"AS", "def rank(self) -> int: return super().rank() test = TestFQEOperator() wfn = wavefunction.Wavefunction([[1, 1,", "test = TestFQEOperator() wfn = wavefunction.Wavefunction([[1, 1, 1]]) assert round(abs(0.0 + 0.0j -", "under the Apache License, Version 2.0 (the \"License\"); # you may not use" ]
[ "from ifcjson.ifc2json4 import IFC2JSON4 from ifcjson.ifc2json5a import IFC2JSON5a # from ifcjson.to_ifcopenshell import JSON2IFC", "<gh_stars>10-100 from ifcjson.ifc2json4 import IFC2JSON4 from ifcjson.ifc2json5a import IFC2JSON5a # from ifcjson.to_ifcopenshell import" ]
[ "return info['redis_version'] else: return info['Server']['redis_version'] class test_script(redisb.RedisScript): script = (redisb.read_lua_file('commands.utils'), '''\\ local js", "()) # ZSET SCRIPTING COMMANDS def test_zdiffstore(self): yield self.multi_async((self.make_zset('aa', {'a1': 1, 'a2': 1,", "self.client.zdiffstore('zb', ['ab', 'bb', 'cb'], withscores=True) self.assertEqual(n, 2) r = yield self.client.zrange('zb', 0, -1,", "self.assertRaises(redisb.RedisError, self.client.execute_script, 'foo', ()) # ZSET SCRIPTING COMMANDS def test_zdiffstore(self): yield self.multi_async((self.make_zset('aa', {'a1':", "[b'd']) def test_zdiffstore_withscores2(self): c = self.client yield self.multi_async((c.zadd('s1', 1, 'a', 2, 'b', 3,", "0) def testMove2List2(self): yield self.multi_async((self.client.lpush('foo',1,2,3,4,5), self.client.lpush('bla',4,5,6,7,8))) r = yield self.client.execute_script('move2set', ('foo','bla'), 's') self.assertEqual(len(r),", "[1,2,3,4,5,6,7,8]) def testMove2ZSet(self): client = self.client yield self.multi_async((client.zadd('foo',1,'a',2,'b',3,'c',4,'d',5,'e'), client.lpush('bla','d','e','f','g'))) r = yield client.execute_script('move2set',", "def testMove2List2(self): yield self.multi_async((self.client.lpush('foo',1,2,3,4,5), self.client.lpush('bla',4,5,6,7,8))) r = yield self.client.execute_script('move2set', ('foo','bla'), 's') self.assertEqual(len(r), 2)", "{'a1': 2, 'a3': 2, 'a4': 2}), self.make_zset('ca', {'a1': 6, 'a3': 5, 'a4': 4})))", "res = yield self.client.zpopbyscore('foo', 0, 4.5) self.assertEqual(res, [b'a', b'c', b'd']) rem = yield", "def test_zdiffstore2(self): c = self.client yield self.multi_async((c.zadd('s1', 1, 'a', 2, 'b', 3, 'c',", "yield self.client.zadd('foo', 1, 'a', 2, 'b', 3, 'c', 4, 'd', 5, 'e') res", "self.make_zset('bb', {'a1': 1, 'a3': 1, 'a4': 2}), self.make_zset('cb', {'a1': 3, 'a3': 1, 'a4':", "'bla2','foo', 'xxxx','moon', 'blaaaaaaaaaaaaaa','sun', 'xyyyy','earth') yield 
self.async.assertTrue(c.execute_command('MSET', *items)) N = yield c.delpattern('bla*') self.assertEqual(N, 4)", "yield self.client.zrange('za', 0, -1, withscores=True) self.assertEquals(list(r), [(b'a2', 1)]) def test_zdiffstore_withscores(self): yield self.multi_async((self.make_zset('ab', {'a1':", "getdb from stdnet.backends import redisb from stdnet.utils import test, flatzset def get_version(info): if", "test_zpop_byrank(self): yield self.client.zadd('foo',1,'a',2,'b',3,'c',4,'d',5,'e') res = yield self.client.zpopbyrank('foo',0) rem = yield self.client.zrange('foo',0,-1) self.assertEqual(len(rem),4) self.assertEqual(rem,[b'b',b'c',b'd',b'e'])", "from stdnet.backends import redisb from stdnet.utils import test, flatzset def get_version(info): if 'redis_version'", "[b'a',b'b',b'c',b'd',b'e',b'f',b'g']) def testMoveSetSet(self): r = yield self.multi_async((self.client.sadd('foo',1,2,3,4,5), self.client.sadd('bla',4,5,6,7,8))) r = yield self.client.execute_script('move2set', ('foo',", "b'e']) self.assertEqual(sorted(m2), [b'a',b'b',b'c',b'd',b'e',b'f',b'g']) def testMoveSetSet(self): r = yield self.multi_async((self.client.sadd('foo',1,2,3,4,5), self.client.sadd('bla',4,5,6,7,8))) r = yield", "'redis_version' in info: return info['redis_version'] else: return info['Server']['redis_version'] class test_script(redisb.RedisScript): script = (redisb.read_lua_file('commands.utils'),", "self.client.zpopbyscore('foo', 2) rem = yield self.client.zrange('foo', 0, -1) self.assertEqual(len(rem), 4) self.assertEqual(rem, [b'a', b'c',", "r = yield self.client.execute_script('move2set', ('foo', 'bla'), 's') self.assertEqual(len(r), 2) self.assertEqual(r[0], 2) self.assertEqual(r[1], 1)", "self.assertEqual(sorted(m1), [b'd', b'e']) self.assertEqual(sorted(m2), [b'a',b'b',b'c',b'd',b'e',b'f',b'g']) def testMoveSetSet(self): r = yield self.multi_async((self.client.sadd('foo',1,2,3,4,5), self.client.sadd('bla',4,5,6,7,8))) r", "'bla'), 's') self.assertEqual(len(r), 2) 
self.assertEqual(r[0], 2) self.assertEqual(r[1], 0) def testMove2List2(self): yield self.multi_async((self.client.lpush('foo',1,2,3,4,5), self.client.lpush('bla',4,5,6,7,8)))", "sha1(script.script.encode('utf-8')).hexdigest() self.assertEqual(script.sha1,sha) def test_del_pattern(self): c = self.client items = ('bla',1, 'bla1','ciao', 'bla2','foo', 'xxxx','moon',", "2) def testMove2Set(self): yield self.multi_async((self.client.sadd('foo', 1, 2, 3, 4, 5), self.client.lpush('bla', 4, 5,", "3, 'c', 4, 'd'), c.zadd('s2', 6, 'a', 2, 'b', 100, 'c'))) r =", "size = yield c.dbsize() self.assertTrue(size >= 0) def test_script_meta(self): script = redisb.get_script('test_script') self.assertTrue(script.script)", "= sha1(script.script.encode('utf-8')).hexdigest() self.assertEqual(script.sha1,sha) def test_del_pattern(self): c = self.client items = ('bla',1, 'bla1','ciao', 'bla2','foo',", "'''\\ local js = cjson.decode(ARGV[1]) return cjson.encode(js)''') def callback(self, request, result, args, **options):", "withscores=True) self.assertEqual(n, 2) r = yield self.client.zrange('zb', 0, -1, withscores=True) self.assertEquals(list(r), [(b'a2', 1),", "self.client.zadd('foo', 1, 'a', 2, 'b', 3, 'c', 4, 'd', 5, 'e') res =", "'bla'), self.client.sunionstore('res2', 'foo', 'bla'))) m1 = yield self.client.smembers('res1') m2 = yield self.client.smembers('res2') m1", "= yield c.zrange('s3', 0, -1, withscores=True) self.assertEqual(dict(r), {b'a': -5.0, b'c': -97.0, b'd': 4.0})", "2, 'a3': 2, 'a4': 2}), self.make_zset('ca', {'a1': 6, 'a3': 5, 'a4': 4}))) n", "1, 'a4': 2}), self.make_zset('cb', {'a1': 3, 'a3': 1, 'a4': 4}))) n = yield", "self.assertEqual(r[1], 2) def test_bad_execute_script(self): self.assertRaises(redisb.RedisError, self.client.execute_script, 'foo', ()) # ZSET SCRIPTING COMMANDS def", "yield c.zrange('s3', 0, -1, withscores=True) self.assertEqual(dict(r), {b'a': -5.0, b'c': -97.0, b'd': 4.0}) def", "1, 'a3': 2}), self.make_zset('bb', {'a1': 1, 'a3': 1, 'a4': 2}), 
self.make_zset('cb', {'a1': 3,", "2) self.assertEqual(r[0], 2) self.assertEqual(r[1], 1) yield self.multi_async((self.client.sinterstore('res1', 'foo', 'bla'), self.client.sunionstore('res2', 'foo', 'bla'))) m1", "'d'), c.zadd('s2', 6, 'a', 2, 'b', 100, 'c'))) r = yield c.zdiffstore('s3', ('s1',", "yield self.async.assertFalse(c.exists('blaaaaaaaaaaaaaa')) yield self.async.assertEqual(c.get('xxxx'), b'moon') N = yield c.delpattern('x*') self.assertEqual(N, 2) def testMove2Set(self):", "for redis client.''' import json from hashlib import sha1 from stdnet import getdb", "yield c.zdiffstore('s3', ('s1', 's2'), withscores=True) self.async.assertEqual(c.zcard('s3'), 3) r = yield c.zrange('s3', 0, -1,", "*l) self.assertEqual(self.client.llen(name), len(l)) def make_zset(self, name, d): self.client.zadd(name, *flatzset(kwargs=d)) class TestExtraClientCommands(TestCase): def test_coverage(self):", "self.assertEqual(len(rem), 4) self.assertEqual(rem, [b'a', b'c', b'd', b'e']) self.assertEqual(res, [b'b']) res = yield self.client.zpopbyscore('foo',", "yield self.client.zrange('foo', 0, -1) self.assertEqual(len(rem), 4) self.assertEqual(rem, [b'a', b'c', b'd', b'e']) self.assertEqual(res, [b'b'])", "return self.client.flushdb() def make_hash(self, key, d): for k, v in d.items(): self.client.hset(key, k,", "0) def test_script_meta(self): script = redisb.get_script('test_script') self.assertTrue(script.script) sha = sha1(script.script.encode('utf-8')).hexdigest() self.assertEqual(script.sha1,sha) def test_del_pattern(self):", "2) self.assertEqual(r[1], 0) def testMove2List2(self): yield self.multi_async((self.client.lpush('foo',1,2,3,4,5), self.client.lpush('bla',4,5,6,7,8))) r = yield self.client.execute_script('move2set', ('foo','bla'),", "def test_zpop_byscore(self): yield self.client.zadd('foo', 1, 'a', 2, 'b', 3, 'c', 4, 'd', 5,", "yield self.multi_async((self.client.sadd('foo',1,2,3,4,5), self.client.sadd('bla',4,5,6,7,8))) r = yield 
self.client.execute_script('move2set', ('foo', 'bla'), 's') self.assertEqual(len(r), 2) self.assertEqual(r[0],", "0, -1) self.assertEqual(r, [b'd']) def test_zdiffstore_withscores2(self): c = self.client yield self.multi_async((c.zadd('s1', 1, 'a',", "for r in m1)) m2 = sorted((int(r) for r in m2)) self.assertEqual(m1, [4,5])", "'a3': 2, 'a4': 2}), self.make_zset('ca', {'a1': 6, 'a3': 5, 'a4': 4}))) n =", "script = (redisb.read_lua_file('commands.utils'), '''\\ local js = cjson.decode(ARGV[1]) return cjson.encode(js)''') def callback(self, request,", "b'c': -97.0, b'd': 4.0}) def test_zpop_byrank(self): yield self.client.zadd('foo',1,'a',2,'b',3,'c',4,'d',5,'e') res = yield self.client.zpopbyrank('foo',0) rem", "client.zunionstore('res2', ('foo', 'bla')))) m1 = yield client.zrange('res1', 0, -1) m2 = yield client.zrange('res2',", "1, 'a', 2, 'b', 3, 'c', 4, 'd'), c.zadd('s2', 6, 'a', 2, 'b',", "l = tuple(l) self.client.rpush(name, *l) self.assertEqual(self.client.llen(name), len(l)) def make_zset(self, name, d): self.client.zadd(name, *flatzset(kwargs=d))", "2}), self.make_zset('cb', {'a1': 3, 'a3': 1, 'a4': 4}))) n = yield self.client.zdiffstore('zb', ['ab',", "def callback(self, request, result, args, **options): return json.loads(result.decode(request.encoding)) class TestCase(test.TestWrite): multipledb = 'redis'", "2) self.assertEqual(r[1], 2) def test_bad_execute_script(self): self.assertRaises(redisb.RedisError, self.client.execute_script, 'foo', ()) # ZSET SCRIPTING COMMANDS", "'c', 4, 'd'), c.zadd('s2', 6, 'a', 9, 'b', 100, 'c'))) r = yield", "-5.0, b'c': -97.0, b'd': 4.0}) def test_zpop_byrank(self): yield self.client.zadd('foo',1,'a',2,'b',3,'c',4,'d',5,'e') res = yield self.client.zpopbyrank('foo',0)", "self.client.smembers('res1') m2 = yield self.client.smembers('res2') m1 = sorted((int(r) for r in m1)) m2", "def testMove2Set(self): yield self.multi_async((self.client.sadd('foo', 1, 2, 3, 4, 5), self.client.lpush('bla', 4, 5, 6,", "m2 = sorted((int(r) 
for r in m2)) self.assertEqual(m1, [4,5]) self.assertEqual(m2, [1,2,3,4,5,6,7,8]) def testMove2ZSet(self):", "= sorted((int(r) for r in m1)) m2 = sorted((int(r) for r in m2))", "'a3': 5, 'a4': 4}))) n = yield self.client.zdiffstore('za', ['aa', 'ba', 'ca']) self.assertEqual(n, 1)", "yield self.client.zrange('foo',0,-1) self.assertEqual(rem,[b'e']) def test_zpop_byscore(self): yield self.client.zadd('foo', 1, 'a', 2, 'b', 3, 'c',", "= yield self.client.zpopbyscore('foo', 0, 4.5) self.assertEqual(res, [b'a', b'c', b'd']) rem = yield self.client.zrange('foo',", "'a3': 1, 'a4': 2}), self.make_zset('cb', {'a1': 3, 'a3': 1, 'a4': 4}))) n =", "m2 = yield client.zrange('res2', 0, -1) self.assertEqual(sorted(m1), [b'd', b'e']) self.assertEqual(sorted(m2), [b'a',b'b',b'c',b'd',b'e',b'f',b'g']) def testMoveSetSet(self):", ">= 0) def test_script_meta(self): script = redisb.get_script('test_script') self.assertTrue(script.script) sha = sha1(script.script.encode('utf-8')).hexdigest() self.assertEqual(script.sha1,sha) def", "def test_coverage(self): c = self.backend.client self.assertEqual(c.prefix, '') size = yield c.dbsize() self.assertTrue(size >=", "import json from hashlib import sha1 from stdnet import getdb from stdnet.backends import", "yield self.async.assertFalse(c.exists('bla1')) yield self.async.assertFalse(c.exists('bla2')) yield self.async.assertFalse(c.exists('blaaaaaaaaaaaaaa')) yield self.async.assertEqual(c.get('xxxx'), b'moon') N = yield c.delpattern('x*')", "yield self.multi_async((client.zinterstore('res1', ('foo', 'bla')), client.zunionstore('res2', ('foo', 'bla')))) m1 = yield client.zrange('res1', 0, -1)", "'bb', 'cb'], withscores=True) self.assertEqual(n, 2) r = yield self.client.zrange('zb', 0, -1, withscores=True) self.assertEquals(list(r),", "return json.loads(result.decode(request.encoding)) class TestCase(test.TestWrite): multipledb = 'redis' def setUp(self): client = self.backend.client self.client", "6, 'a2': 1, 'a3': 2}), self.make_zset('bb', 
{'a1': 1, 'a3': 1, 'a4': 2}), self.make_zset('cb',", "local js = cjson.decode(ARGV[1]) return cjson.encode(js)''') def callback(self, request, result, args, **options): return", "{'a1': 1, 'a2': 1, 'a3': 1}), self.make_zset('ba', {'a1': 2, 'a3': 2, 'a4': 2}),", "yield self.client.zrange('zb', 0, -1, withscores=True) self.assertEquals(list(r), [(b'a2', 1), (b'a1', 2)]) def test_zdiffstore2(self): c", "r = yield c.zdiffstore('s3', ('s1', 's2'), withscores=True) self.async.assertEqual(c.zcard('s3'), 3) r = yield c.zrange('s3',", "= (redisb.read_lua_file('commands.utils'), '''\\ local js = cjson.decode(ARGV[1]) return cjson.encode(js)''') def callback(self, request, result,", "sha1 from stdnet import getdb from stdnet.backends import redisb from stdnet.utils import test,", "self.assertEqual(res,[b'a']) res = yield self.client.zpopbyrank('foo',0,2) self.assertEqual(res,[b'b',b'c',b'd']) rem = yield self.client.zrange('foo',0,-1) self.assertEqual(rem,[b'e']) def test_zpop_byscore(self):", "2, 3, 4, 5), self.client.lpush('bla', 4, 5, 6, 7, 8))) r = yield", "in m2)) self.assertEqual(m1, [4,5]) self.assertEqual(m2, [1,2,3,4,5,6,7,8]) def testMove2ZSet(self): client = self.client yield self.multi_async((client.zadd('foo',1,'a',2,'b',3,'c',4,'d',5,'e'),", "cjson.decode(ARGV[1]) return cjson.encode(js)''') def callback(self, request, result, args, **options): return json.loads(result.decode(request.encoding)) class TestCase(test.TestWrite):", "0, -1) self.assertEqual(sorted(m1), [b'd', b'e']) self.assertEqual(sorted(m2), [b'a',b'b',b'c',b'd',b'e',b'f',b'g']) def testMoveSetSet(self): r = yield self.multi_async((self.client.sadd('foo',1,2,3,4,5),", "self.multi_async((client.zinterstore('res1', ('foo', 'bla')), client.zunionstore('res2', ('foo', 'bla')))) m1 = yield client.zrange('res1', 0, -1) m2", "test_script(redisb.RedisScript): script = (redisb.read_lua_file('commands.utils'), '''\\ local js = cjson.decode(ARGV[1]) return cjson.encode(js)''') def callback(self,", "2) 
self.assertEqual(r[1], 1) yield self.multi_async((self.client.sinterstore('res1', 'foo', 'bla'), self.client.sunionstore('res2', 'foo', 'bla'))) m1 = yield", "def test_del_pattern(self): c = self.client items = ('bla',1, 'bla1','ciao', 'bla2','foo', 'xxxx','moon', 'blaaaaaaaaaaaaaa','sun', 'xyyyy','earth')", "0, -1, withscores=True) self.assertEquals(list(r), [(b'a2', 1), (b'a1', 2)]) def test_zdiffstore2(self): c = self.client", "self.assertEqual(N, 4) yield self.async.assertFalse(c.exists('bla')) yield self.async.assertFalse(c.exists('bla1')) yield self.async.assertFalse(c.exists('bla2')) yield self.async.assertFalse(c.exists('blaaaaaaaaaaaaaa')) yield self.async.assertEqual(c.get('xxxx'), b'moon')", "yield self.async.assertEqual(c.get('xxxx'), b'moon') N = yield c.delpattern('x*') self.assertEqual(N, 2) def testMove2Set(self): yield self.multi_async((self.client.sadd('foo',", "yield c.delpattern('x*') self.assertEqual(N, 2) def testMove2Set(self): yield self.multi_async((self.client.sadd('foo', 1, 2, 3, 4, 5),", "return info['Server']['redis_version'] class test_script(redisb.RedisScript): script = (redisb.read_lua_file('commands.utils'), '''\\ local js = cjson.decode(ARGV[1]) return", "else: return info['Server']['redis_version'] class test_script(redisb.RedisScript): script = (redisb.read_lua_file('commands.utils'), '''\\ local js = cjson.decode(ARGV[1])", "self.multi_async((c.zadd('s1', 1, 'a', 2, 'b', 3, 'c', 4, 'd'), c.zadd('s2', 6, 'a', 9,", "= yield client.zrange('res2', 0, -1) self.assertEqual(sorted(m1), [b'd', b'e']) self.assertEqual(sorted(m2), [b'a',b'b',b'c',b'd',b'e',b'f',b'g']) def testMoveSetSet(self): r", "1) yield self.multi_async((client.zinterstore('res1', ('foo', 'bla')), client.zunionstore('res2', ('foo', 'bla')))) m1 = yield client.zrange('res1', 0,", "self.assertEqual(r[0], 2) self.assertEqual(r[1], 1) yield self.multi_async((client.zinterstore('res1', ('foo', 'bla')), client.zunionstore('res2', ('foo', 'bla')))) m1 =", 
"self.client.zrange('foo', 0, -1) self.assertEqual(len(rem), 4) self.assertEqual(rem, [b'a', b'c', b'd', b'e']) self.assertEqual(res, [b'b']) res", "= yield self.client.execute_script('move2set', ('foo', 'bla'), 's') self.assertEqual(len(r), 2) self.assertEqual(r[0], 2) self.assertEqual(r[1], 0) def", "'bla'))) m1 = yield self.client.smembers('res1') m2 = yield self.client.smembers('res2') m1 = sorted((int(r) for", "yield self.client.zpopbyscore('foo', 2) rem = yield self.client.zrange('foo', 0, -1) self.assertEqual(len(rem), 4) self.assertEqual(rem, [b'a',", "request, result, args, **options): return json.loads(result.decode(request.encoding)) class TestCase(test.TestWrite): multipledb = 'redis' def setUp(self):", "yield c.dbsize() self.assertTrue(size >= 0) def test_script_meta(self): script = redisb.get_script('test_script') self.assertTrue(script.script) sha =", "items = ('bla',1, 'bla1','ciao', 'bla2','foo', 'xxxx','moon', 'blaaaaaaaaaaaaaa','sun', 'xyyyy','earth') yield self.async.assertTrue(c.execute_command('MSET', *items)) N =", "self.multi_async((self.client.sadd('foo', 1, 2, 3, 4, 5), self.client.lpush('bla', 4, 5, 6, 7, 8))) r", "'bla')), client.zunionstore('res2', ('foo', 'bla')))) m1 = yield client.zrange('res1', 0, -1) m2 = yield", "'s') self.assertEqual(len(r), 2) self.assertEqual(r[0], 2) self.assertEqual(r[1], 0) def testMove2List2(self): yield self.multi_async((self.client.lpush('foo',1,2,3,4,5), self.client.lpush('bla',4,5,6,7,8))) r", "[(b'a2', 1), (b'a1', 2)]) def test_zdiffstore2(self): c = self.client yield self.multi_async((c.zadd('s1', 1, 'a',", "self.assertTrue(script.script) sha = sha1(script.script.encode('utf-8')).hexdigest() self.assertEqual(script.sha1,sha) def test_del_pattern(self): c = self.client items = ('bla',1,", "6, 'a', 2, 'b', 100, 'c'))) r = yield c.zdiffstore('s3', ('s1', 's2'), withscores=True)", "self.client.smembers('res2') m1 = sorted((int(r) for r in m1)) m2 = sorted((int(r) for r", "def testMove2ZSet(self): client = 
self.client yield self.multi_async((client.zadd('foo',1,'a',2,'b',3,'c',4,'d',5,'e'), client.lpush('bla','d','e','f','g'))) r = yield client.execute_script('move2set', ('foo','bla'),", "def make_list(self, name, l): l = tuple(l) self.client.rpush(name, *l) self.assertEqual(self.client.llen(name), len(l)) def make_zset(self,", "withscores=True) self.async.assertEqual(c.zcard('s3'), 3) r = yield c.zrange('s3', 0, -1, withscores=True) self.assertEqual(dict(r), {b'a': -5.0,", "yield self.client.smembers('res2') m1 = sorted((int(r) for r in m1)) m2 = sorted((int(r) for", "self.client.zpopbyrank('foo',0) rem = yield self.client.zrange('foo',0,-1) self.assertEqual(len(rem),4) self.assertEqual(rem,[b'b',b'c',b'd',b'e']) self.assertEqual(res,[b'a']) res = yield self.client.zpopbyrank('foo',0,2) self.assertEqual(res,[b'b',b'c',b'd'])", "self.assertEqual(res,[b'b',b'c',b'd']) rem = yield self.client.zrange('foo',0,-1) self.assertEqual(rem,[b'e']) def test_zpop_byscore(self): yield self.client.zadd('foo', 1, 'a', 2,", "sorted((int(r) for r in m1)) m2 = sorted((int(r) for r in m2)) self.assertEqual(m1,", "self.client.sunionstore('res2', 'foo', 'bla'))) m1 = yield self.client.smembers('res1') m2 = yield self.client.smembers('res2') m1 =", "-1) self.assertEqual(r, [b'd']) def test_zdiffstore_withscores2(self): c = self.client yield self.multi_async((c.zadd('s1', 1, 'a', 2,", "1, 'a', 2, 'b', 3, 'c', 4, 'd', 5, 'e') res = yield", "'d'), c.zadd('s2', 6, 'a', 9, 'b', 100, 'c'))) r = yield c.zdiffstore('s3', ('s1',", "yield self.async.assertTrue(c.execute_command('MSET', *items)) N = yield c.delpattern('bla*') self.assertEqual(N, 4) yield self.async.assertFalse(c.exists('bla')) yield self.async.assertFalse(c.exists('bla1'))", "v in d.items(): self.client.hset(key, k, v) def make_list(self, name, l): l = tuple(l)", "= yield self.client.zdiffstore('za', ['aa', 'ba', 'ca']) self.assertEqual(n, 1) r = yield self.client.zrange('za', 0,", "def make_zset(self, name, d): 
self.client.zadd(name, *flatzset(kwargs=d)) class TestExtraClientCommands(TestCase): def test_coverage(self): c = self.backend.client", "info['redis_version'] else: return info['Server']['redis_version'] class test_script(redisb.RedisScript): script = (redisb.read_lua_file('commands.utils'), '''\\ local js =", "def test_script_meta(self): script = redisb.get_script('test_script') self.assertTrue(script.script) sha = sha1(script.script.encode('utf-8')).hexdigest() self.assertEqual(script.sha1,sha) def test_del_pattern(self): c", "2, 'b', 100, 'c'))) r = yield c.zdiffstore('s3', ('s1', 's2'), withscores=True) self.async.assertEqual(c.zcard('s3'), 3)", "stdnet import getdb from stdnet.backends import redisb from stdnet.utils import test, flatzset def", "cjson.encode(js)''') def callback(self, request, result, args, **options): return json.loads(result.decode(request.encoding)) class TestCase(test.TestWrite): multipledb =", "'xyyyy','earth') yield self.async.assertTrue(c.execute_command('MSET', *items)) N = yield c.delpattern('bla*') self.assertEqual(N, 4) yield self.async.assertFalse(c.exists('bla')) yield", "self.make_zset('ba', {'a1': 2, 'a3': 2, 'a4': 2}), self.make_zset('ca', {'a1': 6, 'a3': 5, 'a4':", "r = yield self.client.execute_script('move2set', ('foo', 'bla'), 's') self.assertEqual(len(r), 2) self.assertEqual(r[0], 2) self.assertEqual(r[1], 0)", "-97.0, b'd': 4.0}) def test_zpop_byrank(self): yield self.client.zadd('foo',1,'a',2,'b',3,'c',4,'d',5,'e') res = yield self.client.zpopbyrank('foo',0) rem =", "yield self.client.execute_script('move2set', ('foo', 'bla'), 's') self.assertEqual(len(r), 2) self.assertEqual(r[0], 2) self.assertEqual(r[1], 0) def testMove2List2(self):", "self.multi_async((self.client.lpush('foo',1,2,3,4,5), self.client.lpush('bla',4,5,6,7,8))) r = yield self.client.execute_script('move2set', ('foo','bla'), 's') self.assertEqual(len(r), 2) self.assertEqual(r[0], 2) self.assertEqual(r[1],", "client.prefixed(self.namespace) def 
tearDown(self): return self.client.flushdb() def make_hash(self, key, d): for k, v in", "class test_script(redisb.RedisScript): script = (redisb.read_lua_file('commands.utils'), '''\\ local js = cjson.decode(ARGV[1]) return cjson.encode(js)''') def", "result, args, **options): return json.loads(result.decode(request.encoding)) class TestCase(test.TestWrite): multipledb = 'redis' def setUp(self): client", "{'a1': 1, 'a3': 1, 'a4': 2}), self.make_zset('cb', {'a1': 3, 'a3': 1, 'a4': 4})))", "'ba', 'ca']) self.assertEqual(n, 1) r = yield self.client.zrange('za', 0, -1, withscores=True) self.assertEquals(list(r), [(b'a2',", "self.client items = ('bla',1, 'bla1','ciao', 'bla2','foo', 'xxxx','moon', 'blaaaaaaaaaaaaaa','sun', 'xyyyy','earth') yield self.async.assertTrue(c.execute_command('MSET', *items)) N", "5, 6, 7, 8))) r = yield self.client.execute_script('move2set', ('foo', 'bla'), 's') self.assertEqual(len(r), 2)", "self.assertEqual(len(r), 2) self.assertEqual(r[0], 2) self.assertEqual(r[1], 1) yield self.multi_async((self.client.sinterstore('res1', 'foo', 'bla'), self.client.sunionstore('res2', 'foo', 'bla')))", "yield self.multi_async((self.client.sadd('foo', 1, 2, 3, 4, 5), self.client.lpush('bla', 4, 5, 6, 7, 8)))", "2, 'b', 3, 'c', 4, 'd'), c.zadd('s2', 6, 'a', 9, 'b', 100, 'c')))", "'a2': 1, 'a3': 1}), self.make_zset('ba', {'a1': 2, 'a3': 2, 'a4': 2}), self.make_zset('ca', {'a1':", "self.client.zdiffstore('za', ['aa', 'ba', 'ca']) self.assertEqual(n, 1) r = yield self.client.zrange('za', 0, -1, withscores=True)", "1, 'a3': 1, 'a4': 2}), self.make_zset('cb', {'a1': 3, 'a3': 1, 'a4': 4}))) n", "self.async.assertEqual(c.zcard('s3'), 1) r = yield c.zrange('s3', 0, -1) self.assertEqual(r, [b'd']) def test_zdiffstore_withscores2(self): c", "yield self.multi_async((client.zadd('foo',1,'a',2,'b',3,'c',4,'d',5,'e'), client.lpush('bla','d','e','f','g'))) r = yield client.execute_script('move2set', ('foo','bla'), 'z') self.assertEqual(len(r), 2) self.assertEqual(r[0], 
2)", "3, 'c', 4, 'd'), c.zadd('s2', 6, 'a', 9, 'b', 100, 'c'))) r =", "yield self.client.execute_script('move2set', ('foo', 'bla'), 's') self.assertEqual(len(r), 2) self.assertEqual(r[0], 2) self.assertEqual(r[1], 1) yield self.multi_async((self.client.sinterstore('res1',", "self.multi_async((self.client.sinterstore('res1', 'foo', 'bla'), self.client.sunionstore('res2', 'foo', 'bla'))) m1 = yield self.client.smembers('res1') m2 = yield", "2}), self.make_zset('bb', {'a1': 1, 'a3': 1, 'a4': 2}), self.make_zset('cb', {'a1': 3, 'a3': 1,", "N = yield c.delpattern('x*') self.assertEqual(N, 2) def testMove2Set(self): yield self.multi_async((self.client.sadd('foo', 1, 2, 3,", "'redis' def setUp(self): client = self.backend.client self.client = client.prefixed(self.namespace) def tearDown(self): return self.client.flushdb()", "= yield c.zdiffstore('s3', ('s1', 's2')) self.async.assertEqual(c.zcard('s3'), 1) r = yield c.zrange('s3', 0, -1)", "'bla1','ciao', 'bla2','foo', 'xxxx','moon', 'blaaaaaaaaaaaaaa','sun', 'xyyyy','earth') yield self.async.assertTrue(c.execute_command('MSET', *items)) N = yield c.delpattern('bla*') self.assertEqual(N,", "'a', 2, 'b', 3, 'c', 4, 'd'), c.zadd('s2', 6, 'a', 2, 'b', 100,", "100, 'c'))) r = yield c.zdiffstore('s3', ('s1', 's2')) self.async.assertEqual(c.zcard('s3'), 1) r = yield", "self.client = client.prefixed(self.namespace) def tearDown(self): return self.client.flushdb() def make_hash(self, key, d): for k,", "= self.client yield self.multi_async((client.zadd('foo',1,'a',2,'b',3,'c',4,'d',5,'e'), client.lpush('bla','d','e','f','g'))) r = yield client.execute_script('move2set', ('foo','bla'), 'z') self.assertEqual(len(r), 2)", "'s') self.assertEqual(len(r), 2) self.assertEqual(r[0], 2) self.assertEqual(r[1], 2) def test_bad_execute_script(self): self.assertRaises(redisb.RedisError, self.client.execute_script, 'foo', ())", "def test_zdiffstore_withscores2(self): c = self.client yield self.multi_async((c.zadd('s1', 1, 'a', 2, 'b', 3, 'c',", 
"['aa', 'ba', 'ca']) self.assertEqual(n, 1) r = yield self.client.zrange('za', 0, -1, withscores=True) self.assertEquals(list(r),", "-1, withscores=True) self.assertEqual(dict(r), {b'a': -5.0, b'c': -97.0, b'd': 4.0}) def test_zpop_byrank(self): yield self.client.zadd('foo',1,'a',2,'b',3,'c',4,'d',5,'e')", "self.assertEquals(list(r), [(b'a2', 1), (b'a1', 2)]) def test_zdiffstore2(self): c = self.client yield self.multi_async((c.zadd('s1', 1,", "('foo', 'bla'), 's') self.assertEqual(len(r), 2) self.assertEqual(r[0], 2) self.assertEqual(r[1], 0) def testMove2List2(self): yield self.multi_async((self.client.lpush('foo',1,2,3,4,5),", "1, 'a', 2, 'b', 3, 'c', 4, 'd'), c.zadd('s2', 6, 'a', 9, 'b',", "2) self.assertEqual(r[0], 2) self.assertEqual(r[1], 2) def test_bad_execute_script(self): self.assertRaises(redisb.RedisError, self.client.execute_script, 'foo', ()) # ZSET", "self.assertEqual(rem, [b'a', b'c', b'd', b'e']) self.assertEqual(res, [b'b']) res = yield self.client.zpopbyscore('foo', 0, 4.5)", "'cb'], withscores=True) self.assertEqual(n, 2) r = yield self.client.zrange('zb', 0, -1, withscores=True) self.assertEquals(list(r), [(b'a2',", "client.zrange('res1', 0, -1) m2 = yield client.zrange('res2', 0, -1) self.assertEqual(sorted(m1), [b'd', b'e']) self.assertEqual(sorted(m2),", "info: return info['redis_version'] else: return info['Server']['redis_version'] class test_script(redisb.RedisScript): script = (redisb.read_lua_file('commands.utils'), '''\\ local", "= tuple(l) self.client.rpush(name, *l) self.assertEqual(self.client.llen(name), len(l)) def make_zset(self, name, d): self.client.zadd(name, *flatzset(kwargs=d)) class", "test, flatzset def get_version(info): if 'redis_version' in info: return info['redis_version'] else: return info['Server']['redis_version']", "testMoveSetSet(self): r = yield self.multi_async((self.client.sadd('foo',1,2,3,4,5), self.client.sadd('bla',4,5,6,7,8))) r = yield self.client.execute_script('move2set', ('foo', 'bla'), 's')", "= 
self.backend.client self.client = client.prefixed(self.namespace) def tearDown(self): return self.client.flushdb() def make_hash(self, key, d):", "-1, withscores=True) self.assertEquals(list(r), [(b'a2', 1)]) def test_zdiffstore_withscores(self): yield self.multi_async((self.make_zset('ab', {'a1': 6, 'a2': 1,", "'foo', 'bla'), self.client.sunionstore('res2', 'foo', 'bla'))) m1 = yield self.client.smembers('res1') m2 = yield self.client.smembers('res2')", "0, -1, withscores=True) self.assertEquals(list(r), [(b'a2', 1)]) def test_zdiffstore_withscores(self): yield self.multi_async((self.make_zset('ab', {'a1': 6, 'a2':", "n = yield self.client.zdiffstore('zb', ['ab', 'bb', 'cb'], withscores=True) self.assertEqual(n, 2) r = yield", "k, v) def make_list(self, name, l): l = tuple(l) self.client.rpush(name, *l) self.assertEqual(self.client.llen(name), len(l))", "('foo', 'bla')))) m1 = yield client.zrange('res1', 0, -1) m2 = yield client.zrange('res2', 0,", "self.assertEqual(sorted(m2), [b'a',b'b',b'c',b'd',b'e',b'f',b'g']) def testMoveSetSet(self): r = yield self.multi_async((self.client.sadd('foo',1,2,3,4,5), self.client.sadd('bla',4,5,6,7,8))) r = yield self.client.execute_script('move2set',", "b'moon') N = yield c.delpattern('x*') self.assertEqual(N, 2) def testMove2Set(self): yield self.multi_async((self.client.sadd('foo', 1, 2,", "= yield self.client.smembers('res1') m2 = yield self.client.smembers('res2') m1 = sorted((int(r) for r in", "**options): return json.loads(result.decode(request.encoding)) class TestCase(test.TestWrite): multipledb = 'redis' def setUp(self): client = self.backend.client", "'d', 5, 'e') res = yield self.client.zpopbyscore('foo', 2) rem = yield self.client.zrange('foo', 0,", "self.async.assertFalse(c.exists('bla')) yield self.async.assertFalse(c.exists('bla1')) yield self.async.assertFalse(c.exists('bla2')) yield self.async.assertFalse(c.exists('blaaaaaaaaaaaaaa')) yield self.async.assertEqual(c.get('xxxx'), b'moon') N = yield", "yield 
client.zrange('res2', 0, -1) self.assertEqual(sorted(m1), [b'd', b'e']) self.assertEqual(sorted(m2), [b'a',b'b',b'c',b'd',b'e',b'f',b'g']) def testMoveSetSet(self): r =", "'''Test additional commands for redis client.''' import json from hashlib import sha1 from", "2)]) def test_zdiffstore2(self): c = self.client yield self.multi_async((c.zadd('s1', 1, 'a', 2, 'b', 3,", "self.client.hset(key, k, v) def make_list(self, name, l): l = tuple(l) self.client.rpush(name, *l) self.assertEqual(self.client.llen(name),", "c.delpattern('x*') self.assertEqual(N, 2) def testMove2Set(self): yield self.multi_async((self.client.sadd('foo', 1, 2, 3, 4, 5), self.client.lpush('bla',", "= yield self.client.zrange('zb', 0, -1, withscores=True) self.assertEquals(list(r), [(b'a2', 1), (b'a1', 2)]) def test_zdiffstore2(self):", "4.0}) def test_zpop_byrank(self): yield self.client.zadd('foo',1,'a',2,'b',3,'c',4,'d',5,'e') res = yield self.client.zpopbyrank('foo',0) rem = yield self.client.zrange('foo',0,-1)", "= yield self.client.smembers('res2') m1 = sorted((int(r) for r in m1)) m2 = sorted((int(r)", "self.assertEqual(r[1], 1) yield self.multi_async((client.zinterstore('res1', ('foo', 'bla')), client.zunionstore('res2', ('foo', 'bla')))) m1 = yield client.zrange('res1',", "self.client yield self.multi_async((c.zadd('s1', 1, 'a', 2, 'b', 3, 'c', 4, 'd'), c.zadd('s2', 6,", "sha = sha1(script.script.encode('utf-8')).hexdigest() self.assertEqual(script.sha1,sha) def test_del_pattern(self): c = self.client items = ('bla',1, 'bla1','ciao',", "= yield self.client.zpopbyrank('foo',0,2) self.assertEqual(res,[b'b',b'c',b'd']) rem = yield self.client.zrange('foo',0,-1) self.assertEqual(rem,[b'e']) def test_zpop_byscore(self): yield self.client.zadd('foo',", "= cjson.decode(ARGV[1]) return cjson.encode(js)''') def callback(self, request, result, args, **options): return json.loads(result.decode(request.encoding)) class", "self.assertEqual(len(r), 2) self.assertEqual(r[0], 2) self.assertEqual(r[1], 2) 
def test_bad_execute_script(self): self.assertRaises(redisb.RedisError, self.client.execute_script, 'foo', ()) #", "json.loads(result.decode(request.encoding)) class TestCase(test.TestWrite): multipledb = 'redis' def setUp(self): client = self.backend.client self.client =", "yield self.multi_async((self.client.sinterstore('res1', 'foo', 'bla'), self.client.sunionstore('res2', 'foo', 'bla'))) m1 = yield self.client.smembers('res1') m2 =", "stdnet.utils import test, flatzset def get_version(info): if 'redis_version' in info: return info['redis_version'] else:", "6, 'a', 9, 'b', 100, 'c'))) r = yield c.zdiffstore('s3', ('s1', 's2')) self.async.assertEqual(c.zcard('s3'),", "= yield self.client.zpopbyrank('foo',0) rem = yield self.client.zrange('foo',0,-1) self.assertEqual(len(rem),4) self.assertEqual(rem,[b'b',b'c',b'd',b'e']) self.assertEqual(res,[b'a']) res = yield", "('foo', 'bla'), 's') self.assertEqual(len(r), 2) self.assertEqual(r[0], 2) self.assertEqual(r[1], 1) yield self.multi_async((self.client.sinterstore('res1', 'foo', 'bla'),", "r = yield self.client.execute_script('move2set', ('foo','bla'), 's') self.assertEqual(len(r), 2) self.assertEqual(r[0], 2) self.assertEqual(r[1], 2) def", "self.assertEqual(rem,[b'b',b'c',b'd',b'e']) self.assertEqual(res,[b'a']) res = yield self.client.zpopbyrank('foo',0,2) self.assertEqual(res,[b'b',b'c',b'd']) rem = yield self.client.zrange('foo',0,-1) self.assertEqual(rem,[b'e']) def", "test_zdiffstore_withscores2(self): c = self.client yield self.multi_async((c.zadd('s1', 1, 'a', 2, 'b', 3, 'c', 4,", "self.multi_async((client.zadd('foo',1,'a',2,'b',3,'c',4,'d',5,'e'), client.lpush('bla','d','e','f','g'))) r = yield client.execute_script('move2set', ('foo','bla'), 'z') self.assertEqual(len(r), 2) self.assertEqual(r[0], 2) self.assertEqual(r[1],", "'blaaaaaaaaaaaaaa','sun', 'xyyyy','earth') yield self.async.assertTrue(c.execute_command('MSET', *items)) N = yield c.delpattern('bla*') self.assertEqual(N, 4) yield 
self.async.assertFalse(c.exists('bla'))", "1, 'a3': 1}), self.make_zset('ba', {'a1': 2, 'a3': 2, 'a4': 2}), self.make_zset('ca', {'a1': 6,", "self.client.execute_script('move2set', ('foo','bla'), 's') self.assertEqual(len(r), 2) self.assertEqual(r[0], 2) self.assertEqual(r[1], 2) def test_bad_execute_script(self): self.assertRaises(redisb.RedisError, self.client.execute_script,", "= ('bla',1, 'bla1','ciao', 'bla2','foo', 'xxxx','moon', 'blaaaaaaaaaaaaaa','sun', 'xyyyy','earth') yield self.async.assertTrue(c.execute_command('MSET', *items)) N = yield", "4, 'd'), c.zadd('s2', 6, 'a', 9, 'b', 100, 'c'))) r = yield c.zdiffstore('s3',", "[b'd', b'e']) self.assertEqual(sorted(m2), [b'a',b'b',b'c',b'd',b'e',b'f',b'g']) def testMoveSetSet(self): r = yield self.multi_async((self.client.sadd('foo',1,2,3,4,5), self.client.sadd('bla',4,5,6,7,8))) r =", "get_version(info): if 'redis_version' in info: return info['redis_version'] else: return info['Server']['redis_version'] class test_script(redisb.RedisScript): script", "= yield c.delpattern('x*') self.assertEqual(N, 2) def testMove2Set(self): yield self.multi_async((self.client.sadd('foo', 1, 2, 3, 4,", "tuple(l) self.client.rpush(name, *l) self.assertEqual(self.client.llen(name), len(l)) def make_zset(self, name, d): self.client.zadd(name, *flatzset(kwargs=d)) class TestExtraClientCommands(TestCase):", "client = self.client yield self.multi_async((client.zadd('foo',1,'a',2,'b',3,'c',4,'d',5,'e'), client.lpush('bla','d','e','f','g'))) r = yield client.execute_script('move2set', ('foo','bla'), 'z') self.assertEqual(len(r),", "in info: return info['redis_version'] else: return info['Server']['redis_version'] class test_script(redisb.RedisScript): script = (redisb.read_lua_file('commands.utils'), '''\\", "[b'a', b'c', b'd', b'e']) self.assertEqual(res, [b'b']) res = yield self.client.zpopbyscore('foo', 0, 4.5) self.assertEqual(res,", "= yield c.zdiffstore('s3', ('s1', 's2'), withscores=True) self.async.assertEqual(c.zcard('s3'), 
3) r = yield c.zrange('s3', 0,", "5, 'e') res = yield self.client.zpopbyscore('foo', 2) rem = yield self.client.zrange('foo', 0, -1)", "res = yield self.client.zpopbyscore('foo', 2) rem = yield self.client.zrange('foo', 0, -1) self.assertEqual(len(rem), 4)", "commands for redis client.''' import json from hashlib import sha1 from stdnet import", "self.assertEqual(N, 2) def testMove2Set(self): yield self.multi_async((self.client.sadd('foo', 1, 2, 3, 4, 5), self.client.lpush('bla', 4,", "r = yield self.multi_async((self.client.sadd('foo',1,2,3,4,5), self.client.sadd('bla',4,5,6,7,8))) r = yield self.client.execute_script('move2set', ('foo', 'bla'), 's') self.assertEqual(len(r),", "1) r = yield c.zrange('s3', 0, -1) self.assertEqual(r, [b'd']) def test_zdiffstore_withscores2(self): c =", "yield self.client.smembers('res1') m2 = yield self.client.smembers('res2') m1 = sorted((int(r) for r in m1))", "name, l): l = tuple(l) self.client.rpush(name, *l) self.assertEqual(self.client.llen(name), len(l)) def make_zset(self, name, d):", "'a4': 2}), self.make_zset('cb', {'a1': 3, 'a3': 1, 'a4': 4}))) n = yield self.client.zdiffstore('zb',", "0, -1) self.assertEqual(len(rem), 4) self.assertEqual(rem, [b'a', b'c', b'd', b'e']) self.assertEqual(res, [b'b']) res =", "self.client.execute_script('move2set', ('foo', 'bla'), 's') self.assertEqual(len(r), 2) self.assertEqual(r[0], 2) self.assertEqual(r[1], 0) def testMove2List2(self): yield", "self.async.assertFalse(c.exists('bla2')) yield self.async.assertFalse(c.exists('blaaaaaaaaaaaaaa')) yield self.async.assertEqual(c.get('xxxx'), b'moon') N = yield c.delpattern('x*') self.assertEqual(N, 2) def", "client.zrange('res2', 0, -1) self.assertEqual(sorted(m1), [b'd', b'e']) self.assertEqual(sorted(m2), [b'a',b'b',b'c',b'd',b'e',b'f',b'g']) def testMoveSetSet(self): r = yield", "b'd', b'e']) self.assertEqual(res, [b'b']) res = yield self.client.zpopbyscore('foo', 0, 4.5) self.assertEqual(res, [b'a', b'c',", "-1, withscores=True) 
self.assertEquals(list(r), [(b'a2', 1), (b'a1', 2)]) def test_zdiffstore2(self): c = self.client yield", "= yield c.zrange('s3', 0, -1) self.assertEqual(r, [b'd']) def test_zdiffstore_withscores2(self): c = self.client yield", "for k, v in d.items(): self.client.hset(key, k, v) def make_list(self, name, l): l", "self.assertEqual(r[0], 2) self.assertEqual(r[1], 0) def testMove2List2(self): yield self.multi_async((self.client.lpush('foo',1,2,3,4,5), self.client.lpush('bla',4,5,6,7,8))) r = yield self.client.execute_script('move2set',", "0, -1, withscores=True) self.assertEqual(dict(r), {b'a': -5.0, b'c': -97.0, b'd': 4.0}) def test_zpop_byrank(self): yield", "yield self.multi_async((self.client.lpush('foo',1,2,3,4,5), self.client.lpush('bla',4,5,6,7,8))) r = yield self.client.execute_script('move2set', ('foo','bla'), 's') self.assertEqual(len(r), 2) self.assertEqual(r[0], 2)", "N = yield c.delpattern('bla*') self.assertEqual(N, 4) yield self.async.assertFalse(c.exists('bla')) yield self.async.assertFalse(c.exists('bla1')) yield self.async.assertFalse(c.exists('bla2')) yield", "4, 'd'), c.zadd('s2', 6, 'a', 2, 'b', 100, 'c'))) r = yield c.zdiffstore('s3',", "= sorted((int(r) for r in m2)) self.assertEqual(m1, [4,5]) self.assertEqual(m2, [1,2,3,4,5,6,7,8]) def testMove2ZSet(self): client", "yield self.async.assertFalse(c.exists('bla')) yield self.async.assertFalse(c.exists('bla1')) yield self.async.assertFalse(c.exists('bla2')) yield self.async.assertFalse(c.exists('blaaaaaaaaaaaaaa')) yield self.async.assertEqual(c.get('xxxx'), b'moon') N =", "*items)) N = yield c.delpattern('bla*') self.assertEqual(N, 4) yield self.async.assertFalse(c.exists('bla')) yield self.async.assertFalse(c.exists('bla1')) yield self.async.assertFalse(c.exists('bla2'))", "'a2': 1, 'a3': 2}), self.make_zset('bb', {'a1': 1, 'a3': 1, 'a4': 2}), self.make_zset('cb', {'a1':", "'c', 4, 'd'), c.zadd('s2', 6, 'a', 2, 'b', 100, 'c'))) r = yield", "yield c.zdiffstore('s3', ('s1', 's2')) 
self.async.assertEqual(c.zcard('s3'), 1) r = yield c.zrange('s3', 0, -1) self.assertEqual(r,", "k, v in d.items(): self.client.hset(key, k, v) def make_list(self, name, l): l =", "self.client.execute_script('move2set', ('foo', 'bla'), 's') self.assertEqual(len(r), 2) self.assertEqual(r[0], 2) self.assertEqual(r[1], 1) yield self.multi_async((self.client.sinterstore('res1', 'foo',", "test_zdiffstore(self): yield self.multi_async((self.make_zset('aa', {'a1': 1, 'a2': 1, 'a3': 1}), self.make_zset('ba', {'a1': 2, 'a3':", "withscores=True) self.assertEquals(list(r), [(b'a2', 1), (b'a1', 2)]) def test_zdiffstore2(self): c = self.client yield self.multi_async((c.zadd('s1',", "'a', 2, 'b', 3, 'c', 4, 'd', 5, 'e') res = yield self.client.zpopbyscore('foo',", "m1 = yield client.zrange('res1', 0, -1) m2 = yield client.zrange('res2', 0, -1) self.assertEqual(sorted(m1),", "9, 'b', 100, 'c'))) r = yield c.zdiffstore('s3', ('s1', 's2')) self.async.assertEqual(c.zcard('s3'), 1) r", "'foo', ()) # ZSET SCRIPTING COMMANDS def test_zdiffstore(self): yield self.multi_async((self.make_zset('aa', {'a1': 1, 'a2':", "yield self.async.assertFalse(c.exists('bla2')) yield self.async.assertFalse(c.exists('blaaaaaaaaaaaaaa')) yield self.async.assertEqual(c.get('xxxx'), b'moon') N = yield c.delpattern('x*') self.assertEqual(N, 2)", "def test_zdiffstore(self): yield self.multi_async((self.make_zset('aa', {'a1': 1, 'a2': 1, 'a3': 1}), self.make_zset('ba', {'a1': 2,", "testMove2Set(self): yield self.multi_async((self.client.sadd('foo', 1, 2, 3, 4, 5), self.client.lpush('bla', 4, 5, 6, 7,", "self.multi_async((self.client.sadd('foo',1,2,3,4,5), self.client.sadd('bla',4,5,6,7,8))) r = yield self.client.execute_script('move2set', ('foo', 'bla'), 's') self.assertEqual(len(r), 2) self.assertEqual(r[0], 2)", "info['Server']['redis_version'] class test_script(redisb.RedisScript): script = (redisb.read_lua_file('commands.utils'), '''\\ local js = cjson.decode(ARGV[1]) return cjson.encode(js)''')", 
"self.async.assertEqual(c.get('xxxx'), b'moon') N = yield c.delpattern('x*') self.assertEqual(N, 2) def testMove2Set(self): yield self.multi_async((self.client.sadd('foo', 1,", "self.async.assertFalse(c.exists('bla1')) yield self.async.assertFalse(c.exists('bla2')) yield self.async.assertFalse(c.exists('blaaaaaaaaaaaaaa')) yield self.async.assertEqual(c.get('xxxx'), b'moon') N = yield c.delpattern('x*') self.assertEqual(N,", "rem = yield self.client.zrange('foo', 0, -1) self.assertEqual(len(rem), 4) self.assertEqual(rem, [b'a', b'c', b'd', b'e'])", "redis client.''' import json from hashlib import sha1 from stdnet import getdb from", "def get_version(info): if 'redis_version' in info: return info['redis_version'] else: return info['Server']['redis_version'] class test_script(redisb.RedisScript):", "4) yield self.async.assertFalse(c.exists('bla')) yield self.async.assertFalse(c.exists('bla1')) yield self.async.assertFalse(c.exists('bla2')) yield self.async.assertFalse(c.exists('blaaaaaaaaaaaaaa')) yield self.async.assertEqual(c.get('xxxx'), b'moon') N", "'bla'), 's') self.assertEqual(len(r), 2) self.assertEqual(r[0], 2) self.assertEqual(r[1], 1) yield self.multi_async((self.client.sinterstore('res1', 'foo', 'bla'), self.client.sunionstore('res2',", "b'd': 4.0}) def test_zpop_byrank(self): yield self.client.zadd('foo',1,'a',2,'b',3,'c',4,'d',5,'e') res = yield self.client.zpopbyrank('foo',0) rem = yield", "test_bad_execute_script(self): self.assertRaises(redisb.RedisError, self.client.execute_script, 'foo', ()) # ZSET SCRIPTING COMMANDS def test_zdiffstore(self): yield self.multi_async((self.make_zset('aa',", "c.zdiffstore('s3', ('s1', 's2')) self.async.assertEqual(c.zcard('s3'), 1) r = yield c.zrange('s3', 0, -1) self.assertEqual(r, [b'd'])", "client.lpush('bla','d','e','f','g'))) r = yield client.execute_script('move2set', ('foo','bla'), 'z') self.assertEqual(len(r), 2) self.assertEqual(r[0], 2) self.assertEqual(r[1], 1)", "def testMoveSetSet(self): r = yield 
self.multi_async((self.client.sadd('foo',1,2,3,4,5), self.client.sadd('bla',4,5,6,7,8))) r = yield self.client.execute_script('move2set', ('foo', 'bla'),", "c = self.backend.client self.assertEqual(c.prefix, '') size = yield c.dbsize() self.assertTrue(size >= 0) def", "self.assertEqual(len(rem),4) self.assertEqual(rem,[b'b',b'c',b'd',b'e']) self.assertEqual(res,[b'a']) res = yield self.client.zpopbyrank('foo',0,2) self.assertEqual(res,[b'b',b'c',b'd']) rem = yield self.client.zrange('foo',0,-1) self.assertEqual(rem,[b'e'])", "0, -1) m2 = yield client.zrange('res2', 0, -1) self.assertEqual(sorted(m1), [b'd', b'e']) self.assertEqual(sorted(m2), [b'a',b'b',b'c',b'd',b'e',b'f',b'g'])", "multipledb = 'redis' def setUp(self): client = self.backend.client self.client = client.prefixed(self.namespace) def tearDown(self):", "yield self.client.zdiffstore('za', ['aa', 'ba', 'ca']) self.assertEqual(n, 1) r = yield self.client.zrange('za', 0, -1,", "7, 8))) r = yield self.client.execute_script('move2set', ('foo', 'bla'), 's') self.assertEqual(len(r), 2) self.assertEqual(r[0], 2)", "2) self.assertEqual(r[0], 2) self.assertEqual(r[1], 0) def testMove2List2(self): yield self.multi_async((self.client.lpush('foo',1,2,3,4,5), self.client.lpush('bla',4,5,6,7,8))) r = yield", "self.multi_async((self.make_zset('aa', {'a1': 1, 'a2': 1, 'a3': 1}), self.make_zset('ba', {'a1': 2, 'a3': 2, 'a4':", "8))) r = yield self.client.execute_script('move2set', ('foo', 'bla'), 's') self.assertEqual(len(r), 2) self.assertEqual(r[0], 2) self.assertEqual(r[1],", "json from hashlib import sha1 from stdnet import getdb from stdnet.backends import redisb", "key, d): for k, v in d.items(): self.client.hset(key, k, v) def make_list(self, name,", "m1 = yield self.client.smembers('res1') m2 = yield self.client.smembers('res2') m1 = sorted((int(r) for r", "= yield self.client.zrange('foo',0,-1) self.assertEqual(rem,[b'e']) def test_zpop_byscore(self): yield self.client.zadd('foo', 1, 'a', 2, 'b', 3,", "1) r = 
yield self.client.zrange('za', 0, -1, withscores=True) self.assertEquals(list(r), [(b'a2', 1)]) def test_zdiffstore_withscores(self):", "self.make_zset('cb', {'a1': 3, 'a3': 1, 'a4': 4}))) n = yield self.client.zdiffstore('zb', ['ab', 'bb',", "# ZSET SCRIPTING COMMANDS def test_zdiffstore(self): yield self.multi_async((self.make_zset('aa', {'a1': 1, 'a2': 1, 'a3':", "= yield self.client.execute_script('move2set', ('foo','bla'), 's') self.assertEqual(len(r), 2) self.assertEqual(r[0], 2) self.assertEqual(r[1], 2) def test_bad_execute_script(self):", "'a', 2, 'b', 3, 'c', 4, 'd'), c.zadd('s2', 6, 'a', 9, 'b', 100,", "yield self.client.zpopbyrank('foo',0,2) self.assertEqual(res,[b'b',b'c',b'd']) rem = yield self.client.zrange('foo',0,-1) self.assertEqual(rem,[b'e']) def test_zpop_byscore(self): yield self.client.zadd('foo', 1,", "self.client.lpush('bla', 4, 5, 6, 7, 8))) r = yield self.client.execute_script('move2set', ('foo', 'bla'), 's')", "{'a1': 6, 'a3': 5, 'a4': 4}))) n = yield self.client.zdiffstore('za', ['aa', 'ba', 'ca'])", "self.assertEqual(r[1], 0) def testMove2List2(self): yield self.multi_async((self.client.lpush('foo',1,2,3,4,5), self.client.lpush('bla',4,5,6,7,8))) r = yield self.client.execute_script('move2set', ('foo','bla'), 's')", "2) r = yield self.client.zrange('zb', 0, -1, withscores=True) self.assertEquals(list(r), [(b'a2', 1), (b'a1', 2)])", "4, 5), self.client.lpush('bla', 4, 5, 6, 7, 8))) r = yield self.client.execute_script('move2set', ('foo',", "self.assertEqual(dict(r), {b'a': -5.0, b'c': -97.0, b'd': 4.0}) def test_zpop_byrank(self): yield self.client.zadd('foo',1,'a',2,'b',3,'c',4,'d',5,'e') res =", "yield c.zrange('s3', 0, -1) self.assertEqual(r, [b'd']) def test_zdiffstore_withscores2(self): c = self.client yield self.multi_async((c.zadd('s1',", "testMove2ZSet(self): client = self.client yield self.multi_async((client.zadd('foo',1,'a',2,'b',3,'c',4,'d',5,'e'), client.lpush('bla','d','e','f','g'))) r = yield 
client.execute_script('move2set', ('foo','bla'), 'z')", "yield c.delpattern('bla*') self.assertEqual(N, 4) yield self.async.assertFalse(c.exists('bla')) yield self.async.assertFalse(c.exists('bla1')) yield self.async.assertFalse(c.exists('bla2')) yield self.async.assertFalse(c.exists('blaaaaaaaaaaaaaa')) yield", "res = yield self.client.zpopbyrank('foo',0,2) self.assertEqual(res,[b'b',b'c',b'd']) rem = yield self.client.zrange('foo',0,-1) self.assertEqual(rem,[b'e']) def test_zpop_byscore(self): yield", "test_del_pattern(self): c = self.client items = ('bla',1, 'bla1','ciao', 'bla2','foo', 'xxxx','moon', 'blaaaaaaaaaaaaaa','sun', 'xyyyy','earth') yield", "self.client.zrange('za', 0, -1, withscores=True) self.assertEquals(list(r), [(b'a2', 1)]) def test_zdiffstore_withscores(self): yield self.multi_async((self.make_zset('ab', {'a1': 6,", "= yield self.multi_async((self.client.sadd('foo',1,2,3,4,5), self.client.sadd('bla',4,5,6,7,8))) r = yield self.client.execute_script('move2set', ('foo', 'bla'), 's') self.assertEqual(len(r), 2)", "self.assertEqual(r[0], 2) self.assertEqual(r[1], 1) yield self.multi_async((self.client.sinterstore('res1', 'foo', 'bla'), self.client.sunionstore('res2', 'foo', 'bla'))) m1 =", "self.assertEqual(c.prefix, '') size = yield c.dbsize() self.assertTrue(size >= 0) def test_script_meta(self): script =", "4, 5, 6, 7, 8))) r = yield self.client.execute_script('move2set', ('foo', 'bla'), 's') self.assertEqual(len(r),", "= yield self.client.execute_script('move2set', ('foo', 'bla'), 's') self.assertEqual(len(r), 2) self.assertEqual(r[0], 2) self.assertEqual(r[1], 1) yield", "{b'a': -5.0, b'c': -97.0, b'd': 4.0}) def test_zpop_byrank(self): yield self.client.zadd('foo',1,'a',2,'b',3,'c',4,'d',5,'e') res = yield", "4) self.assertEqual(rem, [b'a', b'c', b'd', b'e']) self.assertEqual(res, [b'b']) res = yield self.client.zpopbyscore('foo', 0,", "n = yield self.client.zdiffstore('za', ['aa', 'ba', 'ca']) self.assertEqual(n, 1) r = yield 
self.client.zrange('za',", "v) def make_list(self, name, l): l = tuple(l) self.client.rpush(name, *l) self.assertEqual(self.client.llen(name), len(l)) def", "'') size = yield c.dbsize() self.assertTrue(size >= 0) def test_script_meta(self): script = redisb.get_script('test_script')", "withscores=True) self.assertEqual(dict(r), {b'a': -5.0, b'c': -97.0, b'd': 4.0}) def test_zpop_byrank(self): yield self.client.zadd('foo',1,'a',2,'b',3,'c',4,'d',5,'e') res", "= self.client yield self.multi_async((c.zadd('s1', 1, 'a', 2, 'b', 3, 'c', 4, 'd'), c.zadd('s2',", "6, 7, 8))) r = yield self.client.execute_script('move2set', ('foo', 'bla'), 's') self.assertEqual(len(r), 2) self.assertEqual(r[0],", "4}))) n = yield self.client.zdiffstore('za', ['aa', 'ba', 'ca']) self.assertEqual(n, 1) r = yield", "TestExtraClientCommands(TestCase): def test_coverage(self): c = self.backend.client self.assertEqual(c.prefix, '') size = yield c.dbsize() self.assertTrue(size", "js = cjson.decode(ARGV[1]) return cjson.encode(js)''') def callback(self, request, result, args, **options): return json.loads(result.decode(request.encoding))", "TestCase(test.TestWrite): multipledb = 'redis' def setUp(self): client = self.backend.client self.client = client.prefixed(self.namespace) def", "'bla')))) m1 = yield client.zrange('res1', 0, -1) m2 = yield client.zrange('res2', 0, -1)", "r = yield c.zrange('s3', 0, -1) self.assertEqual(r, [b'd']) def test_zdiffstore_withscores2(self): c = self.client", "test_zdiffstore2(self): c = self.client yield self.multi_async((c.zadd('s1', 1, 'a', 2, 'b', 3, 'c', 4,", "yield self.client.execute_script('move2set', ('foo','bla'), 's') self.assertEqual(len(r), 2) self.assertEqual(r[0], 2) self.assertEqual(r[1], 2) def test_bad_execute_script(self): self.assertRaises(redisb.RedisError,", "[4,5]) self.assertEqual(m2, [1,2,3,4,5,6,7,8]) def testMove2ZSet(self): client = self.client yield self.multi_async((client.zadd('foo',1,'a',2,'b',3,'c',4,'d',5,'e'), 
client.lpush('bla','d','e','f','g'))) r =", "self.client.flushdb() def make_hash(self, key, d): for k, v in d.items(): self.client.hset(key, k, v)", "stdnet.backends import redisb from stdnet.utils import test, flatzset def get_version(info): if 'redis_version' in", "self.client.zrange('foo',0,-1) self.assertEqual(len(rem),4) self.assertEqual(rem,[b'b',b'c',b'd',b'e']) self.assertEqual(res,[b'a']) res = yield self.client.zpopbyrank('foo',0,2) self.assertEqual(res,[b'b',b'c',b'd']) rem = yield self.client.zrange('foo',0,-1)", "self.multi_async((self.make_zset('ab', {'a1': 6, 'a2': 1, 'a3': 2}), self.make_zset('bb', {'a1': 1, 'a3': 1, 'a4':", "self.client.zrange('foo',0,-1) self.assertEqual(rem,[b'e']) def test_zpop_byscore(self): yield self.client.zadd('foo', 1, 'a', 2, 'b', 3, 'c', 4,", "'xxxx','moon', 'blaaaaaaaaaaaaaa','sun', 'xyyyy','earth') yield self.async.assertTrue(c.execute_command('MSET', *items)) N = yield c.delpattern('bla*') self.assertEqual(N, 4) yield", "('bla',1, 'bla1','ciao', 'bla2','foo', 'xxxx','moon', 'blaaaaaaaaaaaaaa','sun', 'xyyyy','earth') yield self.async.assertTrue(c.execute_command('MSET', *items)) N = yield c.delpattern('bla*')", "yield self.multi_async((c.zadd('s1', 1, 'a', 2, 'b', 3, 'c', 4, 'd'), c.zadd('s2', 6, 'a',", "self.client.execute_script, 'foo', ()) # ZSET SCRIPTING COMMANDS def test_zdiffstore(self): yield self.multi_async((self.make_zset('aa', {'a1': 1,", "3, 4, 5), self.client.lpush('bla', 4, 5, 6, 7, 8))) r = yield self.client.execute_script('move2set',", "2, 'b', 3, 'c', 4, 'd'), c.zadd('s2', 6, 'a', 2, 'b', 100, 'c')))", "yield self.client.zadd('foo',1,'a',2,'b',3,'c',4,'d',5,'e') res = yield self.client.zpopbyrank('foo',0) rem = yield self.client.zrange('foo',0,-1) self.assertEqual(len(rem),4) self.assertEqual(rem,[b'b',b'c',b'd',b'e']) self.assertEqual(res,[b'a'])", "r = yield c.zdiffstore('s3', ('s1', 's2')) self.async.assertEqual(c.zcard('s3'), 1) r = yield c.zrange('s3', 0,", "r = yield c.zrange('s3', 0, -1, 
withscores=True) self.assertEqual(dict(r), {b'a': -5.0, b'c': -97.0, b'd':", "def make_hash(self, key, d): for k, v in d.items(): self.client.hset(key, k, v) def", "r in m2)) self.assertEqual(m1, [4,5]) self.assertEqual(m2, [1,2,3,4,5,6,7,8]) def testMove2ZSet(self): client = self.client yield", "'z') self.assertEqual(len(r), 2) self.assertEqual(r[0], 2) self.assertEqual(r[1], 1) yield self.multi_async((client.zinterstore('res1', ('foo', 'bla')), client.zunionstore('res2', ('foo',", "*flatzset(kwargs=d)) class TestExtraClientCommands(TestCase): def test_coverage(self): c = self.backend.client self.assertEqual(c.prefix, '') size = yield", "5, 'a4': 4}))) n = yield self.client.zdiffstore('za', ['aa', 'ba', 'ca']) self.assertEqual(n, 1) r", "sorted((int(r) for r in m2)) self.assertEqual(m1, [4,5]) self.assertEqual(m2, [1,2,3,4,5,6,7,8]) def testMove2ZSet(self): client =", "def setUp(self): client = self.backend.client self.client = client.prefixed(self.namespace) def tearDown(self): return self.client.flushdb() def", "3) r = yield c.zrange('s3', 0, -1, withscores=True) self.assertEqual(dict(r), {b'a': -5.0, b'c': -97.0,", "= redisb.get_script('test_script') self.assertTrue(script.script) sha = sha1(script.script.encode('utf-8')).hexdigest() self.assertEqual(script.sha1,sha) def test_del_pattern(self): c = self.client items", "hashlib import sha1 from stdnet import getdb from stdnet.backends import redisb from stdnet.utils", "self.assertEqual(n, 2) r = yield self.client.zrange('zb', 0, -1, withscores=True) self.assertEquals(list(r), [(b'a2', 1), (b'a1',", "3, 'c', 4, 'd', 5, 'e') res = yield self.client.zpopbyscore('foo', 2) rem =", "make_list(self, name, l): l = tuple(l) self.client.rpush(name, *l) self.assertEqual(self.client.llen(name), len(l)) def make_zset(self, name,", "self.assertEqual(self.client.llen(name), len(l)) def make_zset(self, name, d): self.client.zadd(name, *flatzset(kwargs=d)) class TestExtraClientCommands(TestCase): def test_coverage(self): c", 
"test_script_meta(self): script = redisb.get_script('test_script') self.assertTrue(script.script) sha = sha1(script.script.encode('utf-8')).hexdigest() self.assertEqual(script.sha1,sha) def test_del_pattern(self): c =", "self.async.assertTrue(c.execute_command('MSET', *items)) N = yield c.delpattern('bla*') self.assertEqual(N, 4) yield self.async.assertFalse(c.exists('bla')) yield self.async.assertFalse(c.exists('bla1')) yield", "make_zset(self, name, d): self.client.zadd(name, *flatzset(kwargs=d)) class TestExtraClientCommands(TestCase): def test_coverage(self): c = self.backend.client self.assertEqual(c.prefix,", "self.assertEqual(m2, [1,2,3,4,5,6,7,8]) def testMove2ZSet(self): client = self.client yield self.multi_async((client.zadd('foo',1,'a',2,'b',3,'c',4,'d',5,'e'), client.lpush('bla','d','e','f','g'))) r = yield", "= self.client items = ('bla',1, 'bla1','ciao', 'bla2','foo', 'xxxx','moon', 'blaaaaaaaaaaaaaa','sun', 'xyyyy','earth') yield self.async.assertTrue(c.execute_command('MSET', *items))", "self.make_zset('ca', {'a1': 6, 'a3': 5, 'a4': 4}))) n = yield self.client.zdiffstore('za', ['aa', 'ba',", "{'a1': 3, 'a3': 1, 'a4': 4}))) n = yield self.client.zdiffstore('zb', ['ab', 'bb', 'cb'],", "yield self.client.zpopbyscore('foo', 0, 4.5) self.assertEqual(res, [b'a', b'c', b'd']) rem = yield self.client.zrange('foo', 0,", "c = self.client yield self.multi_async((c.zadd('s1', 1, 'a', 2, 'b', 3, 'c', 4, 'd'),", "-1) m2 = yield client.zrange('res2', 0, -1) self.assertEqual(sorted(m1), [b'd', b'e']) self.assertEqual(sorted(m2), [b'a',b'b',b'c',b'd',b'e',b'f',b'g']) def", "in m1)) m2 = sorted((int(r) for r in m2)) self.assertEqual(m1, [4,5]) self.assertEqual(m2, [1,2,3,4,5,6,7,8])", "('s1', 's2'), withscores=True) self.async.assertEqual(c.zcard('s3'), 3) r = yield c.zrange('s3', 0, -1, withscores=True) self.assertEqual(dict(r),", "def test_bad_execute_script(self): self.assertRaises(redisb.RedisError, self.client.execute_script, 'foo', ()) # ZSET SCRIPTING 
COMMANDS def test_zdiffstore(self): yield", "import test, flatzset def get_version(info): if 'redis_version' in info: return info['redis_version'] else: return", "'s2')) self.async.assertEqual(c.zcard('s3'), 1) r = yield c.zrange('s3', 0, -1) self.assertEqual(r, [b'd']) def test_zdiffstore_withscores2(self):", "= yield self.client.zrange('za', 0, -1, withscores=True) self.assertEquals(list(r), [(b'a2', 1)]) def test_zdiffstore_withscores(self): yield self.multi_async((self.make_zset('ab',", "self.async.assertEqual(c.zcard('s3'), 3) r = yield c.zrange('s3', 0, -1, withscores=True) self.assertEqual(dict(r), {b'a': -5.0, b'c':", "self.assertEqual(len(r), 2) self.assertEqual(r[0], 2) self.assertEqual(r[1], 1) yield self.multi_async((client.zinterstore('res1', ('foo', 'bla')), client.zunionstore('res2', ('foo', 'bla'))))", "return cjson.encode(js)''') def callback(self, request, result, args, **options): return json.loads(result.decode(request.encoding)) class TestCase(test.TestWrite): multipledb", "l): l = tuple(l) self.client.rpush(name, *l) self.assertEqual(self.client.llen(name), len(l)) def make_zset(self, name, d): self.client.zadd(name,", "yield self.multi_async((self.make_zset('ab', {'a1': 6, 'a2': 1, 'a3': 2}), self.make_zset('bb', {'a1': 1, 'a3': 1,", "= yield self.client.zrange('foo',0,-1) self.assertEqual(len(rem),4) self.assertEqual(rem,[b'b',b'c',b'd',b'e']) self.assertEqual(res,[b'a']) res = yield self.client.zpopbyrank('foo',0,2) self.assertEqual(res,[b'b',b'c',b'd']) rem =", "m2 = yield self.client.smembers('res2') m1 = sorted((int(r) for r in m1)) m2 =", "2}), self.make_zset('ca', {'a1': 6, 'a3': 5, 'a4': 4}))) n = yield self.client.zdiffstore('za', ['aa',", "'a', 9, 'b', 100, 'c'))) r = yield c.zdiffstore('s3', ('s1', 's2')) self.async.assertEqual(c.zcard('s3'), 1)", "'a4': 2}), self.make_zset('ca', {'a1': 6, 'a3': 5, 'a4': 4}))) n = yield self.client.zdiffstore('za',", "len(l)) def make_zset(self, name, d): self.client.zadd(name, *flatzset(kwargs=d)) 
class TestExtraClientCommands(TestCase): def test_coverage(self): c =", "m2)) self.assertEqual(m1, [4,5]) self.assertEqual(m2, [1,2,3,4,5,6,7,8]) def testMove2ZSet(self): client = self.client yield self.multi_async((client.zadd('foo',1,'a',2,'b',3,'c',4,'d',5,'e'), client.lpush('bla','d','e','f','g')))", "yield self.multi_async((self.make_zset('aa', {'a1': 1, 'a2': 1, 'a3': 1}), self.make_zset('ba', {'a1': 2, 'a3': 2,", "b'c', b'd', b'e']) self.assertEqual(res, [b'b']) res = yield self.client.zpopbyscore('foo', 0, 4.5) self.assertEqual(res, [b'a',", "self.assertEqual(script.sha1,sha) def test_del_pattern(self): c = self.client items = ('bla',1, 'bla1','ciao', 'bla2','foo', 'xxxx','moon', 'blaaaaaaaaaaaaaa','sun',", "self.assertEqual(r, [b'd']) def test_zdiffstore_withscores2(self): c = self.client yield self.multi_async((c.zadd('s1', 1, 'a', 2, 'b',", "r = yield self.client.zrange('za', 0, -1, withscores=True) self.assertEquals(list(r), [(b'a2', 1)]) def test_zdiffstore_withscores(self): yield", "self.backend.client self.client = client.prefixed(self.namespace) def tearDown(self): return self.client.flushdb() def make_hash(self, key, d): for", "2) def test_bad_execute_script(self): self.assertRaises(redisb.RedisError, self.client.execute_script, 'foo', ()) # ZSET SCRIPTING COMMANDS def test_zdiffstore(self):", "m1)) m2 = sorted((int(r) for r in m2)) self.assertEqual(m1, [4,5]) self.assertEqual(m2, [1,2,3,4,5,6,7,8]) def", "self.async.assertFalse(c.exists('blaaaaaaaaaaaaaa')) yield self.async.assertEqual(c.get('xxxx'), b'moon') N = yield c.delpattern('x*') self.assertEqual(N, 2) def testMove2Set(self): yield", "6, 'a3': 5, 'a4': 4}))) n = yield self.client.zdiffstore('za', ['aa', 'ba', 'ca']) self.assertEqual(n,", "[(b'a2', 1)]) def test_zdiffstore_withscores(self): yield self.multi_async((self.make_zset('ab', {'a1': 6, 'a2': 1, 'a3': 2}), self.make_zset('bb',", "c.dbsize() self.assertTrue(size >= 0) def test_script_meta(self): script = 
redisb.get_script('test_script') self.assertTrue(script.script) sha = sha1(script.script.encode('utf-8')).hexdigest()", "client.''' import json from hashlib import sha1 from stdnet import getdb from stdnet.backends", "def tearDown(self): return self.client.flushdb() def make_hash(self, key, d): for k, v in d.items():", "self.assertEquals(list(r), [(b'a2', 1)]) def test_zdiffstore_withscores(self): yield self.multi_async((self.make_zset('ab', {'a1': 6, 'a2': 1, 'a3': 2}),", "'a3': 1}), self.make_zset('ba', {'a1': 2, 'a3': 2, 'a4': 2}), self.make_zset('ca', {'a1': 6, 'a3':", "yield self.client.zpopbyrank('foo',0) rem = yield self.client.zrange('foo',0,-1) self.assertEqual(len(rem),4) self.assertEqual(rem,[b'b',b'c',b'd',b'e']) self.assertEqual(res,[b'a']) res = yield self.client.zpopbyrank('foo',0,2)", "make_hash(self, key, d): for k, v in d.items(): self.client.hset(key, k, v) def make_list(self,", "= self.backend.client self.assertEqual(c.prefix, '') size = yield c.dbsize() self.assertTrue(size >= 0) def test_script_meta(self):", "withscores=True) self.assertEquals(list(r), [(b'a2', 1)]) def test_zdiffstore_withscores(self): yield self.multi_async((self.make_zset('ab', {'a1': 6, 'a2': 1, 'a3':", "'s') self.assertEqual(len(r), 2) self.assertEqual(r[0], 2) self.assertEqual(r[1], 1) yield self.multi_async((self.client.sinterstore('res1', 'foo', 'bla'), self.client.sunionstore('res2', 'foo',", "= yield self.client.zdiffstore('zb', ['ab', 'bb', 'cb'], withscores=True) self.assertEqual(n, 2) r = yield self.client.zrange('zb',", "'foo', 'bla'))) m1 = yield self.client.smembers('res1') m2 = yield self.client.smembers('res2') m1 = sorted((int(r)", "4.5) self.assertEqual(res, [b'a', b'c', b'd']) rem = yield self.client.zrange('foo', 0, -1) self.assertEqual(rem, [b'e'])", "self.client.lpush('bla',4,5,6,7,8))) r = yield self.client.execute_script('move2set', ('foo','bla'), 's') self.assertEqual(len(r), 2) self.assertEqual(r[0], 2) self.assertEqual(r[1], 2)", "2) rem = yield 
self.client.zrange('foo', 0, -1) self.assertEqual(len(rem), 4) self.assertEqual(rem, [b'a', b'c', b'd',", "import redisb from stdnet.utils import test, flatzset def get_version(info): if 'redis_version' in info:", "1), (b'a1', 2)]) def test_zdiffstore2(self): c = self.client yield self.multi_async((c.zadd('s1', 1, 'a', 2,", "1, 'a4': 4}))) n = yield self.client.zdiffstore('zb', ['ab', 'bb', 'cb'], withscores=True) self.assertEqual(n, 2)", "('foo','bla'), 'z') self.assertEqual(len(r), 2) self.assertEqual(r[0], 2) self.assertEqual(r[1], 1) yield self.multi_async((client.zinterstore('res1', ('foo', 'bla')), client.zunionstore('res2',", "import getdb from stdnet.backends import redisb from stdnet.utils import test, flatzset def get_version(info):", "test_zdiffstore_withscores(self): yield self.multi_async((self.make_zset('ab', {'a1': 6, 'a2': 1, 'a3': 2}), self.make_zset('bb', {'a1': 1, 'a3':", "import sha1 from stdnet import getdb from stdnet.backends import redisb from stdnet.utils import", "('foo', 'bla')), client.zunionstore('res2', ('foo', 'bla')))) m1 = yield client.zrange('res1', 0, -1) m2 =", "r = yield client.execute_script('move2set', ('foo','bla'), 'z') self.assertEqual(len(r), 2) self.assertEqual(r[0], 2) self.assertEqual(r[1], 1) yield", "4}))) n = yield self.client.zdiffstore('zb', ['ab', 'bb', 'cb'], withscores=True) self.assertEqual(n, 2) r =", "= 'redis' def setUp(self): client = self.backend.client self.client = client.prefixed(self.namespace) def tearDown(self): return", "= client.prefixed(self.namespace) def tearDown(self): return self.client.flushdb() def make_hash(self, key, d): for k, v", "self.multi_async((c.zadd('s1', 1, 'a', 2, 'b', 3, 'c', 4, 'd'), c.zadd('s2', 6, 'a', 2,", "c.delpattern('bla*') self.assertEqual(N, 4) yield self.async.assertFalse(c.exists('bla')) yield self.async.assertFalse(c.exists('bla1')) yield self.async.assertFalse(c.exists('bla2')) yield self.async.assertFalse(c.exists('blaaaaaaaaaaaaaa')) yield 
self.async.assertEqual(c.get('xxxx'),", "name, d): self.client.zadd(name, *flatzset(kwargs=d)) class TestExtraClientCommands(TestCase): def test_coverage(self): c = self.backend.client self.assertEqual(c.prefix, '')", "def test_zpop_byrank(self): yield self.client.zadd('foo',1,'a',2,'b',3,'c',4,'d',5,'e') res = yield self.client.zpopbyrank('foo',0) rem = yield self.client.zrange('foo',0,-1) self.assertEqual(len(rem),4)", "redisb.get_script('test_script') self.assertTrue(script.script) sha = sha1(script.script.encode('utf-8')).hexdigest() self.assertEqual(script.sha1,sha) def test_del_pattern(self): c = self.client items =", "c = self.client items = ('bla',1, 'bla1','ciao', 'bla2','foo', 'xxxx','moon', 'blaaaaaaaaaaaaaa','sun', 'xyyyy','earth') yield self.async.assertTrue(c.execute_command('MSET',", "yield self.client.zrange('foo',0,-1) self.assertEqual(len(rem),4) self.assertEqual(rem,[b'b',b'c',b'd',b'e']) self.assertEqual(res,[b'a']) res = yield self.client.zpopbyrank('foo',0,2) self.assertEqual(res,[b'b',b'c',b'd']) rem = yield", "-1) self.assertEqual(len(rem), 4) self.assertEqual(rem, [b'a', b'c', b'd', b'e']) self.assertEqual(res, [b'b']) res = yield", "'a4': 4}))) n = yield self.client.zdiffstore('za', ['aa', 'ba', 'ca']) self.assertEqual(n, 1) r =", "[b'b']) res = yield self.client.zpopbyscore('foo', 0, 4.5) self.assertEqual(res, [b'a', b'c', b'd']) rem =", "client.execute_script('move2set', ('foo','bla'), 'z') self.assertEqual(len(r), 2) self.assertEqual(r[0], 2) self.assertEqual(r[1], 1) yield self.multi_async((client.zinterstore('res1', ('foo', 'bla')),", "self.client.sadd('bla',4,5,6,7,8))) r = yield self.client.execute_script('move2set', ('foo', 'bla'), 's') self.assertEqual(len(r), 2) self.assertEqual(r[0], 2) self.assertEqual(r[1],", "self.client.zpopbyscore('foo', 0, 4.5) self.assertEqual(res, [b'a', b'c', b'd']) rem = yield self.client.zrange('foo', 0, -1)", "{'a1': 6, 'a2': 1, 'a3': 2}), self.make_zset('bb', {'a1': 1, 'a3': 1, 'a4': 2}),", "b'e']) 
self.assertEqual(res, [b'b']) res = yield self.client.zpopbyscore('foo', 0, 4.5) self.assertEqual(res, [b'a', b'c', b'd'])", "c.zdiffstore('s3', ('s1', 's2'), withscores=True) self.async.assertEqual(c.zcard('s3'), 3) r = yield c.zrange('s3', 0, -1, withscores=True)", "3, 'a3': 1, 'a4': 4}))) n = yield self.client.zdiffstore('zb', ['ab', 'bb', 'cb'], withscores=True)", "1, 2, 3, 4, 5), self.client.lpush('bla', 4, 5, 6, 7, 8))) r =", "2) self.assertEqual(r[0], 2) self.assertEqual(r[1], 1) yield self.multi_async((client.zinterstore('res1', ('foo', 'bla')), client.zunionstore('res2', ('foo', 'bla')))) m1", "= yield client.zrange('res1', 0, -1) m2 = yield client.zrange('res2', 0, -1) self.assertEqual(sorted(m1), [b'd',", "'e') res = yield self.client.zpopbyscore('foo', 2) rem = yield self.client.zrange('foo', 0, -1) self.assertEqual(len(rem),", "2, 'a4': 2}), self.make_zset('ca', {'a1': 6, 'a3': 5, 'a4': 4}))) n = yield", "test_zpop_byscore(self): yield self.client.zadd('foo', 1, 'a', 2, 'b', 3, 'c', 4, 'd', 5, 'e')", "'a', 2, 'b', 100, 'c'))) r = yield c.zdiffstore('s3', ('s1', 's2'), withscores=True) self.async.assertEqual(c.zcard('s3'),", "ZSET SCRIPTING COMMANDS def test_zdiffstore(self): yield self.multi_async((self.make_zset('aa', {'a1': 1, 'a2': 1, 'a3': 1}),", "d): for k, v in d.items(): self.client.hset(key, k, v) def make_list(self, name, l):", "COMMANDS def test_zdiffstore(self): yield self.multi_async((self.make_zset('aa', {'a1': 1, 'a2': 1, 'a3': 1}), self.make_zset('ba', {'a1':", "test_coverage(self): c = self.backend.client self.assertEqual(c.prefix, '') size = yield c.dbsize() self.assertTrue(size >= 0)", "'b', 100, 'c'))) r = yield c.zdiffstore('s3', ('s1', 's2'), withscores=True) self.async.assertEqual(c.zcard('s3'), 3) r", "d): self.client.zadd(name, *flatzset(kwargs=d)) class TestExtraClientCommands(TestCase): def test_coverage(self): c = self.backend.client self.assertEqual(c.prefix, '') size", "'s2'), withscores=True) 
self.async.assertEqual(c.zcard('s3'), 3) r = yield c.zrange('s3', 0, -1, withscores=True) self.assertEqual(dict(r), {b'a':", "args, **options): return json.loads(result.decode(request.encoding)) class TestCase(test.TestWrite): multipledb = 'redis' def setUp(self): client =", "m1 = sorted((int(r) for r in m1)) m2 = sorted((int(r) for r in", "= yield c.delpattern('bla*') self.assertEqual(N, 4) yield self.async.assertFalse(c.exists('bla')) yield self.async.assertFalse(c.exists('bla1')) yield self.async.assertFalse(c.exists('bla2')) yield self.async.assertFalse(c.exists('blaaaaaaaaaaaaaa'))", "2, 'b', 3, 'c', 4, 'd', 5, 'e') res = yield self.client.zpopbyscore('foo', 2)", "d.items(): self.client.hset(key, k, v) def make_list(self, name, l): l = tuple(l) self.client.rpush(name, *l)", "callback(self, request, result, args, **options): return json.loads(result.decode(request.encoding)) class TestCase(test.TestWrite): multipledb = 'redis' def", "class TestCase(test.TestWrite): multipledb = 'redis' def setUp(self): client = self.backend.client self.client = client.prefixed(self.namespace)", "self.assertEqual(r[0], 2) self.assertEqual(r[1], 2) def test_bad_execute_script(self): self.assertRaises(redisb.RedisError, self.client.execute_script, 'foo', ()) # ZSET SCRIPTING", "self.client.zadd('foo',1,'a',2,'b',3,'c',4,'d',5,'e') res = yield self.client.zpopbyrank('foo',0) rem = yield self.client.zrange('foo',0,-1) self.assertEqual(len(rem),4) self.assertEqual(rem,[b'b',b'c',b'd',b'e']) self.assertEqual(res,[b'a']) res", "def test_zdiffstore_withscores(self): yield self.multi_async((self.make_zset('ab', {'a1': 6, 'a2': 1, 'a3': 2}), self.make_zset('bb', {'a1': 1,", "self.assertTrue(size >= 0) def test_script_meta(self): script = redisb.get_script('test_script') self.assertTrue(script.script) sha = sha1(script.script.encode('utf-8')).hexdigest() self.assertEqual(script.sha1,sha)", "self.client.zrange('zb', 0, -1, withscores=True) self.assertEquals(list(r), [(b'a2', 1), (b'a1', 
2)]) def test_zdiffstore2(self): c =", "5), self.client.lpush('bla', 4, 5, 6, 7, 8))) r = yield self.client.execute_script('move2set', ('foo', 'bla'),", "if 'redis_version' in info: return info['redis_version'] else: return info['Server']['redis_version'] class test_script(redisb.RedisScript): script =", "('s1', 's2')) self.async.assertEqual(c.zcard('s3'), 1) r = yield c.zrange('s3', 0, -1) self.assertEqual(r, [b'd']) def", "setUp(self): client = self.backend.client self.client = client.prefixed(self.namespace) def tearDown(self): return self.client.flushdb() def make_hash(self,", "'a3': 2}), self.make_zset('bb', {'a1': 1, 'a3': 1, 'a4': 2}), self.make_zset('cb', {'a1': 3, 'a3':", "testMove2List2(self): yield self.multi_async((self.client.lpush('foo',1,2,3,4,5), self.client.lpush('bla',4,5,6,7,8))) r = yield self.client.execute_script('move2set', ('foo','bla'), 's') self.assertEqual(len(r), 2) self.assertEqual(r[0],", "r in m1)) m2 = sorted((int(r) for r in m2)) self.assertEqual(m1, [4,5]) self.assertEqual(m2,", "flatzset def get_version(info): if 'redis_version' in info: return info['redis_version'] else: return info['Server']['redis_version'] class", "from stdnet.utils import test, flatzset def get_version(info): if 'redis_version' in info: return info['redis_version']", "self.assertEqual(m1, [4,5]) self.assertEqual(m2, [1,2,3,4,5,6,7,8]) def testMove2ZSet(self): client = self.client yield self.multi_async((client.zadd('foo',1,'a',2,'b',3,'c',4,'d',5,'e'), client.lpush('bla','d','e','f','g'))) r", "100, 'c'))) r = yield c.zdiffstore('s3', ('s1', 's2'), withscores=True) self.async.assertEqual(c.zcard('s3'), 3) r =", "'b', 3, 'c', 4, 'd'), c.zadd('s2', 6, 'a', 9, 'b', 100, 'c'))) r", "redisb from stdnet.utils import test, flatzset def get_version(info): if 'redis_version' in info: return", "'c'))) r = yield c.zdiffstore('s3', ('s1', 's2')) self.async.assertEqual(c.zcard('s3'), 1) r = yield c.zrange('s3',", "'b', 100, 'c'))) r = yield c.zdiffstore('s3', ('s1', 
's2')) self.async.assertEqual(c.zcard('s3'), 1) r =", "in d.items(): self.client.hset(key, k, v) def make_list(self, name, l): l = tuple(l) self.client.rpush(name,", "for r in m2)) self.assertEqual(m1, [4,5]) self.assertEqual(m2, [1,2,3,4,5,6,7,8]) def testMove2ZSet(self): client = self.client", "-1) self.assertEqual(sorted(m1), [b'd', b'e']) self.assertEqual(sorted(m2), [b'a',b'b',b'c',b'd',b'e',b'f',b'g']) def testMoveSetSet(self): r = yield self.multi_async((self.client.sadd('foo',1,2,3,4,5), self.client.sadd('bla',4,5,6,7,8)))", "self.client.zadd(name, *flatzset(kwargs=d)) class TestExtraClientCommands(TestCase): def test_coverage(self): c = self.backend.client self.assertEqual(c.prefix, '') size =", "1, 'a2': 1, 'a3': 1}), self.make_zset('ba', {'a1': 2, 'a3': 2, 'a4': 2}), self.make_zset('ca',", "rem = yield self.client.zrange('foo',0,-1) self.assertEqual(rem,[b'e']) def test_zpop_byscore(self): yield self.client.zadd('foo', 1, 'a', 2, 'b',", "['ab', 'bb', 'cb'], withscores=True) self.assertEqual(n, 2) r = yield self.client.zrange('zb', 0, -1, withscores=True)", "res = yield self.client.zpopbyrank('foo',0) rem = yield self.client.zrange('foo',0,-1) self.assertEqual(len(rem),4) self.assertEqual(rem,[b'b',b'c',b'd',b'e']) self.assertEqual(res,[b'a']) res =", "self.client.rpush(name, *l) self.assertEqual(self.client.llen(name), len(l)) def make_zset(self, name, d): self.client.zadd(name, *flatzset(kwargs=d)) class TestExtraClientCommands(TestCase): def", "'a3': 1, 'a4': 4}))) n = yield self.client.zdiffstore('zb', ['ab', 'bb', 'cb'], withscores=True) self.assertEqual(n,", "additional commands for redis client.''' import json from hashlib import sha1 from stdnet", "'a4': 4}))) n = yield self.client.zdiffstore('zb', ['ab', 'bb', 'cb'], withscores=True) self.assertEqual(n, 2) r", "= yield client.execute_script('move2set', ('foo','bla'), 'z') self.assertEqual(len(r), 2) self.assertEqual(r[0], 2) self.assertEqual(r[1], 1) yield 
self.multi_async((client.zinterstore('res1',", "yield self.client.zdiffstore('zb', ['ab', 'bb', 'cb'], withscores=True) self.assertEqual(n, 2) r = yield self.client.zrange('zb', 0,", "SCRIPTING COMMANDS def test_zdiffstore(self): yield self.multi_async((self.make_zset('aa', {'a1': 1, 'a2': 1, 'a3': 1}), self.make_zset('ba',", "2) self.assertEqual(r[1], 1) yield self.multi_async((client.zinterstore('res1', ('foo', 'bla')), client.zunionstore('res2', ('foo', 'bla')))) m1 = yield", "= yield self.client.zrange('foo', 0, -1) self.assertEqual(len(rem), 4) self.assertEqual(rem, [b'a', b'c', b'd', b'e']) self.assertEqual(res,", "self.assertEqual(r[1], 1) yield self.multi_async((self.client.sinterstore('res1', 'foo', 'bla'), self.client.sunionstore('res2', 'foo', 'bla'))) m1 = yield self.client.smembers('res1')", "self.assertEqual(len(r), 2) self.assertEqual(r[0], 2) self.assertEqual(r[1], 0) def testMove2List2(self): yield self.multi_async((self.client.lpush('foo',1,2,3,4,5), self.client.lpush('bla',4,5,6,7,8))) r =", "from hashlib import sha1 from stdnet import getdb from stdnet.backends import redisb from", "tearDown(self): return self.client.flushdb() def make_hash(self, key, d): for k, v in d.items(): self.client.hset(key,", "0, 4.5) self.assertEqual(res, [b'a', b'c', b'd']) rem = yield self.client.zrange('foo', 0, -1) self.assertEqual(rem,", "r = yield self.client.zrange('zb', 0, -1, withscores=True) self.assertEquals(list(r), [(b'a2', 1), (b'a1', 2)]) def", "1) yield self.multi_async((self.client.sinterstore('res1', 'foo', 'bla'), self.client.sunionstore('res2', 'foo', 'bla'))) m1 = yield self.client.smembers('res1') m2", "self.client.zpopbyrank('foo',0,2) self.assertEqual(res,[b'b',b'c',b'd']) rem = yield self.client.zrange('foo',0,-1) self.assertEqual(rem,[b'e']) def test_zpop_byscore(self): yield self.client.zadd('foo', 1, 'a',", "yield client.execute_script('move2set', ('foo','bla'), 'z') self.assertEqual(len(r), 2) self.assertEqual(r[0], 2) 
self.assertEqual(r[1], 1) yield self.multi_async((client.zinterstore('res1', ('foo',", "self.assertEqual(res, [b'b']) res = yield self.client.zpopbyscore('foo', 0, 4.5) self.assertEqual(res, [b'a', b'c', b'd']) rem", "c.zadd('s2', 6, 'a', 9, 'b', 100, 'c'))) r = yield c.zdiffstore('s3', ('s1', 's2'))", "c.zrange('s3', 0, -1) self.assertEqual(r, [b'd']) def test_zdiffstore_withscores2(self): c = self.client yield self.multi_async((c.zadd('s1', 1,", "script = redisb.get_script('test_script') self.assertTrue(script.script) sha = sha1(script.script.encode('utf-8')).hexdigest() self.assertEqual(script.sha1,sha) def test_del_pattern(self): c = self.client", "client = self.backend.client self.client = client.prefixed(self.namespace) def tearDown(self): return self.client.flushdb() def make_hash(self, key,", "from stdnet import getdb from stdnet.backends import redisb from stdnet.utils import test, flatzset", "self.assertEqual(rem,[b'e']) def test_zpop_byscore(self): yield self.client.zadd('foo', 1, 'a', 2, 'b', 3, 'c', 4, 'd',", "1}), self.make_zset('ba', {'a1': 2, 'a3': 2, 'a4': 2}), self.make_zset('ca', {'a1': 6, 'a3': 5,", "'ca']) self.assertEqual(n, 1) r = yield self.client.zrange('za', 0, -1, withscores=True) self.assertEquals(list(r), [(b'a2', 1)])", "= yield c.dbsize() self.assertTrue(size >= 0) def test_script_meta(self): script = redisb.get_script('test_script') self.assertTrue(script.script) sha", "self.client yield self.multi_async((client.zadd('foo',1,'a',2,'b',3,'c',4,'d',5,'e'), client.lpush('bla','d','e','f','g'))) r = yield client.execute_script('move2set', ('foo','bla'), 'z') self.assertEqual(len(r), 2) self.assertEqual(r[0],", "yield client.zrange('res1', 0, -1) m2 = yield client.zrange('res2', 0, -1) self.assertEqual(sorted(m1), [b'd', b'e'])", "'b', 3, 'c', 4, 'd', 5, 'e') res = yield self.client.zpopbyscore('foo', 2) rem", "'c', 4, 'd', 5, 'e') res = yield self.client.zpopbyscore('foo', 2) rem = yield", "self.backend.client self.assertEqual(c.prefix, 
'') size = yield c.dbsize() self.assertTrue(size >= 0) def test_script_meta(self): script", "'b', 3, 'c', 4, 'd'), c.zadd('s2', 6, 'a', 2, 'b', 100, 'c'))) r", "'c'))) r = yield c.zdiffstore('s3', ('s1', 's2'), withscores=True) self.async.assertEqual(c.zcard('s3'), 3) r = yield", "= yield self.client.zpopbyscore('foo', 2) rem = yield self.client.zrange('foo', 0, -1) self.assertEqual(len(rem), 4) self.assertEqual(rem,", "class TestExtraClientCommands(TestCase): def test_coverage(self): c = self.backend.client self.assertEqual(c.prefix, '') size = yield c.dbsize()", "self.assertEqual(n, 1) r = yield self.client.zrange('za', 0, -1, withscores=True) self.assertEquals(list(r), [(b'a2', 1)]) def", "(b'a1', 2)]) def test_zdiffstore2(self): c = self.client yield self.multi_async((c.zadd('s1', 1, 'a', 2, 'b',", "rem = yield self.client.zrange('foo',0,-1) self.assertEqual(len(rem),4) self.assertEqual(rem,[b'b',b'c',b'd',b'e']) self.assertEqual(res,[b'a']) res = yield self.client.zpopbyrank('foo',0,2) self.assertEqual(res,[b'b',b'c',b'd']) rem", "(redisb.read_lua_file('commands.utils'), '''\\ local js = cjson.decode(ARGV[1]) return cjson.encode(js)''') def callback(self, request, result, args,", "c.zadd('s2', 6, 'a', 2, 'b', 100, 'c'))) r = yield c.zdiffstore('s3', ('s1', 's2'),", "('foo','bla'), 's') self.assertEqual(len(r), 2) self.assertEqual(r[0], 2) self.assertEqual(r[1], 2) def test_bad_execute_script(self): self.assertRaises(redisb.RedisError, self.client.execute_script, 'foo',", "c.zrange('s3', 0, -1, withscores=True) self.assertEqual(dict(r), {b'a': -5.0, b'c': -97.0, b'd': 4.0}) def test_zpop_byrank(self):", "4, 'd', 5, 'e') res = yield self.client.zpopbyscore('foo', 2) rem = yield self.client.zrange('foo',", "1)]) def test_zdiffstore_withscores(self): yield self.multi_async((self.make_zset('ab', {'a1': 6, 'a2': 1, 'a3': 2}), self.make_zset('bb', {'a1':" ]
[ "where would pass in creds and port and such # name= is the", "the \"core\" connection # default localhost and port mongoengine.register_connection(alias='core', name='demo_dealership') # could have", "connection # default localhost and port mongoengine.register_connection(alias='core', name='demo_dealership') # could have multiple like", "is the database db name # when we define our classes we will", "this is where would pass in creds and port and such # name=", "refer to the \"core\" connection # default localhost and port mongoengine.register_connection(alias='core', name='demo_dealership') #", "name # when we define our classes we will refer to the \"core\"", "such # name= is the database db name # when we define our", "mongoengine def global_init(): # this is where would pass in creds and port", "pass in creds and port and such # name= is the database db", "the database db name # when we define our classes we will refer", "creds and port and such # name= is the database db name #", "def global_init(): # this is where would pass in creds and port and", "default localhost and port mongoengine.register_connection(alias='core', name='demo_dealership') # could have multiple like # mongoengine.register_connection(alias='analytics',", "define our classes we will refer to the \"core\" connection # default localhost", "# this is where would pass in creds and port and such #", "# name= is the database db name # when we define our classes", "name= is the database db name # when we define our classes we", "global_init(): # this is where would pass in creds and port and such", "our classes we will refer to the \"core\" connection # default localhost and", "localhost and port mongoengine.register_connection(alias='core', name='demo_dealership') # could have multiple like # mongoengine.register_connection(alias='analytics', name='anotherDBname')", "\"core\" connection # default localhost and port mongoengine.register_connection(alias='core', name='demo_dealership') # could 
have multiple", "and port and such # name= is the database db name # when", "classes we will refer to the \"core\" connection # default localhost and port", "when we define our classes we will refer to the \"core\" connection #", "database db name # when we define our classes we will refer to", "to the \"core\" connection # default localhost and port mongoengine.register_connection(alias='core', name='demo_dealership') # could", "# default localhost and port mongoengine.register_connection(alias='core', name='demo_dealership') # could have multiple like #", "db name # when we define our classes we will refer to the", "# when we define our classes we will refer to the \"core\" connection", "is where would pass in creds and port and such # name= is", "we define our classes we will refer to the \"core\" connection # default", "we will refer to the \"core\" connection # default localhost and port mongoengine.register_connection(alias='core',", "will refer to the \"core\" connection # default localhost and port mongoengine.register_connection(alias='core', name='demo_dealership')", "import mongoengine def global_init(): # this is where would pass in creds and", "in creds and port and such # name= is the database db name", "<reponame>jabelk/mongodb-for-python-developers import mongoengine def global_init(): # this is where would pass in creds", "would pass in creds and port and such # name= is the database", "port and such # name= is the database db name # when we", "and such # name= is the database db name # when we define" ]
[ "self.__error @error.setter def error(self, new_error): self.__error = new_error def __str__(self): return \"{}\\n{}\\n{}\\n{}\\n{}\\n{}\\n{}\\n\".format( \"--------------------------------------------------------------------------------------------------------\",", "import MediaSourceType class CardViewInfo: def __init__(self, ms_type=MediaSourceType.NONE, url=\"\", image_url=\"\", title=\"\", description=\"\"): self.__ms_type =", "return self.__title @title.setter def title(self, new_title): self.__title = new_title @property def description(self): return", "description self.__error = \"\" @property def ms_type(self): return self.__ms_type @ms_type.setter def ms_type(self, new_ms_type):", "error(self): return self.__error @error.setter def error(self, new_error): self.__error = new_error def __str__(self): return", "return self.__error @error.setter def error(self, new_error): self.__error = new_error def __str__(self): return \"{}\\n{}\\n{}\\n{}\\n{}\\n{}\\n{}\\n\".format(", "self.__description = new_description @property def error(self): return self.__error @error.setter def error(self, new_error): self.__error", "class CardViewInfo: def __init__(self, ms_type=MediaSourceType.NONE, url=\"\", image_url=\"\", title=\"\", description=\"\"): self.__ms_type = ms_type self.__url", "= title self.__description = description self.__error = \"\" @property def ms_type(self): return self.__ms_type", "self.__image_url @image_url.setter def image_url(self, new_image_url): self.__image_url = new_image_url @property def title(self): return self.__title", "def error(self): return self.__error @error.setter def error(self, new_error): self.__error = new_error def __str__(self):", "new_ms_type @property def url(self): return self.__url @url.setter def url(self, new_url): self.__url = new_url", "new_url @property def image_url(self): return self.__image_url @image_url.setter def image_url(self, new_image_url): self.__image_url = new_image_url", "self.__url = url self.__image_url 
= image_url self.__title = title self.__description = description self.__error", "self.__ms_type = ms_type self.__url = url self.__image_url = image_url self.__title = title self.__description", "= new_image_url @property def title(self): return self.__title @title.setter def title(self, new_title): self.__title =", "def url(self): return self.__url @url.setter def url(self, new_url): self.__url = new_url @property def", "= new_error def __str__(self): return \"{}\\n{}\\n{}\\n{}\\n{}\\n{}\\n{}\\n\".format( \"--------------------------------------------------------------------------------------------------------\", \"ms_type\\t\\t{}\".format(self.__ms_type), \"url\\t\\t\\t{}\".format(self.__url), \"image_url\\t{}\".format(self.__image_url), \"title\\t\\t{}\".format(self.__title), \"desc\\t\\t{}\".format(self.__description), \"error\\t\\t{}\".format(self.__error),", "url=\"\", image_url=\"\", title=\"\", description=\"\"): self.__ms_type = ms_type self.__url = url self.__image_url = image_url", "@image_url.setter def image_url(self, new_image_url): self.__image_url = new_image_url @property def title(self): return self.__title @title.setter", "title=\"\", description=\"\"): self.__ms_type = ms_type self.__url = url self.__image_url = image_url self.__title =", "url(self, new_url): self.__url = new_url @property def image_url(self): return self.__image_url @image_url.setter def image_url(self,", "ms_type(self): return self.__ms_type @ms_type.setter def ms_type(self, new_ms_type): self.__ms_type = new_ms_type @property def url(self):", "return self.__ms_type @ms_type.setter def ms_type(self, new_ms_type): self.__ms_type = new_ms_type @property def url(self): return", "title(self, new_title): self.__title = new_title @property def description(self): return self.__description @description.setter def description(self,", "def description(self, new_description): self.__description = new_description @property def error(self): return self.__error @error.setter def", 
"self.__title = new_title @property def description(self): return self.__description @description.setter def description(self, new_description): self.__description", "image_url self.__title = title self.__description = description self.__error = \"\" @property def ms_type(self):", "self.__image_url = image_url self.__title = title self.__description = description self.__error = \"\" @property", "new_title): self.__title = new_title @property def description(self): return self.__description @description.setter def description(self, new_description):", "= ms_type self.__url = url self.__image_url = image_url self.__title = title self.__description =", "self.__error = new_error def __str__(self): return \"{}\\n{}\\n{}\\n{}\\n{}\\n{}\\n{}\\n\".format( \"--------------------------------------------------------------------------------------------------------\", \"ms_type\\t\\t{}\".format(self.__ms_type), \"url\\t\\t\\t{}\".format(self.__url), \"image_url\\t{}\".format(self.__image_url), \"title\\t\\t{}\".format(self.__title), \"desc\\t\\t{}\".format(self.__description),", "@property def image_url(self): return self.__image_url @image_url.setter def image_url(self, new_image_url): self.__image_url = new_image_url @property", "new_error def __str__(self): return \"{}\\n{}\\n{}\\n{}\\n{}\\n{}\\n{}\\n\".format( \"--------------------------------------------------------------------------------------------------------\", \"ms_type\\t\\t{}\".format(self.__ms_type), \"url\\t\\t\\t{}\".format(self.__url), \"image_url\\t{}\".format(self.__image_url), \"title\\t\\t{}\".format(self.__title), \"desc\\t\\t{}\".format(self.__description), \"error\\t\\t{}\".format(self.__error), \"--------------------------------------------------------------------------------------------------------\"", "def description(self): return self.__description @description.setter def description(self, new_description): self.__description = new_description @property def", "def image_url(self, new_image_url): 
self.__image_url = new_image_url @property def title(self): return self.__title @title.setter def", "new_error): self.__error = new_error def __str__(self): return \"{}\\n{}\\n{}\\n{}\\n{}\\n{}\\n{}\\n\".format( \"--------------------------------------------------------------------------------------------------------\", \"ms_type\\t\\t{}\".format(self.__ms_type), \"url\\t\\t\\t{}\".format(self.__url), \"image_url\\t{}\".format(self.__image_url), \"title\\t\\t{}\".format(self.__title),", "= description self.__error = \"\" @property def ms_type(self): return self.__ms_type @ms_type.setter def ms_type(self,", "__init__(self, ms_type=MediaSourceType.NONE, url=\"\", image_url=\"\", title=\"\", description=\"\"): self.__ms_type = ms_type self.__url = url self.__image_url", "def __str__(self): return \"{}\\n{}\\n{}\\n{}\\n{}\\n{}\\n{}\\n\".format( \"--------------------------------------------------------------------------------------------------------\", \"ms_type\\t\\t{}\".format(self.__ms_type), \"url\\t\\t\\t{}\".format(self.__url), \"image_url\\t{}\".format(self.__image_url), \"title\\t\\t{}\".format(self.__title), \"desc\\t\\t{}\".format(self.__description), \"error\\t\\t{}\".format(self.__error), \"--------------------------------------------------------------------------------------------------------\" )", "@property def ms_type(self): return self.__ms_type @ms_type.setter def ms_type(self, new_ms_type): self.__ms_type = new_ms_type @property", "self.__title @title.setter def title(self, new_title): self.__title = new_title @property def description(self): return self.__description", "\"\" @property def ms_type(self): return self.__ms_type @ms_type.setter def ms_type(self, new_ms_type): self.__ms_type = new_ms_type", "def title(self): return self.__title @title.setter def title(self, new_title): self.__title = new_title @property def", "self.__url = new_url @property def image_url(self): return self.__image_url @image_url.setter def image_url(self, new_image_url): 
self.__image_url", "description=\"\"): self.__ms_type = ms_type self.__url = url self.__image_url = image_url self.__title = title", "def __init__(self, ms_type=MediaSourceType.NONE, url=\"\", image_url=\"\", title=\"\", description=\"\"): self.__ms_type = ms_type self.__url = url", "= new_ms_type @property def url(self): return self.__url @url.setter def url(self, new_url): self.__url =", "@description.setter def description(self, new_description): self.__description = new_description @property def error(self): return self.__error @error.setter", "ms_type(self, new_ms_type): self.__ms_type = new_ms_type @property def url(self): return self.__url @url.setter def url(self,", "@property def description(self): return self.__description @description.setter def description(self, new_description): self.__description = new_description @property", "new_title @property def description(self): return self.__description @description.setter def description(self, new_description): self.__description = new_description", "def image_url(self): return self.__image_url @image_url.setter def image_url(self, new_image_url): self.__image_url = new_image_url @property def", "def title(self, new_title): self.__title = new_title @property def description(self): return self.__description @description.setter def", "url(self): return self.__url @url.setter def url(self, new_url): self.__url = new_url @property def image_url(self):", "return self.__image_url @image_url.setter def image_url(self, new_image_url): self.__image_url = new_image_url @property def title(self): return", "= url self.__image_url = image_url self.__title = title self.__description = description self.__error =", "new_image_url): self.__image_url = new_image_url @property def title(self): return self.__title @title.setter def title(self, new_title):", "title(self): return self.__title @title.setter def title(self, new_title): self.__title = new_title @property def description(self):", "url self.__image_url = image_url 
self.__title = title self.__description = description self.__error = \"\"", "ms_type self.__url = url self.__image_url = image_url self.__title = title self.__description = description", "from .mediasourcetype import MediaSourceType class CardViewInfo: def __init__(self, ms_type=MediaSourceType.NONE, url=\"\", image_url=\"\", title=\"\", description=\"\"):", "= new_url @property def image_url(self): return self.__image_url @image_url.setter def image_url(self, new_image_url): self.__image_url =", "new_url): self.__url = new_url @property def image_url(self): return self.__image_url @image_url.setter def image_url(self, new_image_url):", "return self.__url @url.setter def url(self, new_url): self.__url = new_url @property def image_url(self): return", "self.__description = description self.__error = \"\" @property def ms_type(self): return self.__ms_type @ms_type.setter def", ".mediasourcetype import MediaSourceType class CardViewInfo: def __init__(self, ms_type=MediaSourceType.NONE, url=\"\", image_url=\"\", title=\"\", description=\"\"): self.__ms_type", "image_url(self): return self.__image_url @image_url.setter def image_url(self, new_image_url): self.__image_url = new_image_url @property def title(self):", "@property def url(self): return self.__url @url.setter def url(self, new_url): self.__url = new_url @property", "new_ms_type): self.__ms_type = new_ms_type @property def url(self): return self.__url @url.setter def url(self, new_url):", "image_url=\"\", title=\"\", description=\"\"): self.__ms_type = ms_type self.__url = url self.__image_url = image_url self.__title", "new_description @property def error(self): return self.__error @error.setter def error(self, new_error): self.__error = new_error", "@error.setter def error(self, new_error): self.__error = new_error def __str__(self): return \"{}\\n{}\\n{}\\n{}\\n{}\\n{}\\n{}\\n\".format( \"--------------------------------------------------------------------------------------------------------\", 
\"ms_type\\t\\t{}\".format(self.__ms_type),", "= new_title @property def description(self): return self.__description @description.setter def description(self, new_description): self.__description =", "CardViewInfo: def __init__(self, ms_type=MediaSourceType.NONE, url=\"\", image_url=\"\", title=\"\", description=\"\"): self.__ms_type = ms_type self.__url =", "@url.setter def url(self, new_url): self.__url = new_url @property def image_url(self): return self.__image_url @image_url.setter", "def ms_type(self, new_ms_type): self.__ms_type = new_ms_type @property def url(self): return self.__url @url.setter def", "self.__url @url.setter def url(self, new_url): self.__url = new_url @property def image_url(self): return self.__image_url", "self.__error = \"\" @property def ms_type(self): return self.__ms_type @ms_type.setter def ms_type(self, new_ms_type): self.__ms_type", "self.__description @description.setter def description(self, new_description): self.__description = new_description @property def error(self): return self.__error", "error(self, new_error): self.__error = new_error def __str__(self): return \"{}\\n{}\\n{}\\n{}\\n{}\\n{}\\n{}\\n\".format( \"--------------------------------------------------------------------------------------------------------\", \"ms_type\\t\\t{}\".format(self.__ms_type), \"url\\t\\t\\t{}\".format(self.__url), \"image_url\\t{}\".format(self.__image_url),", "@title.setter def title(self, new_title): self.__title = new_title @property def description(self): return self.__description @description.setter", "ms_type=MediaSourceType.NONE, url=\"\", image_url=\"\", title=\"\", description=\"\"): self.__ms_type = ms_type self.__url = url self.__image_url =", "= image_url self.__title = title self.__description = description self.__error = \"\" @property def", "description(self): return self.__description @description.setter def description(self, new_description): self.__description = new_description @property def error(self):", 
"self.__ms_type = new_ms_type @property def url(self): return self.__url @url.setter def url(self, new_url): self.__url", "image_url(self, new_image_url): self.__image_url = new_image_url @property def title(self): return self.__title @title.setter def title(self,", "title self.__description = description self.__error = \"\" @property def ms_type(self): return self.__ms_type @ms_type.setter", "@ms_type.setter def ms_type(self, new_ms_type): self.__ms_type = new_ms_type @property def url(self): return self.__url @url.setter", "def url(self, new_url): self.__url = new_url @property def image_url(self): return self.__image_url @image_url.setter def", "new_image_url @property def title(self): return self.__title @title.setter def title(self, new_title): self.__title = new_title", "return self.__description @description.setter def description(self, new_description): self.__description = new_description @property def error(self): return", "= \"\" @property def ms_type(self): return self.__ms_type @ms_type.setter def ms_type(self, new_ms_type): self.__ms_type =", "self.__title = title self.__description = description self.__error = \"\" @property def ms_type(self): return", "= new_description @property def error(self): return self.__error @error.setter def error(self, new_error): self.__error =", "@property def title(self): return self.__title @title.setter def title(self, new_title): self.__title = new_title @property", "def error(self, new_error): self.__error = new_error def __str__(self): return \"{}\\n{}\\n{}\\n{}\\n{}\\n{}\\n{}\\n\".format( \"--------------------------------------------------------------------------------------------------------\", \"ms_type\\t\\t{}\".format(self.__ms_type), \"url\\t\\t\\t{}\".format(self.__url),", "MediaSourceType class CardViewInfo: def __init__(self, ms_type=MediaSourceType.NONE, url=\"\", image_url=\"\", title=\"\", description=\"\"): self.__ms_type = ms_type", "self.__image_url = new_image_url @property def title(self): return 
self.__title @title.setter def title(self, new_title): self.__title", "description(self, new_description): self.__description = new_description @property def error(self): return self.__error @error.setter def error(self,", "new_description): self.__description = new_description @property def error(self): return self.__error @error.setter def error(self, new_error):", "@property def error(self): return self.__error @error.setter def error(self, new_error): self.__error = new_error def", "self.__ms_type @ms_type.setter def ms_type(self, new_ms_type): self.__ms_type = new_ms_type @property def url(self): return self.__url", "def ms_type(self): return self.__ms_type @ms_type.setter def ms_type(self, new_ms_type): self.__ms_type = new_ms_type @property def" ]
[ "random.random() < 0.1: row.append(-1) else: row.append(0) self.map.append(row) def countrocks(self): self.rocks = 0 for", "generation = [] # Garden creation garden = Garden(map) # Creation of starting", "# Moving left def left(self, column, number): self.start = (number - column, column", "generation = [] # Garden creation garden = GGarden() # Creation of starting", "0: continue # If we fing any obstacle we change movement direction if", "if 'X' not in nv: self.fitnessFunc() return break if goTo == '3' or", "self.left(column, number) elif row + column <= number < column + column +", "= [ int(a) for a in bin(random.randrange(1024)) [2:].zfill(10) ] class Chromosome: def __init__(self,", "test1 = [ [00, 00, 00, 00, 00, 00, 00, 00, 00, 00,", "elif inp == \"Generate\" or inp == \"generate\": # Generate Function - generate", "1 elif goTo == '4': position[1] -= 1 # We choose new tile", "Gene(self.garden) # New rotations elif number < 0.2: newChr.genes[i].rotate() # Crossing of new", "if not we move there if g.map[position[0]][position[1]] == 0: continue # If we", "00, 00, -1, 00, 00, 00, 00, 00, 00, 00], [00, 00, -1,", "- best.fitness)) print(\"_________________________________________________________\") print(\"Initial Garden\") # Transforming garden into clear grided output helper", "\"Generate\" or inp == \"generate\": # Generate Function - generate Random garden print(\"Counting...\")", "i in range(800): # Saving the best chromosome bestChr = max(generation, key=lambda x:", "00, -1, 00, 00, 00], [00, 00, 00, 00, 00, 00, -1, -1,", "-1, 00, 00, 00, 00, 00, 00], [00, -1, 00, 00, 00, 00,", "00, 00], ] test3 = [ [00, 00, 00, 00, 00, 00, 00,", "= sum(self.fGarden.map, []) for x in itera: if x > 0: self.fitness +=", "nv = [] for p in helper: try: nv.append(g.map[p[0]][p[1]]) except IndexError: nv.append('X') #", "position[1] + 1]) else: helper = ([position[0] - 1, position[1]], [position[0] + 1,", "def fitnessFunc(self): self.fitness = 0 itera = sum(self.fGarden.map, []) 
for x in itera:", "= i # Formatted Print finalprint(garden, bestChr, number) def finalprint(garden, best, number): print()", "def rotate(self): self.rotation = [ int(a) for a in bin(random.randrange(1024)) [2:].zfill(10) ] class", "generateMap() elif inp == \"Test\" or inp == \"test\": # Test Function -", "position[1] += 1 elif goTo == '4': position[1] -= 1 # We choose", "generation information print('Generations:%4d Max-Fitness:%4d Best-Fitness:%4d' % (number + 1, garden.max_fitness, best.fitness)) print('Tiles Left:", "loading from File if inp == \"File\" or inp == \"file\": print(\"Counting...\") file", "def fillMap(self, map): self.m = len(map) self.n = len(map[0]) self.map = copy.deepcopy(map) def", "for i in range(50): generation.append(Chromosome(garden)) # Generations creation for i in range(800): #", "goTo == '1': helper = ([position[0], position[1] - 1], [position[0], position[1] + 1])", "random.randrange(2, 11) self.n = random.randrange(2, 11) self.map = [] for i in range(self.m):", "= number # We chose next tile to move on if goTo ==", "g = self.fGarden number = 0 # Start of genes iterations for gene", "of chromosones for i in range(50): generation.append(Chromosome(garden)) # Generations creation for i in", "00, -1, 00, 00, 00, 00, 00, 00, 00], [00, -1, -1, -1,", "if random.random() < 0.1: row.append(-1) else: row.append(0) self.map.append(row) def countrocks(self): self.rocks = 0", "print(helper) print(\"_________________________________________________________\") print(\"Final Result\") # Transforming solved garden into clear grided output helper", "edge of the map if position[0] not in range(g.m) or position[1] not in", "crossing(self, other): # Create new chromosome with empty genes newChr = Chromosome(self.garden, False)", "number < column: # Down self.down(number) elif column <= number < row +", "goTo == '2': position[1] += 1 elif goTo == '4': position[1] -= 1", "+= 1 if x == len(gene.rotation): x = 0 # If everything is", "<= number < column + 
column + row: # up self.up(row, column, number)", "__init__(self, garden, start=True): self.garden = garden self.fGarden = Garden(self.garden.map) self.genesFill(garden, start) def genesFill(self,", "a random one\") print(\"_________________________________________________________\") print(\"Write: File - Load from file\") print(\"Write: Generate -", "newChr.genes = random.choice((self.genes, other.genes)) # Mutations if mutateNum < 0.5: self.mutate(newChr) newChr.algo() return", "+ column)) # Each direction has its own function for movement if number", "first type - 2 - first part is from Chromosome 1, second is", "< 0.5: self.mutate(newChr) newChr.algo() return newChr def solveMap(map): # Inicialisation of variables generation", "00, 00, 00, 00, 00, 00], [00, 00, 00, 00, 00, 00, 00,", "len(map) self.n = len(map[0]) self.map = copy.deepcopy(map) def countrocks(self): self.rocks = 0 for", "+ column) + (row + column)) # Each direction has its own function", "Check if we didnt find solution if bestChr.fitness == garden.max_fitness: break # Creating", "00, 00, 00, 00], [00, 00, 00, 00, 00, 00, 00, 00, 00,", "00, 00, 00, 00, -1, 00, -1, 00, 00, 00], [00, 00, 00,", "00, 00, 00, 00, 00], [00, 00, -1, 00, 00, 00, 00, 00,", "if g.map[position[0]][position[1]] != 0: continue number += 1 while (1): # Rake the", "def mutate(self, newChr): for i in range(len(newChr.genes)): # New chromosome number = random.random()", "self.map.append(row) def countrocks(self): self.rocks = 0 for i in self.map: for j in", "fitness(self): self.max_fitness = 0 for i in self.map: for j in i: if", "= ((row + column) + (row + column) - number - 1, 0)", "for gene in self.genes: position = list(gene.start) goTo = gene.goTo x = 0", "in self.genes: position = list(gene.start) goTo = gene.goTo x = 0 # We", "print(\"Counting...\") generateMap() elif inp == \"Test\" or inp == \"test\": # Test Function", "helper = \"\" for x in best.fGarden.map: for y in x: if y", "= [ [00, 00, 00, 00, 00, 00, 00, -1, 00, 
-1, 00,", "+ column) + (row + column) - number - 1, 0) self.goTo =", "print(\"Test2 - Unsolvable test\") print(\"Test3 - Test with staying in the garden\") print(\"Test4", "the best chromosome bestChr = max(generation, key=lambda x: x.fitness) nextGeneration = [bestChr] #", "self.genes.append(Gene(self.garden)) self.algo() def fitnessFunc(self): self.fitness = 0 itera = sum(self.fGarden.map, []) for x", "garden.m column = garden.n number = random.randrange((row + column) + (row + column))", "+ 1, garden.max_fitness, best.fitness)) print('Tiles Left: %d' % (garden.max_fitness - best.fitness)) print(\"_________________________________________________________\") print(\"Initial", "Create new chromosome with empty genes newChr = Chromosome(self.garden, False) # Crossing process", "we find one not raken if nv.count(0) == 1: position = helper[nv.index(0)] #", "We chose next tile to move on if goTo == '3': position[0] -=", "def fitness(self): self.max_fitness = 0 for i in self.map: for j in i:", "elif goTo == '1': position[0] -= 1 elif goTo == '2': position[1] +=", "row = [] riadok = f.readline() if riadok == '': break pocet +=", "00, 00, 00, 00, 00, 00, -1, -1, -1, 00, 00], [00, 00,", "x: x.fitness) nextGeneration = [bestChr] # Check if we didnt find solution if", "' K ' else: helper += '%2d ' % y helper += '\\n'", "range(49): # Choose random chromosomes from current generation chromosome1, chromosome2 = sorted(random.sample(generation, 4),", "for i in range(helper): self.genes.append(Gene(self.garden)) self.algo() def fitnessFunc(self): self.fitness = 0 itera =", "fitnessFunc(self): self.fitness = 0 itera = sum(self.fGarden.map, []) for x in itera: if", "'1': if helper.index(position) == 0: goTo = '2' else: goTo = '4' else:", "00, 00, 00, 00, 00, 00, 00, 00, 00], ] test2 = [", "self.map = [] for i in range(self.m): row = [] for j in", "00, 00, 00, 00, 00], ] test2 = [ [00, 00, 00, 00,", "x in itera: if x > 0: self.fitness += 1 def algo(self): g", "helper.index(position) 
== 0: goTo = '2' else: goTo = '4' else: if helper.index(position)", "column <= number < row + column: # Left self.left(column, number) elif row", "return newChr def solveMap(map): # Inicialisation of variables generation = [] # Garden", "to load a map from a file or generate a random one\") print(\"_________________________________________________________\")", "Mutation of chromosomes def mutate(self, newChr): for i in range(len(newChr.genes)): # New chromosome", "'3': position[0] -= 1 elif goTo == '1': position[0] += 1 elif goTo", "00, 00, 00, 00, 00, 00], [00, 00, 00, 00, -1, 00, 00,", "+= 1 class Garden: def __init__(self, map): self.fillMap(map) self.countrocks() self.fitness() def fillMap(self, map):", "print(\"Test3 - Test with staying in the garden\") print(\"Test4 - Test with one", "Generate - Generate random map\") print(\"Write: Test - Test mode\") print(\"_________________________________________________________\") inp =", "self.max_fitness += 1 class Gene: def __init__(self, garden): row = garden.m column =", "== \"Test\" or inp == \"test\": # Test Function - Choose from availbale", "00, 00, 00, 00], [00, -1, -1, -1, -1, 00, 00, 00, 00,", "Print finalprint(garden, bestChr, number) def finalprint(garden, best, number): print() print() # Printing all", "print(\"Test4 - Test with one tile exit\") print(\"_________________________________________________________\") inp = input() if inp", "from File if inp == \"File\" or inp == \"file\": print(\"Counting...\") file =", "< 0.425: # Crossing first type - 2 - first part is from", "Garden(self.garden.map) self.genesFill(garden, start) def genesFill(self, garden, start): self.genes = [] helper = garden.m", "== 0: self.max_fitness += 1 class Gene: def __init__(self, garden): row = garden.m", "number = i # Formatted Print finalprint(garden, bestChr, number) def finalprint(garden, best, number):", "'4': position[1] -= 1 # We choose new tile if goTo == '3'", "self.n = len(map[0]) self.map = copy.deepcopy(map) def 
countrocks(self): self.rocks = 0 for i", "2 newChr.genes = self.genes[:pivotPoint] + other.genes[pivotPoint:] else: # Crossing third type - 3", "everything is raken else: if 'X' not in nv: self.fitnessFunc() return break if", "if inp == \"File\" or inp == \"file\": print(\"Counting...\") file = [] #", "if helper.index(position) == 0: goTo = '2' else: goTo = '4' else: if", "def __init__(self, map): self.fillMap(map) self.countrocks() self.fitness() def fillMap(self, map): self.m = len(map) self.n", "00, 00, 00, 00, -1, -1, -1, 00, 00], [00, 00, 00, 00,", "i in range(len(newChr.genes)): # New chromosome number = random.random() if number < 0.1:", "0.1: row.append(-1) else: row.append(0) self.map.append(row) def countrocks(self): self.rocks = 0 for i in", "Transforming solved garden into clear grided output helper = \"\" for x in", "Left self.left(column, number) elif row + column <= number < column + column", "the map if position[0] not in range(g.m) or position[1] not in range(g.n): break", "helper = ([position[0] - 1, position[1]], [position[0] + 1, position[1]]) # We check", "self.map: for j in i: if j == 0: self.max_fitness += 1 class", "number < 0.1: newChr.genes[i] = Gene(self.garden) # New rotations elif number < 0.2:", "copy import sys test1 = [ [00, 00, 00, 00, 00, 00, 00,", "def genesFill(self, garden, start): self.genes = [] helper = garden.m + garden.n +", "solveMap(map): # Inicialisation of variables generation = [] # Garden creation garden =", "== -1: self.rocks += 1 def fitness(self): self.max_fitness = 0 for i in", "def generateMap(): # Inicialisation of variables generation = [] # Garden creation garden", "self.rocks += 1 def fitness(self): self.max_fitness = 0 for i in self.map: for", "else: if helper.index(position) == 0: goTo = '3' else: goTo = '1' self.fitnessFunc()", "00, 00, 00, 00, -1, 00, -1, 00, 00], [00, 00, 00, 00,", "Best-Fitness:%4d' % (number + 1, garden.max_fitness, best.fitness)) print('Tiles Left: %d' % (garden.max_fitness -", "to 
fill generation for j in range(49): # Choose random chromosomes from current", "solveMap(test1) elif inp == '2': print(\"Counting...\") solveMap(test2) elif inp == '3': print(\"Counting...\") solveMap(test3)", "best.fitness)) print('Tiles Left: %d' % (garden.max_fitness - best.fitness)) print(\"_________________________________________________________\") print(\"Initial Garden\") # Transforming", "elif goTo == '1': position[0] += 1 elif goTo == '2': position[1] -=", "00, 00, 00, -1, 00, 00, 00, 00, 00, 00, 00], [00, 00,", "== '3' or goTo == '1': helper = ([position[0], position[1] - 1], [position[0],", "Choose random chromosomes from current generation chromosome1, chromosome2 = sorted(random.sample(generation, 4), key=lambda x:", "right(self, row, column, number): self.start = ((row + column) + (row + column)", "position[0] += 1 elif goTo == '1': position[0] -= 1 elif goTo ==", "self.genes: position = list(gene.start) goTo = gene.goTo x = 0 # We check", "x.fitness)[2:4] # Create very new Chromosome by crossing and mutation nextGeneration.append(chromosome1.crossing(chromosome2)) generation =", "< 0.1: row.append(-1) else: row.append(0) self.map.append(row) def countrocks(self): self.rocks = 0 for i", "x > 0: self.fitness += 1 def algo(self): g = self.fGarden number =", "column - 1) self.goTo = '2' # Representing left # Moving up def", "variables generation = [] # Garden creation garden = GGarden() # Creation of", "'3' # Representing up # Moving up def right(self, row, column, number): self.start", "crossNum < 0.425: # Crossing first type - 2 - first part is", "generation.append(Chromosome(garden)) # Generations creation for i in range(1000): # Saving the best chromosome", "== '3': position[0] -= 1 elif goTo == '1': position[0] += 1 elif", "chromosomes def mutate(self, newChr): for i in range(len(newChr.genes)): # New chromosome number =", "is not raken - if not we move there if g.map[position[0]][position[1]] == 0:", "i: if j == 0: self.max_fitness += 1 class 
Gene: def __init__(self, garden):", "< 0.85: # Crossing second type - 1 - Choosing random genes from", "+= 1 elif goTo == '1': position[0] -= 1 elif goTo == '2':", "we didnt find solution if bestChr.fitness == garden.max_fitness: break # Creating more chromosones", "self.down(number) elif column <= number < row + column: # Left self.left(column, number)", "# Down self.down(number) elif column <= number < row + column: # Left", "= random.choice((self.genes, other.genes)) # Mutations if mutateNum < 0.5: self.mutate(newChr) newChr.algo() return newChr", "random.random() pivotPoint = random.randrange(len(self.genes)) if crossNum < 0.85: # Crossing second type -", "00, 00, 00, 00, 00, 00, 00, 00, 00, 00, 00], [00, 00,", "self.start = (number - column, column - 1) self.goTo = '2' # Representing", "== '1': if helper.index(position) == 0: goTo = '2' else: goTo = '4'", "00, 00], [00, 00, 00, 00, 00, 00, -1, 00, -1, 00, 00,", "00, 00, 00, 00, 00, 00, 00, 00, 00, 00, 00], ] test2", "Representing left # Moving up def up(self, row, column, number): self.start = (row", "= [] for p in helper: try: nv.append(g.map[p[0]][p[1]]) except IndexError: nv.append('X') # If", "number) elif row + column <= number < column + column + row:", "up # Moving up def right(self, row, column, number): self.start = ((row +", "or position[1] not in range(g.n): break # We check if it is not", "left def left(self, column, number): self.start = (number - column, column - 1)", "nv.count(0) == 1: position = helper[nv.index(0)] # if we find two not raken", "= [ [00, 00, 00, 00, 00, 00, 00, 00, 00, 00, 00,", "00], [00, -1, -1, -1, -1, 00, 00, 00, 00, 00, 00, 00],", "left # Moving up def up(self, row, column, number): self.start = (row -", "first part is from Chromosome 1, second is from Chromosome 2 newChr.genes =", "= '4' # Representing right def rotate(self): self.rotation = [ int(a) for a", "= random.random() crossNum = random.random() pivotPoint = random.randrange(len(self.genes)) if crossNum < 0.85: #", 
"] class Chromosome: def __init__(self, garden, start=True): self.garden = garden self.fGarden = Garden(self.garden.map)", "goTo = '3' else: goTo = '1' self.fitnessFunc() # Mutation of chromosomes def", "= f.readline() if riadok == '': break pocet += 1 riadok = riadok.split()", "1, garden.max_fitness, best.fitness)) print('Tiles Left: %d' % (garden.max_fitness - best.fitness)) print(\"_________________________________________________________\") print(\"Initial Garden\")", "y == -1: helper += ' K ' else: helper += '%2d '", "] test2 = [ [00, 00, 00, 00, 00, 00, 00, 00, 00,", "find two not raken elif nv.count(0) == 2: position = helper[gene.rotation[x]] x +=", "(0, number) self.goTo = '1' # Representing dow # Moving left def left(self,", "class Gene: def __init__(self, garden): row = garden.m column = garden.n number =", "direction has its own function for movement if number < column: # Down", "row = [] for j in range(self.n): if random.random() < 0.1: row.append(-1) else:", "1 def algo(self): g = self.fGarden number = 0 # Start of genes", "position[1]], [position[0] + 1, position[1]]) # We check surrounding tiles nv = []", "number of test from 1 - 4\") print(\"Test1 - Model test\") print(\"Test2 -", "self.max_fitness += 1 class Garden: def __init__(self, map): self.fillMap(map) self.countrocks() self.fitness() def fillMap(self,", "inp == '2': print(\"Counting...\") solveMap(test2) elif inp == '3': print(\"Counting...\") solveMap(test3) elif inp", "00, 00, 00], [00, 00, 00, 00, 00, -1, 00, 00, 00, 00,", "Chromosome 2 newChr.genes = self.genes[:pivotPoint] + other.genes[pivotPoint:] else: # Crossing third type -", "if we can enter the garden if g.map[position[0]][position[1]] != 0: continue number +=", "Moving up def up(self, row, column, number): self.start = (row - 1, column", "= \"\" for x in garden.map: for y in x: if y ==", "self.rocks = 0 for i in self.map: for j in i: if j", "00, 00, 00, 00], [00, 00, 00, 00, 00, 00, 00, 00, -1,", "00, 00, 00, 00], ] class 
GGarden: def __init__(self): self.generateRandomMap() self.countrocks() self.fitness() def", "goTo == '3': position[0] -= 1 elif goTo == '1': position[0] += 1", "= input() # File Function - loading from File if inp == \"File\"", "for j in i: if j == 0: self.max_fitness += 1 class Garden:", "from Chromosome 2 newChr.genes = self.genes[:pivotPoint] + other.genes[pivotPoint:] else: # Crossing third type", "] test3 = [ [00, 00, 00, 00, 00, 00, 00, 00, 00,", "print(\"Counting...\") solveMap(test2) elif inp == '3': print(\"Counting...\") solveMap(test3) elif inp == '4': print(\"Counting...\")", "00, 00, 00], [00, -1, 00, 00, 00, 00, 00, 00, 00, 00,", "00, 00, 00, 00], [00, 00, 00, 00, 00, 00, -1, -1, -1,", "00, 00, 00], ] test3 = [ [00, 00, 00, 00, 00, 00,", "# We choose new tile if goTo == '3' or goTo == '1':", "number # We chose next tile to move on if goTo == '3':", "left(self, column, number): self.start = (number - column, column - 1) self.goTo =", "riadok.split() for i in riadok: if i == '00': row.append(0) if i ==", "1 if x == len(gene.rotation): x = 0 # If everything is raken", "from Chromosome 1, second is from Chromosome 2 newChr.genes = self.genes[:pivotPoint] + other.genes[pivotPoint:]", "other.genes[pivotPoint:] else: # Crossing third type - 3 - No crossing newChr.genes =", "Representing right def rotate(self): self.rotation = [ int(a) for a in bin(random.randrange(1024)) [2:].zfill(10)", "bestChr.fitness == garden.max_fitness: break # Creating more chromosones to fill generation for j", "range(800): # Saving the best chromosome bestChr = max(generation, key=lambda x: x.fitness) nextGeneration", "\"generate\": # Generate Function - generate Random garden print(\"Counting...\") generateMap() elif inp ==", "for j in range(self.n): if random.random() < 0.1: row.append(-1) else: row.append(0) self.map.append(row) def", "== \"generate\": # Generate Function - generate Random garden print(\"Counting...\") generateMap() elif inp", "00, 00, 00, 00, 00, 00], [00, 00, 
-1, 00, 00, 00, 00,", "rotate(self): self.rotation = [ int(a) for a in bin(random.randrange(1024)) [2:].zfill(10) ] class Chromosome:", "Generate random map\") print(\"Write: Test - Test mode\") print(\"_________________________________________________________\") inp = input() #", "00, 00], [00, 00, 00, 00, 00, 00, 00, 00, 00, 00, 00,", "'%2d ' % y helper += '\\n' print(helper) print(\"_________________________________________________________\") sys.exit() print(\"----------------->Welcome to Zen", "position[0] += 1 elif goTo == '2': position[1] -= 1 elif goTo ==", "print(\"Initial Garden\") # Transforming garden into clear grided output helper = \"\" for", "00], [00, 00, -1, 00, 00, 00, 00, 00, 00, 00, 00, 00],", "surrounding tiles nv = [] for p in helper: try: nv.append(g.map[p[0]][p[1]]) except IndexError:", "[ [00, 00, 00, 00, 00, 00, 00, 00, 00, 00, 00, 00],", "+ column) - number - 1, 0) self.goTo = '4' # Representing right", "-1, 00, 00, 00, 00, 00, 00, 00], [00, 00, 00, 00, 00,", "# Mutations if mutateNum < 0.5: self.mutate(newChr) newChr.algo() return newChr def solveMap(map): #", "else: helper += '%2d ' % y helper += '\\n' print(helper) print(\"_________________________________________________________\") sys.exit()", "= '1' self.fitnessFunc() # Mutation of chromosomes def mutate(self, newChr): for i in", "in range(50): generation.append(Chromosome(garden)) # Generations creation for i in range(800): # Saving the", "= sorted(random.sample(generation, 4), key=lambda x: x.fitness)[2:4] # Create very new Chromosome by crossing", "random.randrange(2, 11) self.map = [] for i in range(self.m): row = [] for", "goTo == '4': position[1] -= 1 # We choose new tile if goTo", "0.5: self.mutate(newChr) newChr.algo() return newChr def solveMap(map): # Inicialisation of variables generation =", "i: if j == -1: self.rocks += 1 def fitness(self): self.max_fitness = 0", "new chromosome with empty genes newChr = Chromosome(self.garden, False) # Crossing process mutateNum", 
"input() if inp == '1': print(\"Counting...\") solveMap(test1) elif inp == '2': print(\"Counting...\") solveMap(test2)", "staying in the garden\") print(\"Test4 - Test with one tile exit\") print(\"_________________________________________________________\") inp", "00, 00, 00, 00, 00, 00, 00, 00, 00], ] test3 = [", "== '-1': row.append(-1) file.append(row) solveMap(file) elif inp == \"Generate\" or inp == \"generate\":", "map): self.fillMap(map) self.countrocks() self.fitness() def fillMap(self, map): self.m = len(map) self.n = len(map[0])", "+ column <= number < column + column + row: # up self.up(row,", "check if we can enter the garden if g.map[position[0]][position[1]] != 0: continue number", "00, 00, 00, 00, 00, 00, 00, 00], ] test4 = [ [00,", "a file or generate a random one\") print(\"_________________________________________________________\") print(\"Write: File - Load from", "- Test mode\") print(\"_________________________________________________________\") inp = input() # File Function - loading from", "1: position = helper[nv.index(0)] # if we find two not raken elif nv.count(0)", "break # Creating more chromosones to fill generation for j in range(49): #", "= riadok.split() for i in riadok: if i == '00': row.append(0) if i", "0.425: # Crossing first type - 2 - first part is from Chromosome", "2 - first part is from Chromosome 1, second is from Chromosome 2", "== '1': position[0] += 1 elif goTo == '2': position[1] -= 1 elif", "for y in x: if y == -1: helper += ' K '", "= [] # Garden creation garden = Garden(map) # Creation of starting set", "-1, 00, 00, 00, 00, 00, 00, 00, 00, 00, 00], [00, 00,", "= garden.n number = random.randrange((row + column) + (row + column)) # Each", "change movement direction if goTo == '3': position[0] += 1 elif goTo ==", "00, 00, 00, 00, 00, 00, 00, 00, 00, 00], [00, 00, 00,", "00, -1, 00, -1, 00, 00, 00], [00, 00, 00, 00, 00, 00,", "key=lambda x: x.fitness) nextGeneration = [bestChr] # Check if we didnt find solution", 
"sum(self.fGarden.map, []) for x in itera: if x > 0: self.fitness += 1", "open(\"garden.txt\", \"r\") pocet = 0 # Transforming chars into 2D array of integers", "for i in self.map: for j in i: if j == 0: self.max_fitness", "load a map from a file or generate a random one\") print(\"_________________________________________________________\") print(\"Write:", "of the map if position[0] not in range(g.m) or position[1] not in range(g.n):", "function for movement if number < column: # Down self.down(number) elif column <=", "Saving the best chromosome bestChr = max(generation, key=lambda x: x.fitness) nextGeneration = [bestChr]", "elif inp == '3': print(\"Counting...\") solveMap(test3) elif inp == '4': print(\"Counting...\") solveMap(test4) else:", "variables generation = [] # Garden creation garden = Garden(map) # Creation of", "print(\"Counting...\") solveMap(test1) elif inp == '2': print(\"Counting...\") solveMap(test2) elif inp == '3': print(\"Counting...\")", "position[0] -= 1 elif goTo == '2': position[1] += 1 elif goTo ==", "the number of test from 1 - 4\") print(\"Test1 - Model test\") print(\"Test2", "= 0 # Transforming chars into 2D array of integers while(1): row =", "1]) else: helper = ([position[0] - 1, position[1]], [position[0] + 1, position[1]]) #", "# Representing up # Moving up def right(self, row, column, number): self.start =", "00, 00, 00], [00, 00, 00, 00, -1, 00, 00, 00, 00, 00,", "Start of genes iterations for gene in self.genes: position = list(gene.start) goTo =", "didnt find solution if bestChr.fitness == garden.max_fitness: break # Creating more chromosones to", "'1': position[0] -= 1 elif goTo == '2': position[1] += 1 elif goTo", "row.append(0) if i == '-1': row.append(-1) file.append(row) solveMap(file) elif inp == \"Generate\" or", "copy.deepcopy(map) def countrocks(self): self.rocks = 0 for i in self.map: for j in", "00, 00, 00, 00, 00, 00, 00, 00], ] class GGarden: def __init__(self):", "00, 00, 00, 00, 00, 00, 00, 00, 00, 00], ] test2 
=", "newChr.genes.append(random.choice((self.genes[i], other.genes[i]))) elif crossNum < 0.425: # Crossing first type - 2 -", "i in range(len(self.genes)): newChr.genes.append(random.choice((self.genes[i], other.genes[i]))) elif crossNum < 0.425: # Crossing first type", "+ (row + column) - number - 1, 0) self.goTo = '4' #", "i: if j == 0: self.max_fitness += 1 class Garden: def __init__(self, map):", "print() # Printing all necesarry generation information print('Generations:%4d Max-Fitness:%4d Best-Fitness:%4d' % (number +", "== 2: position = helper[gene.rotation[x]] x += 1 if x == len(gene.rotation): x", "-1, 00, 00], [00, 00, 00, 00, 00, 00, 00, 00, 00, 00,", "riadok == '': break pocet += 1 riadok = riadok.split() for i in", "self.algo() def fitnessFunc(self): self.fitness = 0 itera = sum(self.fGarden.map, []) for x in", "empty genes newChr = Chromosome(self.garden, False) # Crossing process mutateNum = random.random() crossNum", "def generateRandomMap(self): self.m = random.randrange(2, 11) self.n = random.randrange(2, 11) self.map = []", "inp == \"Test\" or inp == \"test\": # Test Function - Choose from", "test4 = [ [00, 00, 00, 00, 00, 00, 00, -1, 00, -1,", "00, 00, 00, 00, 00, 00, 00, 00, 00, 00, 00], ] test4", "00, -1, 00, 00, 00, 00, 00, 00, 00], [00, 00, -1, 00,", "from 1 - 4\") print(\"Test1 - Model test\") print(\"Test2 - Unsolvable test\") print(\"Test3", "position[1] += 1 # We check if it it not edge of the", "nv: self.fitnessFunc() return break if goTo == '3' or goTo == '1': if", "00, 00, 00, -1, 00, 00, 00, 00, 00, 00], [00, -1, 00,", "= i # Formatted Print finalprint(garden, bestChr, number) def generateMap(): # Inicialisation of", "chromosome number = random.random() if number < 0.1: newChr.genes[i] = Gene(self.garden) # New", "print(helper) print(\"_________________________________________________________\") sys.exit() print(\"----------------->Welcome to Zen Garden<-----------------\") print(\"\") print(\"Choose to load a map", "-1, -1, -1, 00, 
00, 00, 00, 00, 00, 00], [00, 00, 00,", "\"test\": # Test Function - Choose from availbale tests print(\"_________________________________________________________\") print(\"Choose the number", "garden.n number = random.randrange((row + column) + (row + column)) # Each direction", "number): self.start = (row - 1, column + column + row - number", "1 elif goTo == '2': position[1] -= 1 elif goTo == '4': position[1]", "number): print() print() # Printing all necesarry generation information print('Generations:%4d Max-Fitness:%4d Best-Fitness:%4d' %", "if helper.index(position) == 0: goTo = '3' else: goTo = '1' self.fitnessFunc() #", "Garden<-----------------\") print(\"\") print(\"Choose to load a map from a file or generate a", "finalprint(garden, bestChr, number) def generateMap(): # Inicialisation of variables generation = [] #", "-1, -1, 00, 00, 00], [00, 00, 00, 00, 00, 00, 00, 00,", "goTo == '3' or goTo == '1': if helper.index(position) == 0: goTo =", "helper += '\\n' print(helper) print(\"_________________________________________________________\") sys.exit() print(\"----------------->Welcome to Zen Garden<-----------------\") print(\"\") print(\"Choose to", "self.start = (row - 1, column + column + row - number -", "find solution if bestChr.fitness == garden.max_fitness: break # Creating more chromosones to fill", "solveMap(test4) else: print(\"You entered wrong command\") sys.exit() else: print(\"You entered wrong command\") sys.exit()", "- No crossing newChr.genes = random.choice((self.genes, other.genes)) # Mutations if mutateNum < 0.5:", "chromosomes def crossing(self, other): # Create new chromosome with empty genes newChr =", "__init__(self, garden): row = garden.m column = garden.n number = random.randrange((row + column)", "f.readline() if riadok == '': break pocet += 1 riadok = riadok.split() for", "[00, 00, 00, 00, 00, 00, 00, 00, 00, 00, 00, 00], [00,", "start): self.genes = [] helper = garden.m + garden.n + garden.rocks if start", "= garden.m column 
= garden.n number = random.randrange((row + column) + (row +", "map\") print(\"Write: Test - Test mode\") print(\"_________________________________________________________\") inp = input() # File Function", "# Transforming chars into 2D array of integers while(1): row = [] riadok", "== '4': print(\"Counting...\") solveMap(test4) else: print(\"You entered wrong command\") sys.exit() else: print(\"You entered", "into clear grided output helper = \"\" for x in best.fGarden.map: for y", "has its own function for movement if number < column: # Down self.down(number)", "00, 00, 00, 00, 00, 00, -1, -1, 00, 00], [00, 00, 00,", "class GGarden: def __init__(self): self.generateRandomMap() self.countrocks() self.fitness() def generateRandomMap(self): self.m = random.randrange(2, 11)", "[] for i in range(len(self.genes)): newChr.genes.append(random.choice((self.genes[i], other.genes[i]))) elif crossNum < 0.425: # Crossing", "else: # Right self.right(row, column, number) self.rotate() # Moving down def down(self, number):", "00, 00], ] test2 = [ [00, 00, 00, 00, 00, 00, 00,", "Crossing of new chromosomes def crossing(self, other): # Create new chromosome with empty", "nv.count(0) == 2: position = helper[gene.rotation[x]] x += 1 if x == len(gene.rotation):", "= [] riadok = f.readline() if riadok == '': break pocet += 1", "number) def generateMap(): # Inicialisation of variables generation = [] # Garden creation", "map from a file or generate a random one\") print(\"_________________________________________________________\") print(\"Write: File -", "number = random.randrange((row + column) + (row + column)) # Each direction has", "algo(self): g = self.fGarden number = 0 # Start of genes iterations for", "for x in itera: if x > 0: self.fitness += 1 def algo(self):", "00], ] test3 = [ [00, 00, 00, 00, 00, 00, 00, 00,", "+ garden.rocks if start == True: for i in range(helper): self.genes.append(Gene(self.garden)) self.algo() def", "in range(helper): 
self.genes.append(Gene(self.garden)) self.algo() def fitnessFunc(self): self.fitness = 0 itera = sum(self.fGarden.map, [])", "solveMap(test2) elif inp == '3': print(\"Counting...\") solveMap(test3) elif inp == '4': print(\"Counting...\") solveMap(test4)", "def left(self, column, number): self.start = (number - column, column - 1) self.goTo", "0: self.fitness += 1 def algo(self): g = self.fGarden number = 0 #", "00], [00, 00, 00, 00, -1, 00, 00, 00, 00, 00, 00, 00],", "range(self.n): if random.random() < 0.1: row.append(-1) else: row.append(0) self.map.append(row) def countrocks(self): self.rocks =", "-= 1 elif goTo == '2': position[1] += 1 elif goTo == '4':", "00, 00, 00, 00, 00, 00, 00, -1, -1, 00, 00], [00, 00,", "# Test Function - Choose from availbale tests print(\"_________________________________________________________\") print(\"Choose the number of", "00, 00, 00, 00, 00, 00, 00, 00, 00], [00, 00, 00, 00,", "'2': position[1] += 1 elif goTo == '4': position[1] -= 1 # We", "= random.randrange((row + column) + (row + column)) # Each direction has its", "column, number): self.start = (number - column, column - 1) self.goTo = '2'", "number < 0.2: newChr.genes[i].rotate() # Crossing of new chromosomes def crossing(self, other): #", "00, 00, 00, 00, 00, 00], [00, -1, 00, 00, 00, 00, 00,", "% y helper += '\\n' print(helper) print(\"_________________________________________________________\") sys.exit() print(\"----------------->Welcome to Zen Garden<-----------------\") print(\"\")", "1 # We choose new tile if goTo == '3' or goTo ==", "00, 00], [00, -1, -1, -1, -1, 00, 00, 00, 00, 00, 00,", "3 - No crossing newChr.genes = random.choice((self.genes, other.genes)) # Mutations if mutateNum <", "down(self, number): self.start = (0, number) self.goTo = '1' # Representing dow #", "-= 1 elif goTo == '1': position[0] += 1 elif goTo == '2':", "00, 00, -1, 00, 00, 00, 00, 00, 00], [00, -1, 00, 00,", "down def down(self, number): self.start = (0, number) self.goTo = '1' # 
Representing", "00, 00, 00, 00, 00, 00, 00, 00, 00, 00, 00], ] test3", "00, 00, 00, 00, 00, 00, 00, 00, 00], [00, -1, -1, -1,", "= helper[nv.index(0)] # if we find two not raken elif nv.count(0) == 2:", "clear grided output helper = \"\" for x in best.fGarden.map: for y in", "or goTo == '1': helper = ([position[0], position[1] - 1], [position[0], position[1] +", "genes from both newChr.genes = [] for i in range(len(self.genes)): newChr.genes.append(random.choice((self.genes[i], other.genes[i]))) elif", "random.randrange(len(self.genes)) if crossNum < 0.85: # Crossing second type - 1 - Choosing", "] class GGarden: def __init__(self): self.generateRandomMap() self.countrocks() self.fitness() def generateRandomMap(self): self.m = random.randrange(2,", "Formatted Print finalprint(garden, bestChr, number) def generateMap(): # Inicialisation of variables generation =", "j == -1: self.rocks += 1 def fitness(self): self.max_fitness = 0 for i", "inp == '3': print(\"Counting...\") solveMap(test3) elif inp == '4': print(\"Counting...\") solveMap(test4) else: print(\"You", "\"Test\" or inp == \"test\": # Test Function - Choose from availbale tests", "00, 00, 00, 00, 00], ] class GGarden: def __init__(self): self.generateRandomMap() self.countrocks() self.fitness()", "for a in bin(random.randrange(1024)) [2:].zfill(10) ] class Chromosome: def __init__(self, garden, start=True): self.garden", "self.m = len(map) self.n = len(map[0]) self.map = copy.deepcopy(map) def countrocks(self): self.rocks =", "- 1) self.goTo = '3' # Representing up # Moving up def right(self,", "elif number < 0.2: newChr.genes[i].rotate() # Crossing of new chromosomes def crossing(self, other):", "print(\"_________________________________________________________\") print(\"Final Result\") # Transforming solved garden into clear grided output helper =", "helper = ([position[0], position[1] - 1], [position[0], position[1] + 1]) else: helper =", "return break if goTo == '3' or goTo == '1': if 
helper.index(position) ==", "tile if goTo == '3' or goTo == '1': helper = ([position[0], position[1]", "[00, 00, 00, 00, 00, 00, 00, 00, -1, -1, 00, 00], [00,", "range(g.m) or position[1] not in range(g.n): break # We check if it is", "self.fitnessFunc() # Mutation of chromosomes def mutate(self, newChr): for i in range(len(newChr.genes)): #", "class Chromosome: def __init__(self, garden, start=True): self.garden = garden self.fGarden = Garden(self.garden.map) self.genesFill(garden,", "# Crossing of new chromosomes def crossing(self, other): # Create new chromosome with", "[] helper = garden.m + garden.n + garden.rocks if start == True: for", "= input() if inp == '1': print(\"Counting...\") solveMap(test1) elif inp == '2': print(\"Counting...\")", "- generate Random garden print(\"Counting...\") generateMap() elif inp == \"Test\" or inp ==", "newChr.genes = self.genes[:pivotPoint] + other.genes[pivotPoint:] else: # Crossing third type - 3 -", "00, 00, 00, 00, 00, 00, 00, 00, 00], ] class GGarden: def", "bestChr = max(generation, key=lambda x: x.fitness) nextGeneration = [bestChr] # Check if we", "or inp == \"file\": print(\"Counting...\") file = [] # File opening f =", "x in best.fGarden.map: for y in x: if y == -1: helper +=", "00, 00, 00, 00, 00, 00, 00, 00, 00, 00], ] test3 =", "+= 1 def algo(self): g = self.fGarden number = 0 # Start of", "00, 00, 00, 00], [00, 00, 00, 00, -1, 00, 00, 00, 00,", "random.random() if number < 0.1: newChr.genes[i] = Gene(self.garden) # New rotations elif number", "self.genes[:pivotPoint] + other.genes[pivotPoint:] else: # Crossing third type - 3 - No crossing", "00, 00], [00, 00, 00, 00, 00, -1, 00, 00, 00, 00, 00,", "else: # Crossing third type - 3 - No crossing newChr.genes = random.choice((self.genes,", "row - number - 1) self.goTo = '3' # Representing up # Moving", "0) self.goTo = '4' # Representing right def rotate(self): self.rotation = [ int(a)", "elif crossNum < 0.425: # Crossing first type - 2 - first part", "-1, 00, 00, 
00], [00, 00, 00, 00, 00, 00, -1, 00, -1,", "g.map[position[0]][position[1]] != 0: continue number += 1 while (1): # Rake the tile", "[2:].zfill(10) ] class Chromosome: def __init__(self, garden, start=True): self.garden = garden self.fGarden =", "position[1] - 1], [position[0], position[1] + 1]) else: helper = ([position[0] - 1,", "00, 00, 00, 00, 00, 00, 00], [00, 00, 00, 00, -1, 00,", "+ garden.n + garden.rocks if start == True: for i in range(helper): self.genes.append(Gene(self.garden))", "00, -1, 00, 00], [00, 00, 00, 00, 00, 00, 00, -1, -1,", "# Each direction has its own function for movement if number < column:", "0 # Start of genes iterations for gene in self.genes: position = list(gene.start)", "right def rotate(self): self.rotation = [ int(a) for a in bin(random.randrange(1024)) [2:].zfill(10) ]", "00, 00, 00, 00, 00, 00], [00, -1, -1, -1, -1, 00, 00,", "# Garden creation garden = Garden(map) # Creation of starting set of chromosones", "== '2': position[1] += 1 elif goTo == '4': position[1] -= 1 #", "on if goTo == '3': position[0] -= 1 elif goTo == '1': position[0]", "helper += '%2d ' % y helper += '\\n' print(helper) print(\"_________________________________________________________\") print(\"Final Result\")", "x = 0 # We check if we can enter the garden if", "while (1): # Rake the tile g.map[position[0]][position[1]] = number # We chose next", "inp == \"file\": print(\"Counting...\") file = [] # File opening f = open(\"garden.txt\",", "process mutateNum = random.random() crossNum = random.random() pivotPoint = random.randrange(len(self.genes)) if crossNum <", "- Model test\") print(\"Test2 - Unsolvable test\") print(\"Test3 - Test with staying in", "Crossing process mutateNum = random.random() crossNum = random.random() pivotPoint = random.randrange(len(self.genes)) if crossNum", "self.rotate() # Moving down def down(self, number): self.start = (0, number) self.goTo =", "if riadok == '': break pocet += 1 riadok = riadok.split() for i", "we fing any 
obstacle we change movement direction if goTo == '3': position[0]", "position[1]]) # We check surrounding tiles nv = [] for p in helper:", "'4' else: if helper.index(position) == 0: goTo = '3' else: goTo = '1'", "if number < column: # Down self.down(number) elif column <= number < row", "in i: if j == 0: self.max_fitness += 1 class Garden: def __init__(self,", "riadok: if i == '00': row.append(0) if i == '-1': row.append(-1) file.append(row) solveMap(file)", "output helper = \"\" for x in garden.map: for y in x: if", "self.start = ((row + column) + (row + column) - number - 1,", "%d' % (garden.max_fitness - best.fitness)) print(\"_________________________________________________________\") print(\"Initial Garden\") # Transforming garden into clear", "gene.goTo x = 0 # We check if we can enter the garden", "Mutations if mutateNum < 0.5: self.mutate(newChr) newChr.algo() return newChr def solveMap(map): # Inicialisation", "other.genes)) # Mutations if mutateNum < 0.5: self.mutate(newChr) newChr.algo() return newChr def solveMap(map):", "00, 00, 00, 00, 00, 00, 00], ] test3 = [ [00, 00,", "[00, -1, 00, 00, 00, 00, 00, 00, 00, 00, 00, 00], [00,", "def right(self, row, column, number): self.start = ((row + column) + (row +", "-= 1 elif goTo == '4': position[1] += 1 # We check if", "best.fitness)) print(\"_________________________________________________________\") print(\"Initial Garden\") # Transforming garden into clear grided output helper =", "Garden: def __init__(self, map): self.fillMap(map) self.countrocks() self.fitness() def fillMap(self, map): self.m = len(map)", "rotations elif number < 0.2: newChr.genes[i].rotate() # Crossing of new chromosomes def crossing(self,", "00, 00, 00, -1, 00, -1, 00, 00], [00, 00, 00, 00, 00,", "in range(1000): # Saving the best chromosome bestChr = max(generation, key=lambda x: x.fitness)", "starting set of chromosones for i in range(50): generation.append(Chromosome(garden)) # Generations creation for", "any obstacle we change 
movement direction if goTo == '3': position[0] += 1", "- 4\") print(\"Test1 - Model test\") print(\"Test2 - Unsolvable test\") print(\"Test3 - Test", "If everything is raken else: if 'X' not in nv: self.fitnessFunc() return break", "elif goTo == '4': position[1] += 1 # We check if it it", "0 # Transforming chars into 2D array of integers while(1): row = []", "00, 00, 00, 00], ] test4 = [ [00, 00, 00, 00, 00,", "0: goTo = '3' else: goTo = '1' self.fitnessFunc() # Mutation of chromosomes", "if goTo == '3' or goTo == '1': if helper.index(position) == 0: goTo", "# If we find one not raken if nv.count(0) == 1: position =", "inp == \"generate\": # Generate Function - generate Random garden print(\"Counting...\") generateMap() elif", "' % y helper += '\\n' print(helper) print(\"_________________________________________________________\") print(\"Final Result\") # Transforming solved", "generation.append(Chromosome(garden)) # Generations creation for i in range(800): # Saving the best chromosome", "the garden\") print(\"Test4 - Test with one tile exit\") print(\"_________________________________________________________\") inp = input()", "j == 0: self.max_fitness += 1 class Gene: def __init__(self, garden): row =", "00, 00, 00, 00, 00, -1, -1, -1, 00, 00], [00, 00, 00,", "-1, 00, 00, 00, 00, 00, 00, 00], [00, 00, 00, 00, -1,", "00, 00, 00, 00, 00, 00, 00, 00, 00, 00, 00], [00, -1,", "= [] # Garden creation garden = GGarden() # Creation of starting set", "= max(generation, key=lambda x: x.fitness) nextGeneration = [bestChr] # Check if we didnt", "# Generations creation for i in range(800): # Saving the best chromosome bestChr", "position = list(gene.start) goTo = gene.goTo x = 0 # We check if", "= 0 # We check if we can enter the garden if g.map[position[0]][position[1]]", "creation garden = GGarden() # Creation of starting set of chromosones for i", "or goTo == '1': if helper.index(position) == 0: goTo = '2' else: goTo", "Inicialisation of variables generation = [] # 
Garden creation garden = Garden(map) #", "random genes from both newChr.genes = [] for i in range(len(self.genes)): newChr.genes.append(random.choice((self.genes[i], other.genes[i])))", "+= 1 elif goTo == '2': position[1] -= 1 elif goTo == '4':", "== True: for i in range(helper): self.genes.append(Gene(self.garden)) self.algo() def fitnessFunc(self): self.fitness = 0", "generateMap(): # Inicialisation of variables generation = [] # Garden creation garden =", "# New rotations elif number < 0.2: newChr.genes[i].rotate() # Crossing of new chromosomes", "-1, 00, 00, 00], [00, 00, 00, 00, 00, 00, -1, -1, -1,", "random.random() crossNum = random.random() pivotPoint = random.randrange(len(self.genes)) if crossNum < 0.85: # Crossing", "= GGarden() # Creation of starting set of chromosones for i in range(50):", "def algo(self): g = self.fGarden number = 0 # Start of genes iterations", "if j == -1: self.rocks += 1 def fitness(self): self.max_fitness = 0 for", "00, 00, 00, 00, 00, 00, 00], ] test2 = [ [00, 00,", "== '4': position[1] -= 1 # We choose new tile if goTo ==", "column <= number < column + column + row: # up self.up(row, column,", "00, 00, 00, 00, 00, 00, 00], [00, 00, 00, 00, 00, -1,", "other): # Create new chromosome with empty genes newChr = Chromosome(self.garden, False) #", "- 1, position[1]], [position[0] + 1, position[1]]) # We check surrounding tiles nv", "print(\"Final Result\") # Transforming solved garden into clear grided output helper = \"\"", "+ other.genes[pivotPoint:] else: # Crossing third type - 3 - No crossing newChr.genes", "print(\"Counting...\") solveMap(test3) elif inp == '4': print(\"Counting...\") solveMap(test4) else: print(\"You entered wrong command\")", "in bin(random.randrange(1024)) [2:].zfill(10) ] class Chromosome: def __init__(self, garden, start=True): self.garden = garden", "nextGeneration = [bestChr] # Check if we didnt find solution if bestChr.fitness ==", "else: goTo = '1' self.fitnessFunc() # Mutation of chromosomes def 
mutate(self, newChr): for", "print() print() # Printing all necesarry generation information print('Generations:%4d Max-Fitness:%4d Best-Fitness:%4d' % (number", "00, 00, 00, -1, 00, 00, 00, 00, 00, 00, 00], [00, -1,", "to Zen Garden<-----------------\") print(\"\") print(\"Choose to load a map from a file or", "We check surrounding tiles nv = [] for p in helper: try: nv.append(g.map[p[0]][p[1]])", "position = helper[nv.index(0)] # if we find two not raken elif nv.count(0) ==", "for i in self.map: for j in i: if j == -1: self.rocks", "== \"File\" or inp == \"file\": print(\"Counting...\") file = [] # File opening", "# Start of genes iterations for gene in self.genes: position = list(gene.start) goTo", "\"\" for x in garden.map: for y in x: if y == -1:", "00, 00, 00], [00, 00, 00, 00, 00, 00, 00, 00, -1, -1,", "== len(gene.rotation): x = 0 # If everything is raken else: if 'X'", "- number - 1, 0) self.goTo = '4' # Representing right def rotate(self):", "helper.index(position) == 0: goTo = '3' else: goTo = '1' self.fitnessFunc() # Mutation", "-1, 00, 00, 00, 00, 00, 00, 00], [00, -1, -1, -1, -1,", "# Generations creation for i in range(1000): # Saving the best chromosome bestChr", "pocet += 1 riadok = riadok.split() for i in riadok: if i ==", "0.1: newChr.genes[i] = Gene(self.garden) # New rotations elif number < 0.2: newChr.genes[i].rotate() #", "into 2D array of integers while(1): row = [] riadok = f.readline() if", "its own function for movement if number < column: # Down self.down(number) elif", "00], [00, 00, 00, 00, 00, 00, 00, 00, 00, 00, 00, 00],", "-= 1 # We choose new tile if goTo == '3' or goTo", "continue # If we fing any obstacle we change movement direction if goTo", "1 # We check if it it not edge of the map if", "next tile to move on if goTo == '3': position[0] -= 1 elif", "in range(g.n): break # We check if it is not raken - if", "very new Chromosome by crossing and mutation nextGeneration.append(chromosome1.crossing(chromosome2)) generation = 
nextGeneration number =", "Each direction has its own function for movement if number < column: #", "# Create very new Chromosome by crossing and mutation nextGeneration.append(chromosome1.crossing(chromosome2)) generation = nextGeneration", "column, column - 1) self.goTo = '2' # Representing left # Moving up", "'00': row.append(0) if i == '-1': row.append(-1) file.append(row) solveMap(file) elif inp == \"Generate\"", "row, column, number): self.start = (row - 1, column + column + row", "'3' or goTo == '1': helper = ([position[0], position[1] - 1], [position[0], position[1]", "it not edge of the map if position[0] not in range(g.m) or position[1]", "break pocet += 1 riadok = riadok.split() for i in riadok: if i", "'2': print(\"Counting...\") solveMap(test2) elif inp == '3': print(\"Counting...\") solveMap(test3) elif inp == '4':", "newChr.genes[i] = Gene(self.garden) # New rotations elif number < 0.2: newChr.genes[i].rotate() # Crossing", "# We check surrounding tiles nv = [] for p in helper: try:", "False) # Crossing process mutateNum = random.random() crossNum = random.random() pivotPoint = random.randrange(len(self.genes))", "fillMap(self, map): self.m = len(map) self.n = len(map[0]) self.map = copy.deepcopy(map) def countrocks(self):", "own function for movement if number < column: # Down self.down(number) elif column", "if number < 0.1: newChr.genes[i] = Gene(self.garden) # New rotations elif number <", "= random.randrange(len(self.genes)) if crossNum < 0.85: # Crossing second type - 1 -", "into clear grided output helper = \"\" for x in garden.map: for y", "column: # Down self.down(number) elif column <= number < row + column: #", "self.right(row, column, number) self.rotate() # Moving down def down(self, number): self.start = (0,", "helper += '\\n' print(helper) print(\"_________________________________________________________\") print(\"Final Result\") # Transforming solved garden into clear", "\"File\" or inp == \"file\": print(\"Counting...\") file = [] # 
File opening f", "number): self.start = ((row + column) + (row + column) - number -", "== \"test\": # Test Function - Choose from availbale tests print(\"_________________________________________________________\") print(\"Choose the", "goTo == '1': position[0] += 1 elif goTo == '2': position[1] -= 1", "# Formatted Print finalprint(garden, bestChr, number) def generateMap(): # Inicialisation of variables generation", "[00, 00, -1, 00, 00, 00, 00, 00, 00, 00, 00, 00], [00,", "in range(g.m) or position[1] not in range(g.n): break # We check if it", "current generation chromosome1, chromosome2 = sorted(random.sample(generation, 4), key=lambda x: x.fitness)[2:4] # Create very", "generation = nextGeneration number = i # Formatted Print finalprint(garden, bestChr, number) def", "i in riadok: if i == '00': row.append(0) if i == '-1': row.append(-1)", "0 for i in self.map: for j in i: if j == 0:", "garden = GGarden() # Creation of starting set of chromosones for i in", "# Generate Function - generate Random garden print(\"Counting...\") generateMap() elif inp == \"Test\"", "grided output helper = \"\" for x in best.fGarden.map: for y in x:", "number - 1, 0) self.goTo = '4' # Representing right def rotate(self): self.rotation", "0: goTo = '2' else: goTo = '4' else: if helper.index(position) == 0:", "[ [00, 00, 00, 00, 00, 00, 00, -1, 00, -1, 00, 00],", "# Formatted Print finalprint(garden, bestChr, number) def finalprint(garden, best, number): print() print() #", "a in bin(random.randrange(1024)) [2:].zfill(10) ] class Chromosome: def __init__(self, garden, start=True): self.garden =", "= Gene(self.garden) # New rotations elif number < 0.2: newChr.genes[i].rotate() # Crossing of", "else: if 'X' not in nv: self.fitnessFunc() return break if goTo == '3'", "(row + column)) # Each direction has its own function for movement if", "while(1): row = [] riadok = f.readline() if riadok == '': break pocet", "Chromosome: def __init__(self, garden, start=True): self.garden = garden 
self.fGarden = Garden(self.garden.map) self.genesFill(garden, start)", "'4': print(\"Counting...\") solveMap(test4) else: print(\"You entered wrong command\") sys.exit() else: print(\"You entered wrong", "helper[gene.rotation[x]] x += 1 if x == len(gene.rotation): x = 0 # If", "if nv.count(0) == 1: position = helper[nv.index(0)] # if we find two not", "if goTo == '3' or goTo == '1': helper = ([position[0], position[1] -", "helper: try: nv.append(g.map[p[0]][p[1]]) except IndexError: nv.append('X') # If we find one not raken", "0 for i in self.map: for j in i: if j == -1:", "= self.fGarden number = 0 # Start of genes iterations for gene in", "1, 0) self.goTo = '4' # Representing right def rotate(self): self.rotation = [", "i # Formatted Print finalprint(garden, bestChr, number) def generateMap(): # Inicialisation of variables", "helper[nv.index(0)] # if we find two not raken elif nv.count(0) == 2: position", "i == '00': row.append(0) if i == '-1': row.append(-1) file.append(row) solveMap(file) elif inp", "generate a random one\") print(\"_________________________________________________________\") print(\"Write: File - Load from file\") print(\"Write: Generate", "-1: helper += ' K ' else: helper += '%2d ' % y", "row + column: # Left self.left(column, number) elif row + column <= number", "up def right(self, row, column, number): self.start = ((row + column) + (row", "if g.map[position[0]][position[1]] == 0: continue # If we fing any obstacle we change", "garden.map: for y in x: if y == -1: helper += ' K", "00, 00, 00, 00, 00, 00, 00], [00, -1, -1, -1, -1, 00,", "type - 1 - Choosing random genes from both newChr.genes = [] for", "= ([position[0], position[1] - 1], [position[0], position[1] + 1]) else: helper = ([position[0]", "column + row: # up self.up(row, column, number) else: # Right self.right(row, column,", "== 0: self.max_fitness += 1 class Garden: def __init__(self, map): self.fillMap(map) self.countrocks() self.fitness()", "new tile if goTo == '3' or goTo == 
'1': helper = ([position[0],", "of test from 1 - 4\") print(\"Test1 - Model test\") print(\"Test2 - Unsolvable", "nextGeneration.append(chromosome1.crossing(chromosome2)) generation = nextGeneration number = i # Formatted Print finalprint(garden, bestChr, number)", "chromosones for i in range(50): generation.append(Chromosome(garden)) # Generations creation for i in range(800):", "def solveMap(map): # Inicialisation of variables generation = [] # Garden creation garden", "[bestChr] # Check if we didnt find solution if bestChr.fitness == garden.max_fitness: break", "00, 00, 00, 00, 00, 00, 00], ] test4 = [ [00, 00,", "New chromosome number = random.random() if number < 0.1: newChr.genes[i] = Gene(self.garden) #", "continue number += 1 while (1): # Rake the tile g.map[position[0]][position[1]] = number", "self.fGarden = Garden(self.garden.map) self.genesFill(garden, start) def genesFill(self, garden, start): self.genes = [] helper", "self.goTo = '4' # Representing right def rotate(self): self.rotation = [ int(a) for", "if x > 0: self.fitness += 1 def algo(self): g = self.fGarden number", "garden, start=True): self.garden = garden self.fGarden = Garden(self.garden.map) self.genesFill(garden, start) def genesFill(self, garden,", "Moving left def left(self, column, number): self.start = (number - column, column -", "the tile g.map[position[0]][position[1]] = number # We chose next tile to move on", "raken elif nv.count(0) == 2: position = helper[gene.rotation[x]] x += 1 if x", "== '00': row.append(0) if i == '-1': row.append(-1) file.append(row) solveMap(file) elif inp ==", "print(\"_________________________________________________________\") print(\"Choose the number of test from 1 - 4\") print(\"Test1 - Model", "00], [00, 00, 00, 00, 00, 00, -1, 00, -1, 00, 00, 00],", "opening f = open(\"garden.txt\", \"r\") pocet = 0 # Transforming chars into 2D", "00, 00, 00, 00, 00], [00, -1, 00, 00, 00, 00, 00, 00,", "'%2d ' % y helper += '\\n' print(helper) 
print(\"_________________________________________________________\") print(\"Final Result\") # Transforming", "\"r\") pocet = 0 # Transforming chars into 2D array of integers while(1):", "00, 00, 00, 00, 00, -1, 00, -1, 00, 00, 00], [00, 00,", "+= 1 def fitness(self): self.max_fitness = 0 for i in self.map: for j", "goTo == '3' or goTo == '1': helper = ([position[0], position[1] - 1],", "necesarry generation information print('Generations:%4d Max-Fitness:%4d Best-Fitness:%4d' % (number + 1, garden.max_fitness, best.fitness)) print('Tiles", "= '1' # Representing dow # Moving left def left(self, column, number): self.start", "range(self.m): row = [] for j in range(self.n): if random.random() < 0.1: row.append(-1)", "1) self.goTo = '3' # Representing up # Moving up def right(self, row,", "self.garden = garden self.fGarden = Garden(self.garden.map) self.genesFill(garden, start) def genesFill(self, garden, start): self.genes", "test2 = [ [00, 00, 00, 00, 00, 00, 00, 00, 00, 00,", "x.fitness) nextGeneration = [bestChr] # Check if we didnt find solution if bestChr.fitness", "column) - number - 1, 0) self.goTo = '4' # Representing right def", "position[1] -= 1 elif goTo == '4': position[1] += 1 # We check", "00, 00, 00, 00, 00, 00, 00], [00, 00, 00, 00, 00, 00,", "of chromosomes def mutate(self, newChr): for i in range(len(newChr.genes)): # New chromosome number", "'': break pocet += 1 riadok = riadok.split() for i in riadok: if", "self.n = random.randrange(2, 11) self.map = [] for i in range(self.m): row =", "print(\"_________________________________________________________\") inp = input() if inp == '1': print(\"Counting...\") solveMap(test1) elif inp ==", "inp == \"Generate\" or inp == \"generate\": # Generate Function - generate Random", "number < row + column: # Left self.left(column, number) elif row + column", "00, 00], [00, 00, 00, 00, -1, 00, 00, 00, 00, 00, 00,", "00, 00, 00, 00, 00, 00], [00, 00, 00, 00, 00, -1, 00,", "'3' or goTo == '1': if 
helper.index(position) == 0: goTo = '2' else:", "[ int(a) for a in bin(random.randrange(1024)) [2:].zfill(10) ] class Chromosome: def __init__(self, garden,", "'3': position[0] += 1 elif goTo == '1': position[0] -= 1 elif goTo", "check if it it not edge of the map if position[0] not in", "for i in range(1000): # Saving the best chromosome bestChr = max(generation, key=lambda", "is from Chromosome 1, second is from Chromosome 2 newChr.genes = self.genes[:pivotPoint] +", "or generate a random one\") print(\"_________________________________________________________\") print(\"Write: File - Load from file\") print(\"Write:", "column) + (row + column)) # Each direction has its own function for", "in range(49): # Choose random chromosomes from current generation chromosome1, chromosome2 = sorted(random.sample(generation,", "'\\n' print(helper) print(\"_________________________________________________________\") sys.exit() print(\"----------------->Welcome to Zen Garden<-----------------\") print(\"\") print(\"Choose to load a", "00, 00, 00, 00, 00, 00], [00, 00, 00, 00, 00, 00, -1,", "elif column <= number < row + column: # Left self.left(column, number) elif", "other.genes[i]))) elif crossNum < 0.425: # Crossing first type - 2 - first", "or inp == \"generate\": # Generate Function - generate Random garden print(\"Counting...\") generateMap()", "garden = Garden(map) # Creation of starting set of chromosones for i in", "# Check if we didnt find solution if bestChr.fitness == garden.max_fitness: break #", "# Transforming solved garden into clear grided output helper = \"\" for x", "random map\") print(\"Write: Test - Test mode\") print(\"_________________________________________________________\") inp = input() # File", "raken if nv.count(0) == 1: position = helper[nv.index(0)] # if we find two", "00, 00, 00], ] test4 = [ [00, 00, 00, 00, 00, 00,", "== -1: helper += ' K ' else: helper += '%2d ' %", "- if not we move there if g.map[position[0]][position[1]] == 0: continue # 
If", "Representing up # Moving up def right(self, row, column, number): self.start = ((row", "we can enter the garden if g.map[position[0]][position[1]] != 0: continue number += 1", "Printing all necesarry generation information print('Generations:%4d Max-Fitness:%4d Best-Fitness:%4d' % (number + 1, garden.max_fitness,", "chromosome with empty genes newChr = Chromosome(self.garden, False) # Crossing process mutateNum =", "self.map: for j in i: if j == -1: self.rocks += 1 def", "with empty genes newChr = Chromosome(self.garden, False) # Crossing process mutateNum = random.random()", "not raken - if not we move there if g.map[position[0]][position[1]] == 0: continue", "for i in range(len(self.genes)): newChr.genes.append(random.choice((self.genes[i], other.genes[i]))) elif crossNum < 0.425: # Crossing first", "g.map[position[0]][position[1]] == 0: continue # If we fing any obstacle we change movement", "1, position[1]], [position[0] + 1, position[1]]) # We check surrounding tiles nv =", "if crossNum < 0.85: # Crossing second type - 1 - Choosing random", "one\") print(\"_________________________________________________________\") print(\"Write: File - Load from file\") print(\"Write: Generate - Generate random", "+= ' K ' else: helper += '%2d ' % y helper +=", "< 0.2: newChr.genes[i].rotate() # Crossing of new chromosomes def crossing(self, other): # Create", "+= '%2d ' % y helper += '\\n' print(helper) print(\"_________________________________________________________\") sys.exit() print(\"----------------->Welcome to", "00, 00, 00, -1, -1, 00, 00], [00, 00, 00, 00, 00, 00,", "- 1, column + column + row - number - 1) self.goTo =", "garden.n + garden.rocks if start == True: for i in range(helper): self.genes.append(Gene(self.garden)) self.algo()", "Zen Garden<-----------------\") print(\"\") print(\"Choose to load a map from a file or generate", "== garden.max_fitness: break # Creating more chromosones to fill generation for j in", "# Representing right def 
rotate(self): self.rotation = [ int(a) for a in bin(random.randrange(1024))", "= [] # File opening f = open(\"garden.txt\", \"r\") pocet = 0 #", "goTo == '2': position[1] -= 1 elif goTo == '4': position[1] += 1", "Max-Fitness:%4d Best-Fitness:%4d' % (number + 1, garden.max_fitness, best.fitness)) print('Tiles Left: %d' % (garden.max_fitness", "grided output helper = \"\" for x in garden.map: for y in x:", "bestChr, number) def generateMap(): # Inicialisation of variables generation = [] # Garden", "number += 1 while (1): # Rake the tile g.map[position[0]][position[1]] = number #", "sys.exit() print(\"----------------->Welcome to Zen Garden<-----------------\") print(\"\") print(\"Choose to load a map from a", "Test with one tile exit\") print(\"_________________________________________________________\") inp = input() if inp == '1':", "sorted(random.sample(generation, 4), key=lambda x: x.fitness)[2:4] # Create very new Chromosome by crossing and", "% (number + 1, garden.max_fitness, best.fitness)) print('Tiles Left: %d' % (garden.max_fitness - best.fitness))", "File - Load from file\") print(\"Write: Generate - Generate random map\") print(\"Write: Test", "clear grided output helper = \"\" for x in garden.map: for y in", "Function - generate Random garden print(\"Counting...\") generateMap() elif inp == \"Test\" or inp", "== 0: continue # If we fing any obstacle we change movement direction", "Create very new Chromosome by crossing and mutation nextGeneration.append(chromosome1.crossing(chromosome2)) generation = nextGeneration number", "is raken else: if 'X' not in nv: self.fitnessFunc() return break if goTo", "riadok = f.readline() if riadok == '': break pocet += 1 riadok =", "goTo = gene.goTo x = 0 # We check if we can enter", "in range(800): # Saving the best chromosome bestChr = max(generation, key=lambda x: x.fitness)", "# If everything is raken else: if 'X' not in nv: self.fitnessFunc() return", "range(len(newChr.genes)): # New chromosome number = 
random.random() if number < 0.1: newChr.genes[i] =", "from both newChr.genes = [] for i in range(len(self.genes)): newChr.genes.append(random.choice((self.genes[i], other.genes[i]))) elif crossNum", "fill generation for j in range(49): # Choose random chromosomes from current generation", "in the garden\") print(\"Test4 - Test with one tile exit\") print(\"_________________________________________________________\") inp =", "number): self.start = (0, number) self.goTo = '1' # Representing dow # Moving", "00, 00, 00, 00, 00, 00, 00, 00], ] test3 = [ [00,", "chose next tile to move on if goTo == '3': position[0] -= 1", "print(\"Write: File - Load from file\") print(\"Write: Generate - Generate random map\") print(\"Write:", "00, 00, 00], [00, 00, 00, 00, 00, 00, 00, 00, 00, 00,", "# Crossing third type - 3 - No crossing newChr.genes = random.choice((self.genes, other.genes))", "move on if goTo == '3': position[0] -= 1 elif goTo == '1':", "= list(gene.start) goTo = gene.goTo x = 0 # We check if we", "1 elif goTo == '1': position[0] -= 1 elif goTo == '2': position[1]", "type - 3 - No crossing newChr.genes = random.choice((self.genes, other.genes)) # Mutations if", "column, number) else: # Right self.right(row, column, number) self.rotate() # Moving down def", "= '3' # Representing up # Moving up def right(self, row, column, number):", "i in range(self.m): row = [] for j in range(self.n): if random.random() <", "garden self.fGarden = Garden(self.garden.map) self.genesFill(garden, start) def genesFill(self, garden, start): self.genes = []", "00, 00, 00, 00, 00, -1, -1, -1, 00, 00, 00], [00, 00,", "number - 1) self.goTo = '3' # Representing up # Moving up def", "else: row.append(0) self.map.append(row) def countrocks(self): self.rocks = 0 for i in self.map: for", "x += 1 if x == len(gene.rotation): x = 0 # If everything", "pocet = 0 # Transforming chars into 2D array of integers while(1): row", "check if it is not raken - if not we move there if", "-1, 00, 00, 00, 00, 00, 
00, 00, 00, 00], [00, 00, 00,", "11) self.n = random.randrange(2, 11) self.map = [] for i in range(self.m): row", "enter the garden if g.map[position[0]][position[1]] != 0: continue number += 1 while (1):", "elif inp == '4': print(\"Counting...\") solveMap(test4) else: print(\"You entered wrong command\") sys.exit() else:", "goTo == '1': position[0] -= 1 elif goTo == '2': position[1] += 1", "x: if y == -1: helper += ' K ' else: helper +=", "solved garden into clear grided output helper = \"\" for x in best.fGarden.map:", "in i: if j == 0: self.max_fitness += 1 class Gene: def __init__(self,", "set of chromosones for i in range(50): generation.append(Chromosome(garden)) # Generations creation for i", "goTo = '4' else: if helper.index(position) == 0: goTo = '3' else: goTo", "file.append(row) solveMap(file) elif inp == \"Generate\" or inp == \"generate\": # Generate Function", "00, 00, 00], [00, 00, 00, 00, 00, 00, -1, -1, -1, 00,", "GGarden() # Creation of starting set of chromosones for i in range(50): generation.append(Chromosome(garden))", "00, 00, 00], ] class GGarden: def __init__(self): self.generateRandomMap() self.countrocks() self.fitness() def generateRandomMap(self):", "# New chromosome number = random.random() if number < 0.1: newChr.genes[i] = Gene(self.garden)", "random.randrange((row + column) + (row + column)) # Each direction has its own", "+ 1]) else: helper = ([position[0] - 1, position[1]], [position[0] + 1, position[1]])", "(row - 1, column + column + row - number - 1) self.goTo", "up self.up(row, column, number) else: # Right self.right(row, column, number) self.rotate() # Moving", "00, 00, 00, 00, 00], [00, 00, 00, 00, 00, 00, 00, 00,", "the garden if g.map[position[0]][position[1]] != 0: continue number += 1 while (1): #", "in range(len(newChr.genes)): # New chromosome number = random.random() if number < 0.1: newChr.genes[i]", "< column: # Down self.down(number) elif column <= number < row + column:", "j in i: if j == 0: self.max_fitness += 1 
class Gene: def", "print(\"Write: Generate - Generate random map\") print(\"Write: Test - Test mode\") print(\"_________________________________________________________\") inp", "[00, 00, 00, 00, 00, 00, 00, -1, -1, -1, 00, 00], [00,", "not edge of the map if position[0] not in range(g.m) or position[1] not", "column, number): self.start = ((row + column) + (row + column) - number", "row = garden.m column = garden.n number = random.randrange((row + column) + (row", "00, 00, 00, 00], [00, 00, 00, 00, 00, -1, 00, 00, 00,", "+= '\\n' print(helper) print(\"_________________________________________________________\") print(\"Final Result\") # Transforming solved garden into clear grided", "0.85: # Crossing second type - 1 - Choosing random genes from both", "= [] for i in range(self.m): row = [] for j in range(self.n):", "Test - Test mode\") print(\"_________________________________________________________\") inp = input() # File Function - loading", "inp == '1': print(\"Counting...\") solveMap(test1) elif inp == '2': print(\"Counting...\") solveMap(test2) elif inp", "File opening f = open(\"garden.txt\", \"r\") pocet = 0 # Transforming chars into", "00, 00, 00, 00], ] test3 = [ [00, 00, 00, 00, 00,", "Random garden print(\"Counting...\") generateMap() elif inp == \"Test\" or inp == \"test\": #", "j in range(49): # Choose random chromosomes from current generation chromosome1, chromosome2 =", "fing any obstacle we change movement direction if goTo == '3': position[0] +=", "[] for p in helper: try: nv.append(g.map[p[0]][p[1]]) except IndexError: nv.append('X') # If we", "position = helper[gene.rotation[x]] x += 1 if x == len(gene.rotation): x = 0", "gene in self.genes: position = list(gene.start) goTo = gene.goTo x = 0 #", "1 elif goTo == '1': position[0] += 1 elif goTo == '2': position[1]", "GGarden: def __init__(self): self.generateRandomMap() self.countrocks() self.fitness() def generateRandomMap(self): self.m = random.randrange(2, 11) self.n", 
"<filename>src/main.py<gh_stars>0 import random import copy import sys test1 = [ [00, 00, 00,", "= copy.deepcopy(map) def countrocks(self): self.rocks = 0 for i in self.map: for j", "check surrounding tiles nv = [] for p in helper: try: nv.append(g.map[p[0]][p[1]]) except", "00, 00, 00], [00, -1, -1, -1, -1, 00, 00, 00, 00, 00,", "'X' not in nv: self.fitnessFunc() return break if goTo == '3' or goTo", "00, 00, 00, 00, 00, 00, -1, 00, -1, 00, 00], [00, 00,", "1 - Choosing random genes from both newChr.genes = [] for i in", "- 3 - No crossing newChr.genes = random.choice((self.genes, other.genes)) # Mutations if mutateNum", "4), key=lambda x: x.fitness)[2:4] # Create very new Chromosome by crossing and mutation", "nv.append(g.map[p[0]][p[1]]) except IndexError: nv.append('X') # If we find one not raken if nv.count(0)", "is from Chromosome 2 newChr.genes = self.genes[:pivotPoint] + other.genes[pivotPoint:] else: # Crossing third", "p in helper: try: nv.append(g.map[p[0]][p[1]]) except IndexError: nv.append('X') # If we find one", "== '3': position[0] += 1 elif goTo == '1': position[0] -= 1 elif", "File Function - loading from File if inp == \"File\" or inp ==", "+= 1 # We check if it it not edge of the map", "True: for i in range(helper): self.genes.append(Gene(self.garden)) self.algo() def fitnessFunc(self): self.fitness = 0 itera", "if x == len(gene.rotation): x = 0 # If everything is raken else:", "inp = input() if inp == '1': print(\"Counting...\") solveMap(test1) elif inp == '2':", "garden.rocks if start == True: for i in range(helper): self.genes.append(Gene(self.garden)) self.algo() def fitnessFunc(self):", "second is from Chromosome 2 newChr.genes = self.genes[:pivotPoint] + other.genes[pivotPoint:] else: # Crossing", "of new chromosomes def crossing(self, other): # Create new chromosome with empty genes", "raken - if not we move there if g.map[position[0]][position[1]] == 0: continue #", "'4' # Representing right def rotate(self): self.rotation = [ int(a) for 
a in", "\"\" for x in best.fGarden.map: for y in x: if y == -1:", "# Crossing first type - 2 - first part is from Chromosome 1,", "solveMap(file) elif inp == \"Generate\" or inp == \"generate\": # Generate Function -", "best.fGarden.map: for y in x: if y == -1: helper += ' K", "[] for i in range(self.m): row = [] for j in range(self.n): if", "= garden.m + garden.n + garden.rocks if start == True: for i in", "goTo = '2' else: goTo = '4' else: if helper.index(position) == 0: goTo", "0 itera = sum(self.fGarden.map, []) for x in itera: if x > 0:", "1, second is from Chromosome 2 newChr.genes = self.genes[:pivotPoint] + other.genes[pivotPoint:] else: #", "Garden(map) # Creation of starting set of chromosones for i in range(50): generation.append(Chromosome(garden))", "= '2' # Representing left # Moving up def up(self, row, column, number):", "len(map[0]) self.map = copy.deepcopy(map) def countrocks(self): self.rocks = 0 for i in self.map:", "number) self.goTo = '1' # Representing dow # Moving left def left(self, column,", "# If we fing any obstacle we change movement direction if goTo ==", "goTo == '3': position[0] += 1 elif goTo == '1': position[0] -= 1", "00, 00, 00, -1, -1, -1, 00, 00, 00], [00, 00, 00, 00,", "== '4': position[1] += 1 # We check if it it not edge", "+= 1 while (1): # Rake the tile g.map[position[0]][position[1]] = number # We", "from current generation chromosome1, chromosome2 = sorted(random.sample(generation, 4), key=lambda x: x.fitness)[2:4] # Create", "00, 00, 00, 00, 00, 00, 00, 00], ] test2 = [ [00,", "(row + column) - number - 1, 0) self.goTo = '4' # Representing", "not in range(g.m) or position[1] not in range(g.n): break # We check if", "No crossing newChr.genes = random.choice((self.genes, other.genes)) # Mutations if mutateNum < 0.5: self.mutate(newChr)", "inp == \"test\": # Test Function - Choose from availbale tests print(\"_________________________________________________________\") print(\"Choose", "+= 1 elif goTo == '4': position[1] 
-= 1 # We choose new", "if y == -1: helper += ' K ' else: helper += '%2d", "- Choose from availbale tests print(\"_________________________________________________________\") print(\"Choose the number of test from 1", "' % y helper += '\\n' print(helper) print(\"_________________________________________________________\") sys.exit() print(\"----------------->Welcome to Zen Garden<-----------------\")", "garden print(\"Counting...\") generateMap() elif inp == \"Test\" or inp == \"test\": # Test", "00, 00, 00, 00, -1, -1, 00, 00], [00, 00, 00, 00, 00,", "== '3' or goTo == '1': if helper.index(position) == 0: goTo = '2'", "number) else: # Right self.right(row, column, number) self.rotate() # Moving down def down(self,", "# We check if it is not raken - if not we move", "-1, -1, 00, 00, 00, 00, 00, 00, 00], [00, 00, 00, 00,", "- 1, 0) self.goTo = '4' # Representing right def rotate(self): self.rotation =", "Test Function - Choose from availbale tests print(\"_________________________________________________________\") print(\"Choose the number of test", "= [] for j in range(self.n): if random.random() < 0.1: row.append(-1) else: row.append(0)", "= 0 # Start of genes iterations for gene in self.genes: position =", "- 1 - Choosing random genes from both newChr.genes = [] for i", "1 class Garden: def __init__(self, map): self.fillMap(map) self.countrocks() self.fitness() def fillMap(self, map): self.m", "i in range(1000): # Saving the best chromosome bestChr = max(generation, key=lambda x:", "f = open(\"garden.txt\", \"r\") pocet = 0 # Transforming chars into 2D array", "'1': print(\"Counting...\") solveMap(test1) elif inp == '2': print(\"Counting...\") solveMap(test2) elif inp == '3':", "solveMap(test3) elif inp == '4': print(\"Counting...\") solveMap(test4) else: print(\"You entered wrong command\") sys.exit()", "except IndexError: nv.append('X') # If we find one not raken if nv.count(0) ==", "in riadok: if i == '00': row.append(0) if i == '-1': row.append(-1) 
file.append(row)", "'1' self.fitnessFunc() # Mutation of chromosomes def mutate(self, newChr): for i in range(len(newChr.genes)):", "number = i # Formatted Print finalprint(garden, bestChr, number) def generateMap(): # Inicialisation", "([position[0] - 1, position[1]], [position[0] + 1, position[1]]) # We check surrounding tiles", "Crossing second type - 1 - Choosing random genes from both newChr.genes =", "self.genesFill(garden, start) def genesFill(self, garden, start): self.genes = [] helper = garden.m +", "= nextGeneration number = i # Formatted Print finalprint(garden, bestChr, number) def finalprint(garden,", "column, number) self.rotate() # Moving down def down(self, number): self.start = (0, number)", "self.generateRandomMap() self.countrocks() self.fitness() def generateRandomMap(self): self.m = random.randrange(2, 11) self.n = random.randrange(2, 11)", "-1, 00, 00, 00, 00, 00, 00, 00], [00, 00, -1, 00, 00,", "'2' else: goTo = '4' else: if helper.index(position) == 0: goTo = '3'", "We check if we can enter the garden if g.map[position[0]][position[1]] != 0: continue", "inp == '4': print(\"Counting...\") solveMap(test4) else: print(\"You entered wrong command\") sys.exit() else: print(\"You", "of starting set of chromosones for i in range(50): generation.append(Chromosome(garden)) # Generations creation", "try: nv.append(g.map[p[0]][p[1]]) except IndexError: nv.append('X') # If we find one not raken if", "__init__(self): self.generateRandomMap() self.countrocks() self.fitness() def generateRandomMap(self): self.m = random.randrange(2, 11) self.n = random.randrange(2,", "not raken if nv.count(0) == 1: position = helper[nv.index(0)] # if we find", "self.mutate(newChr) newChr.algo() return newChr def solveMap(map): # Inicialisation of variables generation = []", "not in range(g.n): break # We check if it is not raken -", "not in nv: self.fitnessFunc() return break if goTo == '3' or goTo ==", "= random.randrange(2, 11) self.map = [] for i in range(self.m): row 
= []", "if it is not raken - if not we move there if g.map[position[0]][position[1]]", "00, 00, 00, 00, 00, 00], ] test4 = [ [00, 00, 00,", "Transforming chars into 2D array of integers while(1): row = [] riadok =", "elif goTo == '2': position[1] += 1 elif goTo == '4': position[1] -=", "11) self.map = [] for i in range(self.m): row = [] for j", "import random import copy import sys test1 = [ [00, 00, 00, 00,", "= garden self.fGarden = Garden(self.garden.map) self.genesFill(garden, start) def genesFill(self, garden, start): self.genes =", "goTo == '4': position[1] += 1 # We check if it it not", "start=True): self.garden = garden self.fGarden = Garden(self.garden.map) self.genesFill(garden, start) def genesFill(self, garden, start):", "garden into clear grided output helper = \"\" for x in garden.map: for", "range(len(self.genes)): newChr.genes.append(random.choice((self.genes[i], other.genes[i]))) elif crossNum < 0.425: # Crossing first type - 2", "Representing dow # Moving left def left(self, column, number): self.start = (number -", "range(g.n): break # We check if it is not raken - if not", "[00, 00, 00, 00, 00, -1, 00, 00, 00, 00, 00, 00], [00,", "= (row - 1, column + column + row - number - 1)", "for j in i: if j == 0: self.max_fitness += 1 class Gene:", "00, 00, 00, -1, 00, -1, 00, 00, 00], [00, 00, 00, 00,", "00, 00, 00, 00, 00, 00], ] test3 = [ [00, 00, 00,", "00, 00, 00, 00, 00, 00, 00], [00, 00, -1, 00, 00, 00,", "00, 00], [00, 00, 00, 00, 00, 00, 00, 00, -1, -1, 00,", "movement if number < column: # Down self.down(number) elif column <= number <", "+ 1, position[1]]) # We check surrounding tiles nv = [] for p", "print('Generations:%4d Max-Fitness:%4d Best-Fitness:%4d' % (number + 1, garden.max_fitness, best.fitness)) print('Tiles Left: %d' %", "Gene: def __init__(self, garden): row = garden.m column = garden.n number = random.randrange((row", "chromosomes from current generation chromosome1, chromosome2 = sorted(random.sample(generation, 4), key=lambda x: 
x.fitness)[2:4] #", "print(\"_________________________________________________________\") sys.exit() print(\"----------------->Welcome to Zen Garden<-----------------\") print(\"\") print(\"Choose to load a map from", "If we fing any obstacle we change movement direction if goTo == '3':", "00, 00, -1, 00, 00, 00, 00, 00, 00, 00], [00, -1, -1,", "We choose new tile if goTo == '3' or goTo == '1': helper", "in best.fGarden.map: for y in x: if y == -1: helper += '", "i == '-1': row.append(-1) file.append(row) solveMap(file) elif inp == \"Generate\" or inp ==", "[] for j in range(self.n): if random.random() < 0.1: row.append(-1) else: row.append(0) self.map.append(row)", "Choosing random genes from both newChr.genes = [] for i in range(len(self.genes)): newChr.genes.append(random.choice((self.genes[i],", "0.2: newChr.genes[i].rotate() # Crossing of new chromosomes def crossing(self, other): # Create new", "of variables generation = [] # Garden creation garden = GGarden() # Creation", "def down(self, number): self.start = (0, number) self.goTo = '1' # Representing dow", "1 elif goTo == '4': position[1] += 1 # We check if it", "row.append(-1) else: row.append(0) self.map.append(row) def countrocks(self): self.rocks = 0 for i in self.map:", "-1, 00, 00, 00], [00, 00, 00, 00, 00, 00, 00, 00, 00,", "one not raken if nv.count(0) == 1: position = helper[nv.index(0)] # if we", "If we find one not raken if nv.count(0) == 1: position = helper[nv.index(0)]", "Print finalprint(garden, bestChr, number) def generateMap(): # Inicialisation of variables generation = []", "genes iterations for gene in self.genes: position = list(gene.start) goTo = gene.goTo x", "00], [00, 00, 00, 00, 00, -1, 00, 00, 00, 00, 00, 00],", "information print('Generations:%4d Max-Fitness:%4d Best-Fitness:%4d' % (number + 1, garden.max_fitness, best.fitness)) print('Tiles Left: %d'", "00], [00, 00, 00, 00, 00, 00, -1, -1, -1, 00, 00, 00],", "crossing newChr.genes = random.choice((self.genes, other.genes)) # 
Mutations if mutateNum < 0.5: self.mutate(newChr) newChr.algo()", "it it not edge of the map if position[0] not in range(g.m) or", "one tile exit\") print(\"_________________________________________________________\") inp = input() if inp == '1': print(\"Counting...\") solveMap(test1)", "itera: if x > 0: self.fitness += 1 def algo(self): g = self.fGarden", "mutate(self, newChr): for i in range(len(newChr.genes)): # New chromosome number = random.random() if", "Garden\") # Transforming garden into clear grided output helper = \"\" for x", "y helper += '\\n' print(helper) print(\"_________________________________________________________\") print(\"Final Result\") # Transforming solved garden into", "Generations creation for i in range(800): # Saving the best chromosome bestChr =", "+= 1 riadok = riadok.split() for i in riadok: if i == '00':", "= '3' else: goTo = '1' self.fitnessFunc() # Mutation of chromosomes def mutate(self,", "nextGeneration number = i # Formatted Print finalprint(garden, bestChr, number) def generateMap(): #", "for i in range(self.m): row = [] for j in range(self.n): if random.random()", "00, 00, 00, 00, 00], [00, 00, 00, 00, 00, 00, -1, -1,", "+ column: # Left self.left(column, number) elif row + column <= number <", "tile g.map[position[0]][position[1]] = number # We chose next tile to move on if", "= random.random() if number < 0.1: newChr.genes[i] = Gene(self.garden) # New rotations elif", "# if we find two not raken elif nv.count(0) == 2: position =", "00, 00, -1, -1, -1, 00, 00], [00, 00, 00, 00, 00, 00,", "creation garden = Garden(map) # Creation of starting set of chromosones for i", "# Left self.left(column, number) elif row + column <= number < column +", "We check if it is not raken - if not we move there", "start == True: for i in range(helper): self.genes.append(Gene(self.garden)) self.algo() def fitnessFunc(self): self.fitness =", "00, 00, 00, 00, 00], [00, 00, 00, 00, -1, 00, 00, 00,", "1) self.goTo = '2' # Representing left # 
Moving up def up(self, row,", "Creation of starting set of chromosones for i in range(50): generation.append(Chromosome(garden)) # Generations", "Left: %d' % (garden.max_fitness - best.fitness)) print(\"_________________________________________________________\") print(\"Initial Garden\") # Transforming garden into", "'1': helper = ([position[0], position[1] - 1], [position[0], position[1] + 1]) else: helper", "Transforming garden into clear grided output helper = \"\" for x in garden.map:", "1 def fitness(self): self.max_fitness = 0 for i in self.map: for j in", "# Rake the tile g.map[position[0]][position[1]] = number # We chose next tile to", "for x in best.fGarden.map: for y in x: if y == -1: helper", "00, 00, 00, 00, -1, 00, 00, 00, 00, 00, 00], [00, -1,", "= 0 for i in self.map: for j in i: if j ==", "= (number - column, column - 1) self.goTo = '2' # Representing left", "for j in range(49): # Choose random chromosomes from current generation chromosome1, chromosome2", "Test mode\") print(\"_________________________________________________________\") inp = input() # File Function - loading from File", "- Generate random map\") print(\"Write: Test - Test mode\") print(\"_________________________________________________________\") inp = input()", "00, 00, 00, 00, 00, 00, 00, 00, 00, 00, 00], ] class", "all necesarry generation information print('Generations:%4d Max-Fitness:%4d Best-Fitness:%4d' % (number + 1, garden.max_fitness, best.fitness))", "00, 00, 00, 00, 00], ] test3 = [ [00, 00, 00, 00,", "j in i: if j == -1: self.rocks += 1 def fitness(self): self.max_fitness", "Formatted Print finalprint(garden, bestChr, number) def finalprint(garden, best, number): print() print() # Printing", "i in range(helper): self.genes.append(Gene(self.garden)) self.algo() def fitnessFunc(self): self.fitness = 0 itera = sum(self.fGarden.map,", "mutateNum = random.random() crossNum = random.random() pivotPoint = random.randrange(len(self.genes)) if crossNum < 0.85:", "00, -1, 
-1, 00, 00], [00, 00, 00, 00, 00, 00, 00, 00,", "Rake the tile g.map[position[0]][position[1]] = number # We chose next tile to move", "chromosome2 = sorted(random.sample(generation, 4), key=lambda x: x.fitness)[2:4] # Create very new Chromosome by", "# Garden creation garden = GGarden() # Creation of starting set of chromosones", "chromosome1, chromosome2 = sorted(random.sample(generation, 4), key=lambda x: x.fitness)[2:4] # Create very new Chromosome", "else: goTo = '4' else: if helper.index(position) == 0: goTo = '3' else:", "garden.max_fitness, best.fitness)) print('Tiles Left: %d' % (garden.max_fitness - best.fitness)) print(\"_________________________________________________________\") print(\"Initial Garden\") #", "self.countrocks() self.fitness() def generateRandomMap(self): self.m = random.randrange(2, 11) self.n = random.randrange(2, 11) self.map", "print(\"Test1 - Model test\") print(\"Test2 - Unsolvable test\") print(\"Test3 - Test with staying", "# Create new chromosome with empty genes newChr = Chromosome(self.garden, False) # Crossing", "[] # File opening f = open(\"garden.txt\", \"r\") pocet = 0 # Transforming", "+= 1 class Gene: def __init__(self, garden): row = garden.m column = garden.n", "Crossing first type - 2 - first part is from Chromosome 1, second", "# File opening f = open(\"garden.txt\", \"r\") pocet = 0 # Transforming chars", "def countrocks(self): self.rocks = 0 for i in self.map: for j in i:", "nextGeneration number = i # Formatted Print finalprint(garden, bestChr, number) def finalprint(garden, best,", "import copy import sys test1 = [ [00, 00, 00, 00, 00, 00,", "' else: helper += '%2d ' % y helper += '\\n' print(helper) print(\"_________________________________________________________\")", "We check if it it not edge of the map if position[0] not", "self.m = random.randrange(2, 11) self.n = random.randrange(2, 11) self.map = [] for i", "chromosones to fill generation for j in range(49): # Choose random chromosomes from", "# Moving 
up def up(self, row, column, number): self.start = (row - 1,", "file = [] # File opening f = open(\"garden.txt\", \"r\") pocet = 0", "not raken elif nv.count(0) == 2: position = helper[gene.rotation[x]] x += 1 if", "in x: if y == -1: helper += ' K ' else: helper", "# up self.up(row, column, number) else: # Right self.right(row, column, number) self.rotate() #", "row: # up self.up(row, column, number) else: # Right self.right(row, column, number) self.rotate()", "00, 00, 00, 00], ] test2 = [ [00, 00, 00, 00, 00,", "'1' # Representing dow # Moving left def left(self, column, number): self.start =", "= helper[gene.rotation[x]] x += 1 if x == len(gene.rotation): x = 0 #", "in self.map: for j in i: if j == -1: self.rocks += 1", "# Crossing process mutateNum = random.random() crossNum = random.random() pivotPoint = random.randrange(len(self.genes)) if", "00, -1, 00, 00, 00, 00, 00, 00], [00, -1, 00, 00, 00,", "00], ] test4 = [ [00, 00, 00, 00, 00, 00, 00, -1,", "newChr = Chromosome(self.garden, False) # Crossing process mutateNum = random.random() crossNum = random.random()", "solution if bestChr.fitness == garden.max_fitness: break # Creating more chromosones to fill generation", "= [] for i in range(len(self.genes)): newChr.genes.append(random.choice((self.genes[i], other.genes[i]))) elif crossNum < 0.425: #", "# File Function - loading from File if inp == \"File\" or inp", "if i == '-1': row.append(-1) file.append(row) solveMap(file) elif inp == \"Generate\" or inp", "if we didnt find solution if bestChr.fitness == garden.max_fitness: break # Creating more", "-1, -1, -1, 00, 00, 00], [00, 00, 00, 00, 00, 00, 00,", "== '1': helper = ([position[0], position[1] - 1], [position[0], position[1] + 1]) else:", "countrocks(self): self.rocks = 0 for i in self.map: for j in i: if", "New rotations elif number < 0.2: newChr.genes[i].rotate() # Crossing of new chromosomes def", "00, 00, -1, 00, -1, 00, 00, 00], [00, 00, 00, 00, 00,", "output helper = \"\" for x in 
best.fGarden.map: for y in x: if", "00, 00, 00, 00, 00], [00, -1, -1, -1, -1, 00, 00, 00,", "Down self.down(number) elif column <= number < row + column: # Left self.left(column,", "column: # Left self.left(column, number) elif row + column <= number < column", "if we find two not raken elif nv.count(0) == 2: position = helper[gene.rotation[x]]", "1 elif goTo == '2': position[1] += 1 elif goTo == '4': position[1]", "for i in riadok: if i == '00': row.append(0) if i == '-1':", "00, 00, -1, -1, 00, 00], [00, 00, 00, 00, 00, 00, 00,", "riadok = riadok.split() for i in riadok: if i == '00': row.append(0) if", "direction if goTo == '3': position[0] += 1 elif goTo == '1': position[0]", "generation chromosome1, chromosome2 = sorted(random.sample(generation, 4), key=lambda x: x.fitness)[2:4] # Create very new", "print(\"Counting...\") file = [] # File opening f = open(\"garden.txt\", \"r\") pocet =", "range(50): generation.append(Chromosome(garden)) # Generations creation for i in range(1000): # Saving the best", "00, 00, 00, 00, -1, -1, -1, 00, 00, 00], [00, 00, 00,", "in range(len(self.genes)): newChr.genes.append(random.choice((self.genes[i], other.genes[i]))) elif crossNum < 0.425: # Crossing first type -", "from a file or generate a random one\") print(\"_________________________________________________________\") print(\"Write: File - Load", "1, position[1]]) # We check surrounding tiles nv = [] for p in", "in self.map: for j in i: if j == 0: self.max_fitness += 1", "number) def finalprint(garden, best, number): print() print() # Printing all necesarry generation information", "mode\") print(\"_________________________________________________________\") inp = input() # File Function - loading from File if", "creation for i in range(1000): # Saving the best chromosome bestChr = max(generation,", "self.goTo = '1' # Representing dow # Moving left def left(self, column, number):", "# Crossing second type - 1 - Choosing random genes from both newChr.genes", "tests 
print(\"_________________________________________________________\") print(\"Choose the number of test from 1 - 4\") print(\"Test1 -", "garden.m + garden.n + garden.rocks if start == True: for i in range(helper):", "else: helper = ([position[0] - 1, position[1]], [position[0] + 1, position[1]]) # We", "obstacle we change movement direction if goTo == '3': position[0] += 1 elif", "newChr.genes = [] for i in range(len(self.genes)): newChr.genes.append(random.choice((self.genes[i], other.genes[i]))) elif crossNum < 0.425:", "0: self.max_fitness += 1 class Gene: def __init__(self, garden): row = garden.m column", "== '2': print(\"Counting...\") solveMap(test2) elif inp == '3': print(\"Counting...\") solveMap(test3) elif inp ==", "len(gene.rotation): x = 0 # If everything is raken else: if 'X' not", "00, 00, 00, 00, 00, 00, 00], ] class GGarden: def __init__(self): self.generateRandomMap()", "-1, -1, 00, 00], [00, 00, 00, 00, 00, 00, 00, 00, 00,", "print(\"Choose to load a map from a file or generate a random one\")", "elif goTo == '2': position[1] -= 1 elif goTo == '4': position[1] +=", "crossNum < 0.85: # Crossing second type - 1 - Choosing random genes", "00, 00, 00, 00, 00, 00, 00, 00], [00, 00, 00, 00, -1,", "random.choice((self.genes, other.genes)) # Mutations if mutateNum < 0.5: self.mutate(newChr) newChr.algo() return newChr def", "'1': position[0] += 1 elif goTo == '2': position[1] -= 1 elif goTo", "Chromosome(self.garden, False) # Crossing process mutateNum = random.random() crossNum = random.random() pivotPoint =", "column, number): self.start = (row - 1, column + column + row -", "IndexError: nv.append('X') # If we find one not raken if nv.count(0) == 1:", "test\") print(\"Test2 - Unsolvable test\") print(\"Test3 - Test with staying in the garden\")", "# Creation of starting set of chromosones for i in range(50): generation.append(Chromosome(garden)) #", "def __init__(self): self.generateRandomMap() self.countrocks() self.fitness() def 
generateRandomMap(self): self.m = random.randrange(2, 11) self.n =", "([position[0], position[1] - 1], [position[0], position[1] + 1]) else: helper = ([position[0] -", "1 - 4\") print(\"Test1 - Model test\") print(\"Test2 - Unsolvable test\") print(\"Test3 -", "class Garden: def __init__(self, map): self.fillMap(map) self.countrocks() self.fitness() def fillMap(self, map): self.m =", "self.rotation = [ int(a) for a in bin(random.randrange(1024)) [2:].zfill(10) ] class Chromosome: def", "column) + (row + column) - number - 1, 0) self.goTo = '4'", "[position[0], position[1] + 1]) else: helper = ([position[0] - 1, position[1]], [position[0] +", "print(\"\") print(\"Choose to load a map from a file or generate a random", "of variables generation = [] # Garden creation garden = Garden(map) # Creation", "= open(\"garden.txt\", \"r\") pocet = 0 # Transforming chars into 2D array of", "creation for i in range(800): # Saving the best chromosome bestChr = max(generation,", "- 1], [position[0], position[1] + 1]) else: helper = ([position[0] - 1, position[1]],", "Chromosome 1, second is from Chromosome 2 newChr.genes = self.genes[:pivotPoint] + other.genes[pivotPoint:] else:", "def __init__(self, garden): row = garden.m column = garden.n number = random.randrange((row +", "garden): row = garden.m column = garden.n number = random.randrange((row + column) +", "00, 00, 00, 00, 00], [00, 00, 00, 00, 00, -1, 00, 00,", "max(generation, key=lambda x: x.fitness) nextGeneration = [bestChr] # Check if we didnt find", "== '1': position[0] -= 1 elif goTo == '2': position[1] += 1 elif", "garden into clear grided output helper = \"\" for x in best.fGarden.map: for", "newChr.genes[i].rotate() # Crossing of new chromosomes def crossing(self, other): # Create new chromosome", "= ([position[0] - 1, position[1]], [position[0] + 1, position[1]]) # We check surrounding", "if it it not edge of the map if position[0] not in range(g.m)", "# We chose next tile to move on if goTo == '3': position[0]", 
"array of integers while(1): row = [] riadok = f.readline() if riadok ==", "we change movement direction if goTo == '3': position[0] += 1 elif goTo", "bestChr, number) def finalprint(garden, best, number): print() print() # Printing all necesarry generation", "% y helper += '\\n' print(helper) print(\"_________________________________________________________\") print(\"Final Result\") # Transforming solved garden", "column = garden.n number = random.randrange((row + column) + (row + column)) #", "up def up(self, row, column, number): self.start = (row - 1, column +", "generate Random garden print(\"Counting...\") generateMap() elif inp == \"Test\" or inp == \"test\":", "== '': break pocet += 1 riadok = riadok.split() for i in riadok:", "tile exit\") print(\"_________________________________________________________\") inp = input() if inp == '1': print(\"Counting...\") solveMap(test1) elif", "= Chromosome(self.garden, False) # Crossing process mutateNum = random.random() crossNum = random.random() pivotPoint", "00, 00, 00, 00, 00, -1, 00, -1, 00, 00], [00, 00, 00,", "in garden.map: for y in x: if y == -1: helper += '", "-1, 00, -1, 00, 00], [00, 00, 00, 00, 00, 00, 00, -1,", "print(\"_________________________________________________________\") print(\"Write: File - Load from file\") print(\"Write: Generate - Generate random map\")", "newChr.algo() return newChr def solveMap(map): # Inicialisation of variables generation = [] #", "'4': position[1] += 1 # We check if it it not edge of", "> 0: self.fitness += 1 def algo(self): g = self.fGarden number = 0", "- 1) self.goTo = '2' # Representing left # Moving up def up(self,", "inp = input() # File Function - loading from File if inp ==", "- Load from file\") print(\"Write: Generate - Generate random map\") print(\"Write: Test -", "import sys test1 = [ [00, 00, 00, 00, 00, 00, 00, 00,", "for p in helper: try: nv.append(g.map[p[0]][p[1]]) except IndexError: nv.append('X') # If we find", "# Transforming garden into clear 
grided output helper = \"\" for x in", "00, -1, 00, -1, 00, 00], [00, 00, 00, 00, 00, 00, 00,", "1], [position[0], position[1] + 1]) else: helper = ([position[0] - 1, position[1]], [position[0]", "= 0 itera = sum(self.fGarden.map, []) for x in itera: if x >", "elif nv.count(0) == 2: position = helper[gene.rotation[x]] x += 1 if x ==", "column)) # Each direction has its own function for movement if number <", "00, 00, 00, 00, 00, 00, 00, 00, 00, 00], ] test4 =", "y in x: if y == -1: helper += ' K ' else:", "mutateNum < 0.5: self.mutate(newChr) newChr.algo() return newChr def solveMap(map): # Inicialisation of variables", "sys test1 = [ [00, 00, 00, 00, 00, 00, 00, 00, 00,", "0: continue number += 1 while (1): # Rake the tile g.map[position[0]][position[1]] =", "it is not raken - if not we move there if g.map[position[0]][position[1]] ==", "= Garden(map) # Creation of starting set of chromosones for i in range(50):", "y helper += '\\n' print(helper) print(\"_________________________________________________________\") sys.exit() print(\"----------------->Welcome to Zen Garden<-----------------\") print(\"\") print(\"Choose", "tile to move on if goTo == '3': position[0] -= 1 elif goTo", "[] # Garden creation garden = Garden(map) # Creation of starting set of", "nv.append('X') # If we find one not raken if nv.count(0) == 1: position", "map if position[0] not in range(g.m) or position[1] not in range(g.n): break #", "= random.random() pivotPoint = random.randrange(len(self.genes)) if crossNum < 0.85: # Crossing second type", "- Test with one tile exit\") print(\"_________________________________________________________\") inp = input() if inp ==", "in nv: self.fitnessFunc() return break if goTo == '3' or goTo == '1':", "# Mutation of chromosomes def mutate(self, newChr): for i in range(len(newChr.genes)): # New", "'3' else: goTo = '1' self.fitnessFunc() # Mutation of chromosomes def mutate(self, newChr):", "tiles nv = [] for p in helper: try: nv.append(g.map[p[0]][p[1]]) 
except IndexError: nv.append('X')", "# We check if we can enter the garden if g.map[position[0]][position[1]] != 0:", "from availbale tests print(\"_________________________________________________________\") print(\"Choose the number of test from 1 - 4\")", "- first part is from Chromosome 1, second is from Chromosome 2 newChr.genes", "00, 00], ] class GGarden: def __init__(self): self.generateRandomMap() self.countrocks() self.fitness() def generateRandomMap(self): self.m", "1 class Gene: def __init__(self, garden): row = garden.m column = garden.n number", "number < column + column + row: # up self.up(row, column, number) else:", "in helper: try: nv.append(g.map[p[0]][p[1]]) except IndexError: nv.append('X') # If we find one not", "test3 = [ [00, 00, 00, 00, 00, 00, 00, 00, 00, 00,", "[] riadok = f.readline() if riadok == '': break pocet += 1 riadok", "row, column, number): self.start = ((row + column) + (row + column) -", "garden, start): self.genes = [] helper = garden.m + garden.n + garden.rocks if", "'3': print(\"Counting...\") solveMap(test3) elif inp == '4': print(\"Counting...\") solveMap(test4) else: print(\"You entered wrong", "(number - column, column - 1) self.goTo = '2' # Representing left #", "print(\"_________________________________________________________\") print(\"Initial Garden\") # Transforming garden into clear grided output helper = \"\"", "-1, 00, -1, 00, 00, 00], [00, 00, 00, 00, 00, 00, -1,", "00, 00], [00, 00, 00, 00, 00, 00, -1, -1, -1, 00, 00,", "self.fitness() def fillMap(self, map): self.m = len(map) self.n = len(map[0]) self.map = copy.deepcopy(map)", "00, 00, 00, 00, 00, 00, 00, 00], [00, 00, 00, 00, 00,", "[00, -1, -1, -1, -1, 00, 00, 00, 00, 00, 00, 00], [00,", "we move there if g.map[position[0]][position[1]] == 0: continue # If we fing any", "Load from file\") print(\"Write: Generate - Generate random map\") print(\"Write: Test - Test", "range(50): generation.append(Chromosome(garden)) # Generations creation for i in 
range(800): # Saving the best", "00, 00, 00, 00, 00, 00, 00, 00, 00, 00], [00, -1, -1,", "crossing and mutation nextGeneration.append(chromosome1.crossing(chromosome2)) generation = nextGeneration number = i # Formatted Print", "Choose from availbale tests print(\"_________________________________________________________\") print(\"Choose the number of test from 1 -", "00, 00, 00, -1, -1, -1, 00, 00], [00, 00, 00, 00, 00,", "int(a) for a in bin(random.randrange(1024)) [2:].zfill(10) ] class Chromosome: def __init__(self, garden, start=True):", "crossNum = random.random() pivotPoint = random.randrange(len(self.genes)) if crossNum < 0.85: # Crossing second", "in range(50): generation.append(Chromosome(garden)) # Generations creation for i in range(1000): # Saving the", "Garden creation garden = Garden(map) # Creation of starting set of chromosones for", "self.genes = [] helper = garden.m + garden.n + garden.rocks if start ==", "integers while(1): row = [] riadok = f.readline() if riadok == '': break", "dow # Moving left def left(self, column, number): self.start = (number - column,", "for i in range(800): # Saving the best chromosome bestChr = max(generation, key=lambda", "random one\") print(\"_________________________________________________________\") print(\"Write: File - Load from file\") print(\"Write: Generate - Generate", "self.max_fitness = 0 for i in self.map: for j in i: if j", "[00, 00, 00, 00, 00, 00, -1, -1, -1, 00, 00, 00], [00,", "< row + column: # Left self.left(column, number) elif row + column <=", "00, 00, 00, 00, 00, 00, 00, 00, 00, 00], ] class GGarden:", "00, 00, 00, 00], [00, 00, -1, 00, 00, 00, 00, 00, 00,", "= [bestChr] # Check if we didnt find solution if bestChr.fitness == garden.max_fitness:", "# Right self.right(row, column, number) self.rotate() # Moving down def down(self, number): self.start", "= gene.goTo x = 0 # We check if we can enter the", "print('Tiles Left: %d' % (garden.max_fitness - best.fitness)) 
print(\"_________________________________________________________\") print(\"Initial Garden\") # Transforming garden", "File if inp == \"File\" or inp == \"file\": print(\"Counting...\") file = []", "chars into 2D array of integers while(1): row = [] riadok = f.readline()", "number): self.start = (number - column, column - 1) self.goTo = '2' #", "self.fitness = 0 itera = sum(self.fGarden.map, []) for x in itera: if x", "== '3': print(\"Counting...\") solveMap(test3) elif inp == '4': print(\"Counting...\") solveMap(test4) else: print(\"You entered", "garden.max_fitness: break # Creating more chromosones to fill generation for j in range(49):", "00], [00, 00, 00, 00, 00, 00, 00, 00, -1, -1, 00, 00],", "0 # We check if we can enter the garden if g.map[position[0]][position[1]] !=", "% (garden.max_fitness - best.fitness)) print(\"_________________________________________________________\") print(\"Initial Garden\") # Transforming garden into clear grided", "test from 1 - 4\") print(\"Test1 - Model test\") print(\"Test2 - Unsolvable test\")", "file or generate a random one\") print(\"_________________________________________________________\") print(\"Write: File - Load from file\")", "random chromosomes from current generation chromosome1, chromosome2 = sorted(random.sample(generation, 4), key=lambda x: x.fitness)[2:4]", "00, 00, 00, 00, 00], ] test4 = [ [00, 00, 00, 00,", "in range(self.m): row = [] for j in range(self.n): if random.random() < 0.1:", "= (0, number) self.goTo = '1' # Representing dow # Moving left def", "two not raken elif nv.count(0) == 2: position = helper[gene.rotation[x]] x += 1", "== '2': position[1] -= 1 elif goTo == '4': position[1] += 1 #", "if mutateNum < 0.5: self.mutate(newChr) newChr.algo() return newChr def solveMap(map): # Inicialisation of", "exit\") print(\"_________________________________________________________\") inp = input() if inp == '1': print(\"Counting...\") solveMap(test1) elif inp", "00, 00, 00], [00, 00, 00, 00, 00, 00, -1, 
00, -1, 00,", "-1, -1, -1, 00, 00], [00, 00, 00, 00, 00, 00, 00, 00,", "Garden creation garden = GGarden() # Creation of starting set of chromosones for", "[position[0] + 1, position[1]]) # We check surrounding tiles nv = [] for", "we find two not raken elif nv.count(0) == 2: position = helper[gene.rotation[x]] x", "break if goTo == '3' or goTo == '1': if helper.index(position) == 0:", "print(\"----------------->Welcome to Zen Garden<-----------------\") print(\"\") print(\"Choose to load a map from a file", "00, 00], [00, -1, 00, 00, 00, 00, 00, 00, 00, 00, 00,", "00], ] test2 = [ [00, 00, 00, 00, 00, 00, 00, 00,", "== \"Generate\" or inp == \"generate\": # Generate Function - generate Random garden", "(number + 1, garden.max_fitness, best.fitness)) print('Tiles Left: %d' % (garden.max_fitness - best.fitness)) print(\"_________________________________________________________\")", "- column, column - 1) self.goTo = '2' # Representing left # Moving", "Inicialisation of variables generation = [] # Garden creation garden = GGarden() #", "self.start = (0, number) self.goTo = '1' # Representing dow # Moving left", "map): self.m = len(map) self.n = len(map[0]) self.map = copy.deepcopy(map) def countrocks(self): self.rocks", "Unsolvable test\") print(\"Test3 - Test with staying in the garden\") print(\"Test4 - Test", "i in self.map: for j in i: if j == 0: self.max_fitness +=", "00, 00, 00, 00, 00, 00, 00, 00], [00, -1, -1, -1, -1,", "self.countrocks() self.fitness() def fillMap(self, map): self.m = len(map) self.n = len(map[0]) self.map =", "in i: if j == -1: self.rocks += 1 def fitness(self): self.max_fitness =", "- Unsolvable test\") print(\"Test3 - Test with staying in the garden\") print(\"Test4 -", "i in range(50): generation.append(Chromosome(garden)) # Generations creation for i in range(800): # Saving", "self.fillMap(map) self.countrocks() self.fitness() def fillMap(self, map): self.m = len(map) self.n = len(map[0]) self.map", "helper += '%2d ' % y helper += '\\n' 
print(helper) print(\"_________________________________________________________\") sys.exit() print(\"----------------->Welcome", "both newChr.genes = [] for i in range(len(self.genes)): newChr.genes.append(random.choice((self.genes[i], other.genes[i]))) elif crossNum <", "1, column + column + row - number - 1) self.goTo = '3'", "position[1] -= 1 # We choose new tile if goTo == '3' or", "if bestChr.fitness == garden.max_fitness: break # Creating more chromosones to fill generation for", "or inp == \"test\": # Test Function - Choose from availbale tests print(\"_________________________________________________________\")", "j in i: if j == 0: self.max_fitness += 1 class Garden: def", "input() # File Function - loading from File if inp == \"File\" or", "= random.randrange(2, 11) self.n = random.randrange(2, 11) self.map = [] for i in", "x: x.fitness)[2:4] # Create very new Chromosome by crossing and mutation nextGeneration.append(chromosome1.crossing(chromosome2)) generation", "+ row: # up self.up(row, column, number) else: # Right self.right(row, column, number)", "= Garden(self.garden.map) self.genesFill(garden, start) def genesFill(self, garden, start): self.genes = [] helper =", "00, 00, 00, 00], [00, -1, 00, 00, 00, 00, 00, 00, 00,", "(garden.max_fitness - best.fitness)) print(\"_________________________________________________________\") print(\"Initial Garden\") # Transforming garden into clear grided output", "'2' # Representing left # Moving up def up(self, row, column, number): self.start", "column + column + row: # up self.up(row, column, number) else: # Right", "third type - 3 - No crossing newChr.genes = random.choice((self.genes, other.genes)) # Mutations", "random import copy import sys test1 = [ [00, 00, 00, 00, 00,", "bin(random.randrange(1024)) [2:].zfill(10) ] class Chromosome: def __init__(self, garden, start=True): self.garden = garden self.fGarden", "# Inicialisation of variables generation = [] # Garden creation garden = Garden(map)", "column + 
column + row - number - 1) self.goTo = '3' #", "elif row + column <= number < column + column + row: #", "helper += ' K ' else: helper += '%2d ' % y helper", "j == 0: self.max_fitness += 1 class Garden: def __init__(self, map): self.fillMap(map) self.countrocks()", "-1, -1, 00, 00, 00], [00, 00, 00, 00, 00, 00, -1, 00,", "# Moving up def right(self, row, column, number): self.start = ((row + column)", "row.append(0) self.map.append(row) def countrocks(self): self.rocks = 0 for i in self.map: for j", "[] # Garden creation garden = GGarden() # Creation of starting set of", "= [] helper = garden.m + garden.n + garden.rocks if start == True:", "# Creating more chromosones to fill generation for j in range(49): # Choose", "<= number < row + column: # Left self.left(column, number) elif row +", "1 riadok = riadok.split() for i in riadok: if i == '00': row.append(0)", "more chromosones to fill generation for j in range(49): # Choose random chromosomes", "# Printing all necesarry generation information print('Generations:%4d Max-Fitness:%4d Best-Fitness:%4d' % (number + 1,", "i in self.map: for j in i: if j == -1: self.rocks +=", "x = 0 # If everything is raken else: if 'X' not in", "00, 00, 00, 00, 00, -1, -1, 00, 00], [00, 00, 00, 00,", "if goTo == '3': position[0] += 1 elif goTo == '1': position[0] -=", "- loading from File if inp == \"File\" or inp == \"file\": print(\"Counting...\")", "helper = garden.m + garden.n + garden.rocks if start == True: for i", "+= '%2d ' % y helper += '\\n' print(helper) print(\"_________________________________________________________\") print(\"Final Result\") #", "for i in range(len(newChr.genes)): # New chromosome number = random.random() if number <", "print(\"Write: Test - Test mode\") print(\"_________________________________________________________\") inp = input() # File Function -", "00, -1, -1, -1, 00, 00, 00], [00, 00, 00, 00, 00, 00,", "elif inp == \"Test\" or inp == \"test\": # Test Function - Choose", "if inp == '1': 
print(\"Counting...\") solveMap(test1) elif inp == '2': print(\"Counting...\") solveMap(test2) elif", "00, 00], ] test4 = [ [00, 00, 00, 00, 00, 00, 00,", "if goTo == '3': position[0] -= 1 elif goTo == '1': position[0] +=", "part is from Chromosome 1, second is from Chromosome 2 newChr.genes = self.genes[:pivotPoint]", "= '4' else: if helper.index(position) == 0: goTo = '3' else: goTo =", "by crossing and mutation nextGeneration.append(chromosome1.crossing(chromosome2)) generation = nextGeneration number = i # Formatted", "-1, -1, -1, -1, 00, 00, 00, 00, 00, 00, 00], [00, 00,", "number) self.rotate() # Moving down def down(self, number): self.start = (0, number) self.goTo", "genes newChr = Chromosome(self.garden, False) # Crossing process mutateNum = random.random() crossNum =", "x == len(gene.rotation): x = 0 # If everything is raken else: if", "self.goTo = '2' # Representing left # Moving up def up(self, row, column,", "best chromosome bestChr = max(generation, key=lambda x: x.fitness) nextGeneration = [bestChr] # Check", "== 1: position = helper[nv.index(0)] # if we find two not raken elif", "finalprint(garden, best, number): print() print() # Printing all necesarry generation information print('Generations:%4d Max-Fitness:%4d", "row.append(-1) file.append(row) solveMap(file) elif inp == \"Generate\" or inp == \"generate\": # Generate", "0: self.max_fitness += 1 class Garden: def __init__(self, map): self.fillMap(map) self.countrocks() self.fitness() def", "# Representing left # Moving up def up(self, row, column, number): self.start =", "inp == \"File\" or inp == \"file\": print(\"Counting...\") file = [] # File", "- Test with staying in the garden\") print(\"Test4 - Test with one tile", "i in range(50): generation.append(Chromosome(garden)) # Generations creation for i in range(1000): # Saving", "# We check if it it not edge of the map if position[0]", "'\\n' print(helper) print(\"_________________________________________________________\") print(\"Final 
Result\") # Transforming solved garden into clear grided output", "a map from a file or generate a random one\") print(\"_________________________________________________________\") print(\"Write: File", "number = random.random() if number < 0.1: newChr.genes[i] = Gene(self.garden) # New rotations", "Function - Choose from availbale tests print(\"_________________________________________________________\") print(\"Choose the number of test from", "helper = \"\" for x in garden.map: for y in x: if y", "break # We check if it is not raken - if not we", "Creating more chromosones to fill generation for j in range(49): # Choose random", "iterations for gene in self.genes: position = list(gene.start) goTo = gene.goTo x =", "elif goTo == '4': position[1] -= 1 # We choose new tile if", "key=lambda x: x.fitness)[2:4] # Create very new Chromosome by crossing and mutation nextGeneration.append(chromosome1.crossing(chromosome2))", "== 0: goTo = '3' else: goTo = '1' self.fitnessFunc() # Mutation of", "x in garden.map: for y in x: if y == -1: helper +=", "print(\"Counting...\") solveMap(test4) else: print(\"You entered wrong command\") sys.exit() else: print(\"You entered wrong command\")", "= nextGeneration number = i # Formatted Print finalprint(garden, bestChr, number) def generateMap():", "for i in range(50): generation.append(Chromosome(garden)) # Generations creation for i in range(1000): #", "self.fGarden number = 0 # Start of genes iterations for gene in self.genes:", "# Moving down def down(self, number): self.start = (0, number) self.goTo = '1'", "Function - loading from File if inp == \"File\" or inp == \"file\":", "chromosones for i in range(50): generation.append(Chromosome(garden)) # Generations creation for i in range(1000):", "list(gene.start) goTo = gene.goTo x = 0 # We check if we can", "position[0] not in range(g.m) or position[1] not in range(g.n): break # We check", "with one tile exit\") print(\"_________________________________________________________\") 
inp = input() if inp == '1': print(\"Counting...\")", "[00, 00, 00, 00, 00, 00, 00, 00, 00, 00, 00, 00], ]", "Generate Function - generate Random garden print(\"Counting...\") generateMap() elif inp == \"Test\" or", "def __init__(self, garden, start=True): self.garden = garden self.fGarden = Garden(self.garden.map) self.genesFill(garden, start) def", "garden if g.map[position[0]][position[1]] != 0: continue number += 1 while (1): # Rake", "Crossing third type - 3 - No crossing newChr.genes = random.choice((self.genes, other.genes)) #", "to move on if goTo == '3': position[0] -= 1 elif goTo ==", "if position[0] not in range(g.m) or position[1] not in range(g.n): break # We", "== 0: goTo = '2' else: goTo = '4' else: if helper.index(position) ==", "Moving up def right(self, row, column, number): self.start = ((row + column) +", "'-1': row.append(-1) file.append(row) solveMap(file) elif inp == \"Generate\" or inp == \"generate\": #", "position[0] -= 1 elif goTo == '1': position[0] += 1 elif goTo ==", "file\") print(\"Write: Generate - Generate random map\") print(\"Write: Test - Test mode\") print(\"_________________________________________________________\")", "+ row - number - 1) self.goTo = '3' # Representing up #", "< column + column + row: # up self.up(row, column, number) else: #", "\"file\": print(\"Counting...\") file = [] # File opening f = open(\"garden.txt\", \"r\") pocet", "move there if g.map[position[0]][position[1]] == 0: continue # If we fing any obstacle", "self.fitness += 1 def algo(self): g = self.fGarden number = 0 # Start", "in itera: if x > 0: self.fitness += 1 def algo(self): g =", "if j == 0: self.max_fitness += 1 class Garden: def __init__(self, map): self.fillMap(map)", "for j in i: if j == -1: self.rocks += 1 def fitness(self):", "newChr def solveMap(map): # Inicialisation of variables generation = [] # Garden creation", "row + column <= number < column + column + row: # up", "generateRandomMap(self): self.m = random.randrange(2, 11) self.n = 
random.randrange(2, 11) self.map = [] for", "00, 00, 00], ] test2 = [ [00, 00, 00, 00, 00, 00,", "!= 0: continue number += 1 while (1): # Rake the tile g.map[position[0]][position[1]]", "# Choose random chromosomes from current generation chromosome1, chromosome2 = sorted(random.sample(generation, 4), key=lambda", "00, 00, -1, 00, -1, 00, 00], [00, 00, 00, 00, 00, 00,", "find one not raken if nv.count(0) == 1: position = helper[nv.index(0)] # if", "choose new tile if goTo == '3' or goTo == '1': helper =", "Result\") # Transforming solved garden into clear grided output helper = \"\" for", "column + row - number - 1) self.goTo = '3' # Representing up", "((row + column) + (row + column) - number - 1, 0) self.goTo", "__init__(self, map): self.fillMap(map) self.countrocks() self.fitness() def fillMap(self, map): self.m = len(map) self.n =", "# Inicialisation of variables generation = [] # Garden creation garden = GGarden()", "i # Formatted Print finalprint(garden, bestChr, number) def finalprint(garden, best, number): print() print()", "of integers while(1): row = [] riadok = f.readline() if riadok == '':", "(1): # Rake the tile g.map[position[0]][position[1]] = number # We chose next tile", "== \"file\": print(\"Counting...\") file = [] # File opening f = open(\"garden.txt\", \"r\")", "range(helper): self.genes.append(Gene(self.garden)) self.algo() def fitnessFunc(self): self.fitness = 0 itera = sum(self.fGarden.map, []) for", "= 0 # If everything is raken else: if 'X' not in nv:", "00, 00, 00, 00, 00, 00], ] class GGarden: def __init__(self): self.generateRandomMap() self.countrocks()", "00], [00, 00, 00, 00, 00, 00, 00, -1, -1, -1, 00, 00],", "Right self.right(row, column, number) self.rotate() # Moving down def down(self, number): self.start =", "for movement if number < column: # Down self.down(number) elif column <= number", "+ column + row: # up self.up(row, column, number) else: # Right self.right(row,", "in range(self.n): if random.random() < 0.1: 
row.append(-1) else: row.append(0) self.map.append(row) def countrocks(self): self.rocks", "Generations creation for i in range(1000): # Saving the best chromosome bestChr =", "+= '\\n' print(helper) print(\"_________________________________________________________\") sys.exit() print(\"----------------->Welcome to Zen Garden<-----------------\") print(\"\") print(\"Choose to load", "if start == True: for i in range(helper): self.genes.append(Gene(self.garden)) self.algo() def fitnessFunc(self): self.fitness", "2D array of integers while(1): row = [] riadok = f.readline() if riadok", "self.up(row, column, number) else: # Right self.right(row, column, number) self.rotate() # Moving down", "def up(self, row, column, number): self.start = (row - 1, column + column", "if i == '00': row.append(0) if i == '-1': row.append(-1) file.append(row) solveMap(file) elif", "= \"\" for x in best.fGarden.map: for y in x: if y ==", "print(\"_________________________________________________________\") inp = input() # File Function - loading from File if inp", "00], [00, -1, 00, 00, 00, 00, 00, 00, 00, 00, 00, 00],", "2: position = helper[gene.rotation[x]] x += 1 if x == len(gene.rotation): x =", "K ' else: helper += '%2d ' % y helper += '\\n' print(helper)", "= self.genes[:pivotPoint] + other.genes[pivotPoint:] else: # Crossing third type - 3 - No", "else: helper += '%2d ' % y helper += '\\n' print(helper) print(\"_________________________________________________________\") print(\"Final", "finalprint(garden, bestChr, number) def finalprint(garden, best, number): print() print() # Printing all necesarry", "00, -1, 00, 00, 00, 00, 00, 00, 00, 00, 00], [00, 00,", "self.fitness() def generateRandomMap(self): self.m = random.randrange(2, 11) self.n = random.randrange(2, 11) self.map =", "position[1] not in range(g.n): break # We check if it is not raken", "-1, -1, -1, 00, 00, 00], [00, 00, 00, 00, 00, 00, -1,", "00, 00, 00, 00, 00, 00, 00, 00, 00], ] test4 = [", "j in range(self.n): if 
random.random() < 0.1: row.append(-1) else: row.append(0) self.map.append(row) def countrocks(self):", "and mutation nextGeneration.append(chromosome1.crossing(chromosome2)) generation = nextGeneration number = i # Formatted Print finalprint(garden,", "< 0.1: newChr.genes[i] = Gene(self.garden) # New rotations elif number < 0.2: newChr.genes[i].rotate()", "self.fitnessFunc() return break if goTo == '3' or goTo == '1': if helper.index(position)", "'2': position[1] -= 1 elif goTo == '4': position[1] += 1 # We", "with staying in the garden\") print(\"Test4 - Test with one tile exit\") print(\"_________________________________________________________\")", "self.map = copy.deepcopy(map) def countrocks(self): self.rocks = 0 for i in self.map: for", "new chromosomes def crossing(self, other): # Create new chromosome with empty genes newChr", "not we move there if g.map[position[0]][position[1]] == 0: continue # If we fing", "00, 00, 00], [00, 00, -1, 00, 00, 00, 00, 00, 00, 00,", "# Saving the best chromosome bestChr = max(generation, key=lambda x: x.fitness) nextGeneration =", "= len(map) self.n = len(map[0]) self.map = copy.deepcopy(map) def countrocks(self): self.rocks = 0", "00], ] class GGarden: def __init__(self): self.generateRandomMap() self.countrocks() self.fitness() def generateRandomMap(self): self.m =", "genesFill(self, garden, start): self.genes = [] helper = garden.m + garden.n + garden.rocks", "Chromosome by crossing and mutation nextGeneration.append(chromosome1.crossing(chromosome2)) generation = nextGeneration number = i #", "4\") print(\"Test1 - Model test\") print(\"Test2 - Unsolvable test\") print(\"Test3 - Test with", "[00, 00, 00, 00, 00, 00, -1, 00, -1, 00, 00, 00], [00,", "[]) for x in itera: if x > 0: self.fitness += 1 def", "itera = sum(self.fGarden.map, []) for x in itera: if x > 0: self.fitness", "def finalprint(garden, best, number): print() print() # Printing all necesarry generation information print('Generations:%4d", "self.goTo = '3' # 
Representing up # Moving up def right(self, row, column,", "00, 00, 00, 00, 00, 00], ] test2 = [ [00, 00, 00,", "there if g.map[position[0]][position[1]] == 0: continue # If we fing any obstacle we", "from file\") print(\"Write: Generate - Generate random map\") print(\"Write: Test - Test mode\")", "[00, 00, 00, 00, -1, 00, 00, 00, 00, 00, 00, 00], [00,", "raken else: if 'X' not in nv: self.fitnessFunc() return break if goTo ==", "mutation nextGeneration.append(chromosome1.crossing(chromosome2)) generation = nextGeneration number = i # Formatted Print finalprint(garden, bestChr,", "range(1000): # Saving the best chromosome bestChr = max(generation, key=lambda x: x.fitness) nextGeneration", "[00, 00, 00, 00, 00, 00, 00, -1, 00, -1, 00, 00], [00,", "garden\") print(\"Test4 - Test with one tile exit\") print(\"_________________________________________________________\") inp = input() if", "second type - 1 - Choosing random genes from both newChr.genes = []", "def crossing(self, other): # Create new chromosome with empty genes newChr = Chromosome(self.garden,", "00, -1, -1, -1, 00, 00], [00, 00, 00, 00, 00, 00, 00,", "- 2 - first part is from Chromosome 1, second is from Chromosome", "00, 00, -1, -1, -1, 00, 00, 00], [00, 00, 00, 00, 00,", "-1, 00, 00], [00, 00, 00, 00, 00, 00, 00, -1, -1, -1,", "Test with staying in the garden\") print(\"Test4 - Test with one tile exit\")", "] test4 = [ [00, 00, 00, 00, 00, 00, 00, -1, 00,", "availbale tests print(\"_________________________________________________________\") print(\"Choose the number of test from 1 - 4\") print(\"Test1", "Model test\") print(\"Test2 - Unsolvable test\") print(\"Test3 - Test with staying in the", "generation for j in range(49): # Choose random chromosomes from current generation chromosome1,", "0 # If everything is raken else: if 'X' not in nv: self.fitnessFunc()", "- Choosing random genes from both newChr.genes = [] for i in range(len(self.genes)):", "if j == 0: self.max_fitness += 1 class Gene: def 
__init__(self, garden): row", "type - 2 - first part is from Chromosome 1, second is from", "movement direction if goTo == '3': position[0] += 1 elif goTo == '1':", "of genes iterations for gene in self.genes: position = list(gene.start) goTo = gene.goTo", "up(self, row, column, number): self.start = (row - 1, column + column +", "number = 0 # Start of genes iterations for gene in self.genes: position", "chromosome bestChr = max(generation, key=lambda x: x.fitness) nextGeneration = [bestChr] # Check if", "test\") print(\"Test3 - Test with staying in the garden\") print(\"Test4 - Test with", "pivotPoint = random.randrange(len(self.genes)) if crossNum < 0.85: # Crossing second type - 1", "elif inp == '2': print(\"Counting...\") solveMap(test2) elif inp == '3': print(\"Counting...\") solveMap(test3) elif", "-1: self.rocks += 1 def fitness(self): self.max_fitness = 0 for i in self.map:", "goTo = '1' self.fitnessFunc() # Mutation of chromosomes def mutate(self, newChr): for i", "# Representing dow # Moving left def left(self, column, number): self.start = (number", "for x in garden.map: for y in x: if y == -1: helper", "+ column + row - number - 1) self.goTo = '3' # Representing", "can enter the garden if g.map[position[0]][position[1]] != 0: continue number += 1 while", "print(\"Choose the number of test from 1 - 4\") print(\"Test1 - Model test\")", "g.map[position[0]][position[1]] = number # We chose next tile to move on if goTo", "== '1': print(\"Counting...\") solveMap(test1) elif inp == '2': print(\"Counting...\") solveMap(test2) elif inp ==", "Moving down def down(self, number): self.start = (0, number) self.goTo = '1' #", "start) def genesFill(self, garden, start): self.genes = [] helper = garden.m + garden.n", "= len(map[0]) self.map = copy.deepcopy(map) def countrocks(self): self.rocks = 0 for i in", "- number - 1) self.goTo = '3' # Representing up # Moving up", "+ (row + column)) # Each direction has its own function for movement", "1 while (1): # Rake the 
tile g.map[position[0]][position[1]] = number # We chose", "new Chromosome by crossing and mutation nextGeneration.append(chromosome1.crossing(chromosome2)) generation = nextGeneration number = i", "best, number): print() print() # Printing all necesarry generation information print('Generations:%4d Max-Fitness:%4d Best-Fitness:%4d'", "newChr): for i in range(len(newChr.genes)): # New chromosome number = random.random() if number", "00, 00], [00, 00, 00, 00, 00, 00, 00, -1, -1, -1, 00,", "= '2' else: goTo = '4' else: if helper.index(position) == 0: goTo =", "goTo == '1': if helper.index(position) == 0: goTo = '2' else: goTo =", "00, 00], [00, 00, -1, 00, 00, 00, 00, 00, 00, 00, 00," ]
[ "# # Licensed under the Apache License, Version 2.0 (the \"License\"); # you", "writing, software # distributed under the License is distributed on an \"AS IS\"", "body + birthday.date.strftime(\"%B %d\") + \"<hr />\" mail.send_mail(sender=appSettings[\"sender_address\"], to=toEmail, subject=\"Your upcoming birthdays\", body=body)", "body = body + birthday.date.strftime(\"%B %d\") + \"<hr />\" mail.send_mail(sender=appSettings[\"sender_address\"], to=toEmail, subject=\"Your upcoming", "KIND, either express or implied. # See the License for the specific language", "Unless required by applicable law or agreed to in writing, software # distributed", "You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #", "the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR", "# See the License for the specific language governing permissions and # limitations", "import webapp2 import utils from settings import appSettings from models import Owner, Birthday", "imports] import datetime from google.appengine.api import users from google.appengine.ext import ndb from google.appengine.api", "toEmail = birthday.owner.email body = body + birthday.firstName + birthday.lastName + \"<br />\"", "= body + birthday.firstName + birthday.lastName + \"<br />\" body = body +", "License. 
# You may obtain a copy of the License at # #", "= query.fetch(); birthdays = [] for user in allUsers: q1 = Birthday.query( user.owner.identity", "from google.appengine.api import users from google.appengine.ext import ndb from google.appengine.api import mail import", "\"Up coming birthdays:....\" for birthday in birthdays: toEmail = birthday.owner.email body = body", "law or agreed to in writing, software # distributed under the License is", "< currentMonthDay ) thisYearBDays = q2.fetch() nextYearBDays = q3.fetch() birthdays = thisYearBDays +", "the License for the specific language governing permissions and # limitations under the", "webapp2 import utils from settings import appSettings from models import Owner, Birthday class", "= q2.fetch() nextYearBDays = q3.fetch() birthdays = thisYearBDays + nextYearBDays body = \"Up", "compliance with the License. # You may obtain a copy of the License", "birthdays = thisYearBDays + nextYearBDays body = \"Up coming birthdays:....\" for birthday in", ") q3 = q1.filter( Birthday.monthday < currentMonthDay ) thisYearBDays = q2.fetch() nextYearBDays =", "thisYearBDays + nextYearBDays body = \"Up coming birthdays:....\" for birthday in birthdays: toEmail", "distinct=True) allUsers = query.fetch(); birthdays = [] for user in allUsers: q1 =", "== Birthday.owner.identity ).order( Birthday.monthday ) q2 = q1.filter( Birthday.monthday >= currentMonthDay ) q3", "= q1.filter( Birthday.monthday < currentMonthDay ) thisYearBDays = q2.fetch() nextYearBDays = q3.fetch() birthdays", "google.appengine.api import users from google.appengine.ext import ndb from google.appengine.api import mail import webapp2", "on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,", "this file except in compliance with the License. 
# You may obtain a", "(now.month) + \"%02d\" % (now.day) query = Birthday.query(projection=[Birthday.owner.identity], distinct=True) allUsers = query.fetch(); birthdays", "q1.filter( Birthday.monthday < currentMonthDay ) thisYearBDays = q2.fetch() nextYearBDays = q3.fetch() birthdays =", "the Apache License, Version 2.0 (the \"License\"); # you may not use this", "Birthday.query(projection=[Birthday.owner.identity], distinct=True) allUsers = query.fetch(); birthdays = [] for user in allUsers: q1", "you may not use this file except in compliance with the License. #", "\"<hr />\" mail.send_mail(sender=appSettings[\"sender_address\"], to=toEmail, subject=\"Your upcoming birthdays\", body=body) self.response.write(\"You have run birthdays cron", "query.fetch(); birthdays = [] for user in allUsers: q1 = Birthday.query( user.owner.identity ==", "of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable", "currentMonthDay = \"%02d\" % (now.month) + \"%02d\" % (now.day) query = Birthday.query(projection=[Birthday.owner.identity], distinct=True)", "q2 = q1.filter( Birthday.monthday >= currentMonthDay ) q3 = q1.filter( Birthday.monthday < currentMonthDay", "= \"Up coming birthdays:....\" for birthday in birthdays: toEmail = birthday.owner.email body =", "allUsers: q1 = Birthday.query( user.owner.identity == Birthday.owner.identity ).order( Birthday.monthday ) q2 = q1.filter(", "birthdays = [] for user in allUsers: q1 = Birthday.query( user.owner.identity == Birthday.owner.identity", "Owner, Birthday class Summary(webapp2.RequestHandler): def get(self): now = datetime.date.today() currentMonthDay = \"%02d\" %", "q2.fetch() nextYearBDays = q3.fetch() birthdays = thisYearBDays + nextYearBDays body = \"Up coming", "ANY KIND, either express or implied. 
# See the License for the specific", "import utils from settings import appSettings from models import Owner, Birthday class Summary(webapp2.RequestHandler):", "\"<br />\" body = body + birthday.date.strftime(\"%B %d\") + \"<hr />\" mail.send_mail(sender=appSettings[\"sender_address\"], to=toEmail,", "%d\") + \"<hr />\" mail.send_mail(sender=appSettings[\"sender_address\"], to=toEmail, subject=\"Your upcoming birthdays\", body=body) self.response.write(\"You have run", "language governing permissions and # limitations under the License. # [START imports] import", "in compliance with the License. # You may obtain a copy of the", "q3 = q1.filter( Birthday.monthday < currentMonthDay ) thisYearBDays = q2.fetch() nextYearBDays = q3.fetch()", "License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "limitations under the License. # [START imports] import datetime from google.appengine.api import users", "Summary(webapp2.RequestHandler): def get(self): now = datetime.date.today() currentMonthDay = \"%02d\" % (now.month) + \"%02d\"", "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #", "use this file except in compliance with the License. # You may obtain", "governing permissions and # limitations under the License. # [START imports] import datetime", "now = datetime.date.today() currentMonthDay = \"%02d\" % (now.month) + \"%02d\" % (now.day) query", "q1.filter( Birthday.monthday >= currentMonthDay ) q3 = q1.filter( Birthday.monthday < currentMonthDay ) thisYearBDays", "at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed", "[] for user in allUsers: q1 = Birthday.query( user.owner.identity == Birthday.owner.identity ).order( Birthday.monthday", "not use this file except in compliance with the License. # You may", "<reponame>joehalloran/birthday_project # Copyright 2016 Google Inc. 
# # Licensed under the Apache License,", "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See", "settings import appSettings from models import Owner, Birthday class Summary(webapp2.RequestHandler): def get(self): now", "models import Owner, Birthday class Summary(webapp2.RequestHandler): def get(self): now = datetime.date.today() currentMonthDay =", "class Summary(webapp2.RequestHandler): def get(self): now = datetime.date.today() currentMonthDay = \"%02d\" % (now.month) +", "ndb from google.appengine.api import mail import webapp2 import utils from settings import appSettings", "See the License for the specific language governing permissions and # limitations under", "BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "License, Version 2.0 (the \"License\"); # you may not use this file except", "Birthday.monthday >= currentMonthDay ) q3 = q1.filter( Birthday.monthday < currentMonthDay ) thisYearBDays =", "# Licensed under the Apache License, Version 2.0 (the \"License\"); # you may", "for user in allUsers: q1 = Birthday.query( user.owner.identity == Birthday.owner.identity ).order( Birthday.monthday )", "birthday.lastName + \"<br />\" body = body + birthday.date.strftime(\"%B %d\") + \"<hr />\"", "% (now.month) + \"%02d\" % (now.day) query = Birthday.query(projection=[Birthday.owner.identity], distinct=True) allUsers = query.fetch();", "datetime from google.appengine.api import users from google.appengine.ext import ndb from google.appengine.api import mail", "\"%02d\" % (now.month) + \"%02d\" % (now.day) query = Birthday.query(projection=[Birthday.owner.identity], distinct=True) allUsers =", "= [] for user in allUsers: q1 = Birthday.query( user.owner.identity == Birthday.owner.identity ).order(", "birthday in birthdays: toEmail = birthday.owner.email body = body + birthday.firstName + birthday.lastName", "IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "a copy of the 
License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required", "distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY", "thisYearBDays = q2.fetch() nextYearBDays = q3.fetch() birthdays = thisYearBDays + nextYearBDays body =", "coming birthdays:....\" for birthday in birthdays: toEmail = birthday.owner.email body = body +", "# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in", "Google Inc. # # Licensed under the Apache License, Version 2.0 (the \"License\");", "OF ANY KIND, either express or implied. # See the License for the", "mail import webapp2 import utils from settings import appSettings from models import Owner,", "body = body + birthday.firstName + birthday.lastName + \"<br />\" body = body", "2.0 (the \"License\"); # you may not use this file except in compliance", "import datetime from google.appengine.api import users from google.appengine.ext import ndb from google.appengine.api import", "# you may not use this file except in compliance with the License.", "user.owner.identity == Birthday.owner.identity ).order( Birthday.monthday ) q2 = q1.filter( Birthday.monthday >= currentMonthDay )", "+ \"%02d\" % (now.day) query = Birthday.query(projection=[Birthday.owner.identity], distinct=True) allUsers = query.fetch(); birthdays =", "Birthday.monthday ) q2 = q1.filter( Birthday.monthday >= currentMonthDay ) q3 = q1.filter( Birthday.monthday", "[START imports] import datetime from google.appengine.api import users from google.appengine.ext import ndb from", "# Copyright 2016 Google Inc. # # Licensed under the Apache License, Version", "for the specific language governing permissions and # limitations under the License. #", "agreed to in writing, software # distributed under the License is distributed on", "License. 
# [START imports] import datetime from google.appengine.api import users from google.appengine.ext import", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the", "= \"%02d\" % (now.month) + \"%02d\" % (now.day) query = Birthday.query(projection=[Birthday.owner.identity], distinct=True) allUsers", "utils from settings import appSettings from models import Owner, Birthday class Summary(webapp2.RequestHandler): def", "body = \"Up coming birthdays:....\" for birthday in birthdays: toEmail = birthday.owner.email body", "q1 = Birthday.query( user.owner.identity == Birthday.owner.identity ).order( Birthday.monthday ) q2 = q1.filter( Birthday.monthday", "(the \"License\"); # you may not use this file except in compliance with", "= datetime.date.today() currentMonthDay = \"%02d\" % (now.month) + \"%02d\" % (now.day) query =", "in allUsers: q1 = Birthday.query( user.owner.identity == Birthday.owner.identity ).order( Birthday.monthday ) q2 =", "# # Unless required by applicable law or agreed to in writing, software", ").order( Birthday.monthday ) q2 = q1.filter( Birthday.monthday >= currentMonthDay ) q3 = q1.filter(", "in birthdays: toEmail = birthday.owner.email body = body + birthday.firstName + birthday.lastName +", "express or implied. # See the License for the specific language governing permissions", "= q1.filter( Birthday.monthday >= currentMonthDay ) q3 = q1.filter( Birthday.monthday < currentMonthDay )", "+ birthday.firstName + birthday.lastName + \"<br />\" body = body + birthday.date.strftime(\"%B %d\")", "Version 2.0 (the \"License\"); # you may not use this file except in", "# Unless required by applicable law or agreed to in writing, software #", "# [START imports] import datetime from google.appengine.api import users from google.appengine.ext import ndb", "except in compliance with the License. 
# You may obtain a copy of", "import users from google.appengine.ext import ndb from google.appengine.api import mail import webapp2 import", "by applicable law or agreed to in writing, software # distributed under the", "birthday.date.strftime(\"%B %d\") + \"<hr />\" mail.send_mail(sender=appSettings[\"sender_address\"], to=toEmail, subject=\"Your upcoming birthdays\", body=body) self.response.write(\"You have", "+ birthday.lastName + \"<br />\" body = body + birthday.date.strftime(\"%B %d\") + \"<hr", "copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by", "import Owner, Birthday class Summary(webapp2.RequestHandler): def get(self): now = datetime.date.today() currentMonthDay = \"%02d\"", "either express or implied. # See the License for the specific language governing", "software # distributed under the License is distributed on an \"AS IS\" BASIS,", "Inc. # # Licensed under the Apache License, Version 2.0 (the \"License\"); #", "# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0", "may not use this file except in compliance with the License. # You", "License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS", "Licensed under the Apache License, Version 2.0 (the \"License\"); # you may not", "an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either", "# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to", "the License. 
# [START imports] import datetime from google.appengine.api import users from google.appengine.ext", "def get(self): now = datetime.date.today() currentMonthDay = \"%02d\" % (now.month) + \"%02d\" %", "= Birthday.query(projection=[Birthday.owner.identity], distinct=True) allUsers = query.fetch(); birthdays = [] for user in allUsers:", "Birthday.owner.identity ).order( Birthday.monthday ) q2 = q1.filter( Birthday.monthday >= currentMonthDay ) q3 =", "file except in compliance with the License. # You may obtain a copy", "permissions and # limitations under the License. # [START imports] import datetime from", "google.appengine.api import mail import webapp2 import utils from settings import appSettings from models", "appSettings from models import Owner, Birthday class Summary(webapp2.RequestHandler): def get(self): now = datetime.date.today()", "the specific language governing permissions and # limitations under the License. # [START", "under the License. # [START imports] import datetime from google.appengine.api import users from", "q3.fetch() birthdays = thisYearBDays + nextYearBDays body = \"Up coming birthdays:....\" for birthday", "under the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES", "(now.day) query = Birthday.query(projection=[Birthday.owner.identity], distinct=True) allUsers = query.fetch(); birthdays = [] for user", "import ndb from google.appengine.api import mail import webapp2 import utils from settings import", "License for the specific language governing permissions and # limitations under the License.", "the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law", "Copyright 2016 Google Inc. # # Licensed under the Apache License, Version 2.0", "the License. # You may obtain a copy of the License at #", "# limitations under the License. 
# [START imports] import datetime from google.appengine.api import", "Birthday.query( user.owner.identity == Birthday.owner.identity ).order( Birthday.monthday ) q2 = q1.filter( Birthday.monthday >= currentMonthDay", "birthday.firstName + birthday.lastName + \"<br />\" body = body + birthday.date.strftime(\"%B %d\") +", "+ \"<br />\" body = body + birthday.date.strftime(\"%B %d\") + \"<hr />\" mail.send_mail(sender=appSettings[\"sender_address\"],", "to in writing, software # distributed under the License is distributed on an", "specific language governing permissions and # limitations under the License. # [START imports]", "\"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express", "# distributed under the License is distributed on an \"AS IS\" BASIS, #", "allUsers = query.fetch(); birthdays = [] for user in allUsers: q1 = Birthday.query(", "implied. # See the License for the specific language governing permissions and #", "birthdays: toEmail = birthday.owner.email body = body + birthday.firstName + birthday.lastName + \"<br", "= birthday.owner.email body = body + birthday.firstName + birthday.lastName + \"<br />\" body", "= Birthday.query( user.owner.identity == Birthday.owner.identity ).order( Birthday.monthday ) q2 = q1.filter( Birthday.monthday >=", "\"License\"); # you may not use this file except in compliance with the", "birthdays:....\" for birthday in birthdays: toEmail = birthday.owner.email body = body + birthday.firstName", "from settings import appSettings from models import Owner, Birthday class Summary(webapp2.RequestHandler): def get(self):", "is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF", "+ birthday.date.strftime(\"%B %d\") + \"<hr />\" mail.send_mail(sender=appSettings[\"sender_address\"], to=toEmail, subject=\"Your upcoming birthdays\", body=body) self.response.write(\"You", "obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless", "+ \"<hr />\" 
mail.send_mail(sender=appSettings[\"sender_address\"], to=toEmail, subject=\"Your upcoming birthdays\", body=body) self.response.write(\"You have run birthdays", "required by applicable law or agreed to in writing, software # distributed under", "datetime.date.today() currentMonthDay = \"%02d\" % (now.month) + \"%02d\" % (now.day) query = Birthday.query(projection=[Birthday.owner.identity],", "google.appengine.ext import ndb from google.appengine.api import mail import webapp2 import utils from settings", "get(self): now = datetime.date.today() currentMonthDay = \"%02d\" % (now.month) + \"%02d\" % (now.day)", "= body + birthday.date.strftime(\"%B %d\") + \"<hr />\" mail.send_mail(sender=appSettings[\"sender_address\"], to=toEmail, subject=\"Your upcoming birthdays\",", "Birthday class Summary(webapp2.RequestHandler): def get(self): now = datetime.date.today() currentMonthDay = \"%02d\" % (now.month)", "applicable law or agreed to in writing, software # distributed under the License", ">= currentMonthDay ) q3 = q1.filter( Birthday.monthday < currentMonthDay ) thisYearBDays = q2.fetch()", "% (now.day) query = Birthday.query(projection=[Birthday.owner.identity], distinct=True) allUsers = query.fetch(); birthdays = [] for", "user in allUsers: q1 = Birthday.query( user.owner.identity == Birthday.owner.identity ).order( Birthday.monthday ) q2", "currentMonthDay ) q3 = q1.filter( Birthday.monthday < currentMonthDay ) thisYearBDays = q2.fetch() nextYearBDays", "from google.appengine.ext import ndb from google.appengine.api import mail import webapp2 import utils from", "= thisYearBDays + nextYearBDays body = \"Up coming birthdays:....\" for birthday in birthdays:", "/>\" body = body + birthday.date.strftime(\"%B %d\") + \"<hr />\" mail.send_mail(sender=appSettings[\"sender_address\"], to=toEmail, subject=\"Your", "users from google.appengine.ext import ndb from google.appengine.api import mail import webapp2 import utils", "or agreed to in writing, software # distributed 
under the License is distributed", "import mail import webapp2 import utils from settings import appSettings from models import", "import appSettings from models import Owner, Birthday class Summary(webapp2.RequestHandler): def get(self): now =", "nextYearBDays body = \"Up coming birthdays:....\" for birthday in birthdays: toEmail = birthday.owner.email", "body + birthday.firstName + birthday.lastName + \"<br />\" body = body + birthday.date.strftime(\"%B", "or implied. # See the License for the specific language governing permissions and", "for birthday in birthdays: toEmail = birthday.owner.email body = body + birthday.firstName +", "+ nextYearBDays body = \"Up coming birthdays:....\" for birthday in birthdays: toEmail =", "2016 Google Inc. # # Licensed under the Apache License, Version 2.0 (the", "distributed under the License is distributed on an \"AS IS\" BASIS, # WITHOUT", "CONDITIONS OF ANY KIND, either express or implied. # See the License for", "currentMonthDay ) thisYearBDays = q2.fetch() nextYearBDays = q3.fetch() birthdays = thisYearBDays + nextYearBDays", "Apache License, Version 2.0 (the \"License\"); # you may not use this file", ") q2 = q1.filter( Birthday.monthday >= currentMonthDay ) q3 = q1.filter( Birthday.monthday <", "from google.appengine.api import mail import webapp2 import utils from settings import appSettings from", "nextYearBDays = q3.fetch() birthdays = thisYearBDays + nextYearBDays body = \"Up coming birthdays:....\"", "OR CONDITIONS OF ANY KIND, either express or implied. # See the License", "may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #", "\"%02d\" % (now.day) query = Birthday.query(projection=[Birthday.owner.identity], distinct=True) allUsers = query.fetch(); birthdays = []", "birthday.owner.email body = body + birthday.firstName + birthday.lastName + \"<br />\" body =", "and # limitations under the License. # [START imports] import datetime from google.appengine.api", "with the License. 
# You may obtain a copy of the License at", ") thisYearBDays = q2.fetch() nextYearBDays = q3.fetch() birthdays = thisYearBDays + nextYearBDays body", "http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing,", "/>\" mail.send_mail(sender=appSettings[\"sender_address\"], to=toEmail, subject=\"Your upcoming birthdays\", body=body) self.response.write(\"You have run birthdays cron job\")", "in writing, software # distributed under the License is distributed on an \"AS", "= q3.fetch() birthdays = thisYearBDays + nextYearBDays body = \"Up coming birthdays:....\" for", "Birthday.monthday < currentMonthDay ) thisYearBDays = q2.fetch() nextYearBDays = q3.fetch() birthdays = thisYearBDays", "query = Birthday.query(projection=[Birthday.owner.identity], distinct=True) allUsers = query.fetch(); birthdays = [] for user in", "under the Apache License, Version 2.0 (the \"License\"); # you may not use", "from models import Owner, Birthday class Summary(webapp2.RequestHandler): def get(self): now = datetime.date.today() currentMonthDay" ]
[ "online2=False started=False anvil.server.connect(\"BP2P6K2WEAVZG7EMXI56O6AU-2HHQWLD3DEXHTCGB\") camera = picamera.PiCamera(); camera.resolution = (1024, 768); message = \"", "light_induced(ldr): \"\"\"This starts/stops video recording if ldr<=200 and ldr>200 respectively \"\"\" global started", "contains the functions for taking video recordings and enabling/disabling light triggered automatic recording", "import anvil.server import picamera import takeImg import adc import smtplib import motionDetect motionState", "import picamera import takeImg import adc import smtplib import motionDetect motionState = False", "based on light intensity \"\"\" global message global online2 if online2==False: online2=True message=\"light", "recording\"\"\" global message global server if on1==True: camera.start_recording('Desktop/footage.h264') message=\"recording started\" elif on1==False: camera.stop_recording()", "try: while True: if online2==True: light_induced(adc.readadc(0)) print \"light = \",adc.readadc(0) except KeyboardInterrupt: print(\"program", "if started==False: if ldr<=200: camera.start_recording('Desktop/lightfootage.h264') started=True elif started==True: if ldr>200: camera.stop_recording() started=False server", "a video recording\"\"\" global online if online==False: online=True start_stop(online) elif online==True: online=False start_stop(online)", "message global server if on1==True: camera.start_recording('Desktop/footage.h264') message=\"recording started\" elif on1==False: camera.stop_recording() message=\"footage captured\"", "on light intensity \"\"\" global message global online2 if online2==False: online2=True message=\"light induced", "camera.start_recording('Desktop/lightfootage.h264') started=True elif started==True: if ldr>200: camera.stop_recording() started=False server = smtplib.SMTP('smtp.gmail.com',587) server.starttls() server.login(\"<EMAIL>\",\"cam25project\")", "for taking video recordings and enabling/disabling light triggered 
automatic recording \"\"\" import anvil.server", "function to set/reset a flag to enable/disable automatic recording based on light intensity", "ldr<=200 and ldr>200 respectively \"\"\" global started if started==False: if ldr<=200: camera.start_recording('Desktop/lightfootage.h264') started=True", "light intensity \"\"\" global message global online2 if online2==False: online2=True message=\"light induced recording", "message global online2 if online2==False: online2=True message=\"light induced recording enabled\" elif online2==True: online2=False", "message=\"light induced recording enabled\" elif online2==True: online2=False message=\"light induced recording disabled\" def start_stop(on1):", "\",adc.readadc(0) except KeyboardInterrupt: print(\"program terminated\") # This is the output when the program", "video recording\"\"\" global message global server if on1==True: camera.start_recording('Desktop/footage.h264') message=\"recording started\" elif on1==False:", "functions for taking video recordings and enabling/disabling light triggered automatic recording \"\"\" import", "server.login(\"<EMAIL>\",\"cam25project\") online=False online2=False started=False anvil.server.connect(\"BP2P6K2WEAVZG7EMXI56O6AU-2HHQWLD3DEXHTCGB\") camera = picamera.PiCamera(); camera.resolution = (1024, 768); message", "captured\" mse=\"Subject: {}\\n\\n{}\".format(\"NEW FOOTAGE CAPTURED\", \"New video recording has been captured\") server.sendmail(\"<EMAIL>\",\"<EMAIL>\",mse) server.quit()", "mse=\"Subject: {}\\n\\n{}\".format(\"NEW LIGHT INDUCED FOOTAGE CAPTURED\", \"New light triggered video captured\") server.sendmail(\"<EMAIL>\",\"<EMAIL>\",mse) server.quit()", "and enabling/disabling light triggered automatic recording \"\"\" import anvil.server import picamera import takeImg", "on1==False: camera.stop_recording() message=\"footage captured\" mse=\"Subject: {}\\n\\n{}\".format(\"NEW FOOTAGE CAPTURED\", \"New video recording has been", "message return message try: while True: if 
online2==True: light_induced(adc.readadc(0)) print \"light = \",adc.readadc(0)", "if online2==True: light_induced(adc.readadc(0)) print \"light = \",adc.readadc(0) except KeyboardInterrupt: print(\"program terminated\") # This", "set/reset a flag to enable/disable a video recording\"\"\" global online if online==False: online=True", "motionState = False server = smtplib.SMTP('smtp.gmail.com',587) server.starttls() server.login(\"<EMAIL>\",\"cam25project\") online=False online2=False started=False anvil.server.connect(\"BP2P6K2WEAVZG7EMXI56O6AU-2HHQWLD3DEXHTCGB\") camera", "to enable/disable automatic recording based on light intensity \"\"\" global message global online2", "FOOTAGE CAPTURED\", \"New light triggered video captured\") server.sendmail(\"<EMAIL>\",\"<EMAIL>\",mse) server.quit() @anvil.server.callable def display_message(): \"\"\"Callable", "= smtplib.SMTP('smtp.gmail.com',587) server.starttls() server.login(\"<EMAIL>\",\"cam25project\") online=False online2=False started=False anvil.server.connect(\"BP2P6K2WEAVZG7EMXI56O6AU-2HHQWLD3DEXHTCGB\") camera = picamera.PiCamera(); camera.resolution =", "\"\"\"Callable function to set/reset a flag to enable/disable automatic recording based on light", "import takeImg import adc import smtplib import motionDetect motionState = False server =", "import adc import smtplib import motionDetect motionState = False server = smtplib.SMTP('smtp.gmail.com',587) server.starttls()", "online2=False message=\"light induced recording disabled\" def start_stop(on1): \"\"\"This is the function that does", "recording if ldr<=200 and ldr>200 respectively \"\"\" global started if started==False: if ldr<=200:", "\"\"\" global message global online2 if online2==False: online2=True message=\"light induced recording enabled\" elif", "automatic recording based on light intensity \"\"\" global message global online2 if online2==False:", "does the actual video recording\"\"\" global message global server if on1==True: 
camera.start_recording('Desktop/footage.h264') message=\"recording", "captured\") server.sendmail(\"<EMAIL>\",\"<EMAIL>\",mse) server.quit() def light_induced(ldr): \"\"\"This starts/stops video recording if ldr<=200 and ldr>200", "online==False: online=True start_stop(online) elif online==True: online=False start_stop(online) @anvil.server.callable def takelightvideo(): \"\"\"Callable function to", "started\" elif on1==False: camera.stop_recording() message=\"footage captured\" mse=\"Subject: {}\\n\\n{}\".format(\"NEW FOOTAGE CAPTURED\", \"New video recording", "if ldr<=200 and ldr>200 respectively \"\"\" global started if started==False: if ldr<=200: camera.start_recording('Desktop/lightfootage.h264')", "if ldr<=200: camera.start_recording('Desktop/lightfootage.h264') started=True elif started==True: if ldr>200: camera.stop_recording() started=False server = smtplib.SMTP('smtp.gmail.com',587)", "function to display appropriate alert on button press\"\"\" global message return message try:", "takelightvideo(): \"\"\"Callable function to set/reset a flag to enable/disable automatic recording based on", "KeyboardInterrupt: print(\"program terminated\") # This is the output when the program is terminated", "= \" \" message2=\" \" @anvil.server.callable def takevideo(): \"\"\"Callable function to set/reset a", "elif on1==False: camera.stop_recording() message=\"footage captured\" mse=\"Subject: {}\\n\\n{}\".format(\"NEW FOOTAGE CAPTURED\", \"New video recording has", "and ldr>200 respectively \"\"\" global started if started==False: if ldr<=200: camera.start_recording('Desktop/lightfootage.h264') started=True elif", "LIGHT INDUCED FOOTAGE CAPTURED\", \"New light triggered video captured\") server.sendmail(\"<EMAIL>\",\"<EMAIL>\",mse) server.quit() @anvil.server.callable def", "\"\"\"Callable function to set/reset a flag to enable/disable a video recording\"\"\" global online", "import motionDetect motionState = False server = smtplib.SMTP('smtp.gmail.com',587) 
server.starttls() server.login(\"<EMAIL>\",\"cam25project\") online=False online2=False started=False", "to set/reset a flag to enable/disable a video recording\"\"\" global online if online==False:", "enable/disable a video recording\"\"\" global online if online==False: online=True start_stop(online) elif online==True: online=False", "online2==True: light_induced(adc.readadc(0)) print \"light = \",adc.readadc(0) except KeyboardInterrupt: print(\"program terminated\") # This is", "recording disabled\" def start_stop(on1): \"\"\"This is the function that does the actual video", "\"\"\" import anvil.server import picamera import takeImg import adc import smtplib import motionDetect", "\" message2=\" \" @anvil.server.callable def takevideo(): \"\"\"Callable function to set/reset a flag to", "anvil.server import picamera import takeImg import adc import smtplib import motionDetect motionState =", "enabling/disabling light triggered automatic recording \"\"\" import anvil.server import picamera import takeImg import", "online=False online2=False started=False anvil.server.connect(\"BP2P6K2WEAVZG7EMXI56O6AU-2HHQWLD3DEXHTCGB\") camera = picamera.PiCamera(); camera.resolution = (1024, 768); message =", "global message global online2 if online2==False: online2=True message=\"light induced recording enabled\" elif online2==True:", "camera.start_recording('Desktop/footage.h264') message=\"recording started\" elif on1==False: camera.stop_recording() message=\"footage captured\" mse=\"Subject: {}\\n\\n{}\".format(\"NEW FOOTAGE CAPTURED\", \"New", "respectively \"\"\" global started if started==False: if ldr<=200: camera.start_recording('Desktop/lightfootage.h264') started=True elif started==True: if", "captured\") server.sendmail(\"<EMAIL>\",\"<EMAIL>\",mse) server.quit() @anvil.server.callable def display_message(): \"\"\"Callable function to display appropriate alert on", "appropriate alert on button press\"\"\" global message return message try: while True: if", 
"started==False: if ldr<=200: camera.start_recording('Desktop/lightfootage.h264') started=True elif started==True: if ldr>200: camera.stop_recording() started=False server =", "(1024, 768); message = \" \" message2=\" \" @anvil.server.callable def takevideo(): \"\"\"Callable function", "\"\"\"This is the function that does the actual video recording\"\"\" global message global", "global online2 if online2==False: online2=True message=\"light induced recording enabled\" elif online2==True: online2=False message=\"light", "started=True elif started==True: if ldr>200: camera.stop_recording() started=False server = smtplib.SMTP('smtp.gmail.com',587) server.starttls() server.login(\"<EMAIL>\",\"cam25project\") mse=\"Subject:", "start_stop(on1): \"\"\"This is the function that does the actual video recording\"\"\" global message", "ldr>200: camera.stop_recording() started=False server = smtplib.SMTP('smtp.gmail.com',587) server.starttls() server.login(\"<EMAIL>\",\"cam25project\") mse=\"Subject: {}\\n\\n{}\".format(\"NEW LIGHT INDUCED FOOTAGE", "@anvil.server.callable def takelightvideo(): \"\"\"Callable function to set/reset a flag to enable/disable automatic recording", "alert on button press\"\"\" global message return message try: while True: if online2==True:", "camera.resolution = (1024, 768); message = \" \" message2=\" \" @anvil.server.callable def takevideo():", "\"\"\"Callable function to display appropriate alert on button press\"\"\" global message return message", "motionDetect motionState = False server = smtplib.SMTP('smtp.gmail.com',587) server.starttls() server.login(\"<EMAIL>\",\"cam25project\") online=False online2=False started=False anvil.server.connect(\"BP2P6K2WEAVZG7EMXI56O6AU-2HHQWLD3DEXHTCGB\")", "online=True start_stop(online) elif online==True: online=False start_stop(online) @anvil.server.callable def takelightvideo(): \"\"\"Callable function to set/reset", "to set/reset a flag to enable/disable automatic recording based on light intensity 
\"\"\"", "set/reset a flag to enable/disable automatic recording based on light intensity \"\"\" global", "recording based on light intensity \"\"\" global message global online2 if online2==False: online2=True", "elif started==True: if ldr>200: camera.stop_recording() started=False server = smtplib.SMTP('smtp.gmail.com',587) server.starttls() server.login(\"<EMAIL>\",\"cam25project\") mse=\"Subject: {}\\n\\n{}\".format(\"NEW", "def start_stop(on1): \"\"\"This is the function that does the actual video recording\"\"\" global", "started=False server = smtplib.SMTP('smtp.gmail.com',587) server.starttls() server.login(\"<EMAIL>\",\"cam25project\") mse=\"Subject: {}\\n\\n{}\".format(\"NEW LIGHT INDUCED FOOTAGE CAPTURED\", \"New", "768); message = \" \" message2=\" \" @anvil.server.callable def takevideo(): \"\"\"Callable function to", "to enable/disable a video recording\"\"\" global online if online==False: online=True start_stop(online) elif online==True:", "global started if started==False: if ldr<=200: camera.start_recording('Desktop/lightfootage.h264') started=True elif started==True: if ldr>200: camera.stop_recording()", "{}\\n\\n{}\".format(\"NEW LIGHT INDUCED FOOTAGE CAPTURED\", \"New light triggered video captured\") server.sendmail(\"<EMAIL>\",\"<EMAIL>\",mse) server.quit() @anvil.server.callable", "if online==False: online=True start_stop(online) elif online==True: online=False start_stop(online) @anvil.server.callable def takelightvideo(): \"\"\"Callable function", "\"\"\"This module contains the functions for taking video recordings and enabling/disabling light triggered", "def takelightvideo(): \"\"\"Callable function to set/reset a flag to enable/disable automatic recording based", "server.starttls() server.login(\"<EMAIL>\",\"cam25project\") mse=\"Subject: {}\\n\\n{}\".format(\"NEW LIGHT INDUCED FOOTAGE CAPTURED\", \"New light triggered video captured\")", "automatic recording \"\"\" import anvil.server import picamera import takeImg import adc import 
smtplib", "start_stop(online) @anvil.server.callable def takelightvideo(): \"\"\"Callable function to set/reset a flag to enable/disable automatic", "takeImg import adc import smtplib import motionDetect motionState = False server = smtplib.SMTP('smtp.gmail.com',587)", "= (1024, 768); message = \" \" message2=\" \" @anvil.server.callable def takevideo(): \"\"\"Callable", "True: if online2==True: light_induced(adc.readadc(0)) print \"light = \",adc.readadc(0) except KeyboardInterrupt: print(\"program terminated\") #", "camera = picamera.PiCamera(); camera.resolution = (1024, 768); message = \" \" message2=\" \"", "\" @anvil.server.callable def takevideo(): \"\"\"Callable function to set/reset a flag to enable/disable a", "server.login(\"<EMAIL>\",\"cam25project\") mse=\"Subject: {}\\n\\n{}\".format(\"NEW LIGHT INDUCED FOOTAGE CAPTURED\", \"New light triggered video captured\") server.sendmail(\"<EMAIL>\",\"<EMAIL>\",mse)", "FOOTAGE CAPTURED\", \"New video recording has been captured\") server.sendmail(\"<EMAIL>\",\"<EMAIL>\",mse) server.quit() def light_induced(ldr): \"\"\"This", "recording enabled\" elif online2==True: online2=False message=\"light induced recording disabled\" def start_stop(on1): \"\"\"This is", "\"\"\"This starts/stops video recording if ldr<=200 and ldr>200 respectively \"\"\" global started if", "picamera.PiCamera(); camera.resolution = (1024, 768); message = \" \" message2=\" \" @anvil.server.callable def", "server.sendmail(\"<EMAIL>\",\"<EMAIL>\",mse) server.quit() @anvil.server.callable def display_message(): \"\"\"Callable function to display appropriate alert on button", "server.quit() @anvil.server.callable def display_message(): \"\"\"Callable function to display appropriate alert on button press\"\"\"", "smtplib import motionDetect motionState = False server = smtplib.SMTP('smtp.gmail.com',587) server.starttls() server.login(\"<EMAIL>\",\"cam25project\") online=False online2=False", "mse=\"Subject: {}\\n\\n{}\".format(\"NEW FOOTAGE 
CAPTURED\", \"New video recording has been captured\") server.sendmail(\"<EMAIL>\",\"<EMAIL>\",mse) server.quit() def", "def display_message(): \"\"\"Callable function to display appropriate alert on button press\"\"\" global message", "started if started==False: if ldr<=200: camera.start_recording('Desktop/lightfootage.h264') started=True elif started==True: if ldr>200: camera.stop_recording() started=False", "online2==True: online2=False message=\"light induced recording disabled\" def start_stop(on1): \"\"\"This is the function that", "\" \" message2=\" \" @anvil.server.callable def takevideo(): \"\"\"Callable function to set/reset a flag", "global message global server if on1==True: camera.start_recording('Desktop/footage.h264') message=\"recording started\" elif on1==False: camera.stop_recording() message=\"footage", "recording \"\"\" import anvil.server import picamera import takeImg import adc import smtplib import", "recording has been captured\") server.sendmail(\"<EMAIL>\",\"<EMAIL>\",mse) server.quit() def light_induced(ldr): \"\"\"This starts/stops video recording if", "a flag to enable/disable a video recording\"\"\" global online if online==False: online=True start_stop(online)", "that does the actual video recording\"\"\" global message global server if on1==True: camera.start_recording('Desktop/footage.h264')", "the functions for taking video recordings and enabling/disabling light triggered automatic recording \"\"\"", "been captured\") server.sendmail(\"<EMAIL>\",\"<EMAIL>\",mse) server.quit() def light_induced(ldr): \"\"\"This starts/stops video recording if ldr<=200 and", "light triggered video captured\") server.sendmail(\"<EMAIL>\",\"<EMAIL>\",mse) server.quit() @anvil.server.callable def display_message(): \"\"\"Callable function to display", "module contains the functions for taking video recordings and enabling/disabling light triggered automatic", "{}\\n\\n{}\".format(\"NEW FOOTAGE CAPTURED\", \"New video recording has been captured\") 
server.sendmail(\"<EMAIL>\",\"<EMAIL>\",mse) server.quit() def light_induced(ldr):", "\"light = \",adc.readadc(0) except KeyboardInterrupt: print(\"program terminated\") # This is the output when", "has been captured\") server.sendmail(\"<EMAIL>\",\"<EMAIL>\",mse) server.quit() def light_induced(ldr): \"\"\"This starts/stops video recording if ldr<=200", "induced recording enabled\" elif online2==True: online2=False message=\"light induced recording disabled\" def start_stop(on1): \"\"\"This", "server.quit() def light_induced(ldr): \"\"\"This starts/stops video recording if ldr<=200 and ldr>200 respectively \"\"\"", "message=\"footage captured\" mse=\"Subject: {}\\n\\n{}\".format(\"NEW FOOTAGE CAPTURED\", \"New video recording has been captured\") server.sendmail(\"<EMAIL>\",\"<EMAIL>\",mse)", "anvil.server.connect(\"BP2P6K2WEAVZG7EMXI56O6AU-2HHQWLD3DEXHTCGB\") camera = picamera.PiCamera(); camera.resolution = (1024, 768); message = \" \" message2=\"", "display appropriate alert on button press\"\"\" global message return message try: while True:", "recordings and enabling/disabling light triggered automatic recording \"\"\" import anvil.server import picamera import", "flag to enable/disable automatic recording based on light intensity \"\"\" global message global", "intensity \"\"\" global message global online2 if online2==False: online2=True message=\"light induced recording enabled\"", "smtplib.SMTP('smtp.gmail.com',587) server.starttls() server.login(\"<EMAIL>\",\"cam25project\") mse=\"Subject: {}\\n\\n{}\".format(\"NEW LIGHT INDUCED FOOTAGE CAPTURED\", \"New light triggered video", "import smtplib import motionDetect motionState = False server = smtplib.SMTP('smtp.gmail.com',587) server.starttls() server.login(\"<EMAIL>\",\"cam25project\") online=False", "@anvil.server.callable def display_message(): \"\"\"Callable function to display appropriate alert on button press\"\"\" global", "disabled\" def start_stop(on1): \"\"\"This is the function that does the 
actual video recording\"\"\"", "ldr>200 respectively \"\"\" global started if started==False: if ldr<=200: camera.start_recording('Desktop/lightfootage.h264') started=True elif started==True:", "started=False anvil.server.connect(\"BP2P6K2WEAVZG7EMXI56O6AU-2HHQWLD3DEXHTCGB\") camera = picamera.PiCamera(); camera.resolution = (1024, 768); message = \" \"", "starts/stops video recording if ldr<=200 and ldr>200 respectively \"\"\" global started if started==False:", "except KeyboardInterrupt: print(\"program terminated\") # This is the output when the program is", "function to set/reset a flag to enable/disable a video recording\"\"\" global online if", "online2 if online2==False: online2=True message=\"light induced recording enabled\" elif online2==True: online2=False message=\"light induced", "def light_induced(ldr): \"\"\"This starts/stops video recording if ldr<=200 and ldr>200 respectively \"\"\" global", "print \"light = \",adc.readadc(0) except KeyboardInterrupt: print(\"program terminated\") # This is the output", "online if online==False: online=True start_stop(online) elif online==True: online=False start_stop(online) @anvil.server.callable def takelightvideo(): \"\"\"Callable", "server.starttls() server.login(\"<EMAIL>\",\"cam25project\") online=False online2=False started=False anvil.server.connect(\"BP2P6K2WEAVZG7EMXI56O6AU-2HHQWLD3DEXHTCGB\") camera = picamera.PiCamera(); camera.resolution = (1024, 768);", "actual video recording\"\"\" global message global server if on1==True: camera.start_recording('Desktop/footage.h264') message=\"recording started\" elif", "global server if on1==True: camera.start_recording('Desktop/footage.h264') message=\"recording started\" elif on1==False: camera.stop_recording() message=\"footage captured\" mse=\"Subject:", "flag to enable/disable a video recording\"\"\" global online if online==False: online=True start_stop(online) elif", "recording\"\"\" global online if online==False: online=True start_stop(online) elif 
online==True: online=False start_stop(online) @anvil.server.callable def", "elif online2==True: online2=False message=\"light induced recording disabled\" def start_stop(on1): \"\"\"This is the function", "CAPTURED\", \"New video recording has been captured\") server.sendmail(\"<EMAIL>\",\"<EMAIL>\",mse) server.quit() def light_induced(ldr): \"\"\"This starts/stops", "= \",adc.readadc(0) except KeyboardInterrupt: print(\"program terminated\") # This is the output when the", "return message try: while True: if online2==True: light_induced(adc.readadc(0)) print \"light = \",adc.readadc(0) except", "message try: while True: if online2==True: light_induced(adc.readadc(0)) print \"light = \",adc.readadc(0) except KeyboardInterrupt:", "online=False start_stop(online) @anvil.server.callable def takelightvideo(): \"\"\"Callable function to set/reset a flag to enable/disable", "message2=\" \" @anvil.server.callable def takevideo(): \"\"\"Callable function to set/reset a flag to enable/disable", "on button press\"\"\" global message return message try: while True: if online2==True: light_induced(adc.readadc(0))", "triggered video captured\") server.sendmail(\"<EMAIL>\",\"<EMAIL>\",mse) server.quit() @anvil.server.callable def display_message(): \"\"\"Callable function to display appropriate", "= picamera.PiCamera(); camera.resolution = (1024, 768); message = \" \" message2=\" \" @anvil.server.callable", "the function that does the actual video recording\"\"\" global message global server if", "the actual video recording\"\"\" global message global server if on1==True: camera.start_recording('Desktop/footage.h264') message=\"recording started\"", "server if on1==True: camera.start_recording('Desktop/footage.h264') message=\"recording started\" elif on1==False: camera.stop_recording() message=\"footage captured\" mse=\"Subject: {}\\n\\n{}\".format(\"NEW", "CAPTURED\", \"New light triggered video captured\") server.sendmail(\"<EMAIL>\",\"<EMAIL>\",mse) server.quit() 
@anvil.server.callable def display_message(): \"\"\"Callable function", "button press\"\"\" global message return message try: while True: if online2==True: light_induced(adc.readadc(0)) print", "picamera import takeImg import adc import smtplib import motionDetect motionState = False server", "server = smtplib.SMTP('smtp.gmail.com',587) server.starttls() server.login(\"<EMAIL>\",\"cam25project\") mse=\"Subject: {}\\n\\n{}\".format(\"NEW LIGHT INDUCED FOOTAGE CAPTURED\", \"New light", "False server = smtplib.SMTP('smtp.gmail.com',587) server.starttls() server.login(\"<EMAIL>\",\"cam25project\") online=False online2=False started=False anvil.server.connect(\"BP2P6K2WEAVZG7EMXI56O6AU-2HHQWLD3DEXHTCGB\") camera = picamera.PiCamera();", "a flag to enable/disable automatic recording based on light intensity \"\"\" global message", "triggered automatic recording \"\"\" import anvil.server import picamera import takeImg import adc import", "message=\"recording started\" elif on1==False: camera.stop_recording() message=\"footage captured\" mse=\"Subject: {}\\n\\n{}\".format(\"NEW FOOTAGE CAPTURED\", \"New video", "\"New light triggered video captured\") server.sendmail(\"<EMAIL>\",\"<EMAIL>\",mse) server.quit() @anvil.server.callable def display_message(): \"\"\"Callable function to", "video captured\") server.sendmail(\"<EMAIL>\",\"<EMAIL>\",mse) server.quit() @anvil.server.callable def display_message(): \"\"\"Callable function to display appropriate alert", "light triggered automatic recording \"\"\" import anvil.server import picamera import takeImg import adc", "ldr<=200: camera.start_recording('Desktop/lightfootage.h264') started=True elif started==True: if ldr>200: camera.stop_recording() started=False server = smtplib.SMTP('smtp.gmail.com',587) server.starttls()", "= smtplib.SMTP('smtp.gmail.com',587) server.starttls() server.login(\"<EMAIL>\",\"cam25project\") mse=\"Subject: {}\\n\\n{}\".format(\"NEW LIGHT INDUCED FOOTAGE CAPTURED\", \"New light triggered", "while 
True: if online2==True: light_induced(adc.readadc(0)) print \"light = \",adc.readadc(0) except KeyboardInterrupt: print(\"program terminated\")", "def takevideo(): \"\"\"Callable function to set/reset a flag to enable/disable a video recording\"\"\"", "camera.stop_recording() message=\"footage captured\" mse=\"Subject: {}\\n\\n{}\".format(\"NEW FOOTAGE CAPTURED\", \"New video recording has been captured\")", "adc import smtplib import motionDetect motionState = False server = smtplib.SMTP('smtp.gmail.com',587) server.starttls() server.login(\"<EMAIL>\",\"cam25project\")", "is the function that does the actual video recording\"\"\" global message global server", "@anvil.server.callable def takevideo(): \"\"\"Callable function to set/reset a flag to enable/disable a video", "if on1==True: camera.start_recording('Desktop/footage.h264') message=\"recording started\" elif on1==False: camera.stop_recording() message=\"footage captured\" mse=\"Subject: {}\\n\\n{}\".format(\"NEW FOOTAGE", "video recording has been captured\") server.sendmail(\"<EMAIL>\",\"<EMAIL>\",mse) server.quit() def light_induced(ldr): \"\"\"This starts/stops video recording", "message=\"light induced recording disabled\" def start_stop(on1): \"\"\"This is the function that does the", "online2=True message=\"light induced recording enabled\" elif online2==True: online2=False message=\"light induced recording disabled\" def", "video recording if ldr<=200 and ldr>200 respectively \"\"\" global started if started==False: if", "global online if online==False: online=True start_stop(online) elif online==True: online=False start_stop(online) @anvil.server.callable def takelightvideo():", "if online2==False: online2=True message=\"light induced recording enabled\" elif online2==True: online2=False message=\"light induced recording", "video recording\"\"\" global online if online==False: online=True start_stop(online) elif online==True: online=False start_stop(online) @anvil.server.callable", "\"\"\" global 
started if started==False: if ldr<=200: camera.start_recording('Desktop/lightfootage.h264') started=True elif started==True: if ldr>200:", "on1==True: camera.start_recording('Desktop/footage.h264') message=\"recording started\" elif on1==False: camera.stop_recording() message=\"footage captured\" mse=\"Subject: {}\\n\\n{}\".format(\"NEW FOOTAGE CAPTURED\",", "display_message(): \"\"\"Callable function to display appropriate alert on button press\"\"\" global message return", "started==True: if ldr>200: camera.stop_recording() started=False server = smtplib.SMTP('smtp.gmail.com',587) server.starttls() server.login(\"<EMAIL>\",\"cam25project\") mse=\"Subject: {}\\n\\n{}\".format(\"NEW LIGHT", "press\"\"\" global message return message try: while True: if online2==True: light_induced(adc.readadc(0)) print \"light", "online2==False: online2=True message=\"light induced recording enabled\" elif online2==True: online2=False message=\"light induced recording disabled\"", "takevideo(): \"\"\"Callable function to set/reset a flag to enable/disable a video recording\"\"\" global", "induced recording disabled\" def start_stop(on1): \"\"\"This is the function that does the actual", "server = smtplib.SMTP('smtp.gmail.com',587) server.starttls() server.login(\"<EMAIL>\",\"cam25project\") online=False online2=False started=False anvil.server.connect(\"BP2P6K2WEAVZG7EMXI56O6AU-2HHQWLD3DEXHTCGB\") camera = picamera.PiCamera(); camera.resolution", "\"New video recording has been captured\") server.sendmail(\"<EMAIL>\",\"<EMAIL>\",mse) server.quit() def light_induced(ldr): \"\"\"This starts/stops video", "taking video recordings and enabling/disabling light triggered automatic recording \"\"\" import anvil.server import", "smtplib.SMTP('smtp.gmail.com',587) server.starttls() server.login(\"<EMAIL>\",\"cam25project\") online=False online2=False started=False anvil.server.connect(\"BP2P6K2WEAVZG7EMXI56O6AU-2HHQWLD3DEXHTCGB\") camera = picamera.PiCamera(); camera.resolution = (1024,", 
"if ldr>200: camera.stop_recording() started=False server = smtplib.SMTP('smtp.gmail.com',587) server.starttls() server.login(\"<EMAIL>\",\"cam25project\") mse=\"Subject: {}\\n\\n{}\".format(\"NEW LIGHT INDUCED", "function that does the actual video recording\"\"\" global message global server if on1==True:", "INDUCED FOOTAGE CAPTURED\", \"New light triggered video captured\") server.sendmail(\"<EMAIL>\",\"<EMAIL>\",mse) server.quit() @anvil.server.callable def display_message():", "light_induced(adc.readadc(0)) print \"light = \",adc.readadc(0) except KeyboardInterrupt: print(\"program terminated\") # This is the", "enabled\" elif online2==True: online2=False message=\"light induced recording disabled\" def start_stop(on1): \"\"\"This is the", "= False server = smtplib.SMTP('smtp.gmail.com',587) server.starttls() server.login(\"<EMAIL>\",\"cam25project\") online=False online2=False started=False anvil.server.connect(\"BP2P6K2WEAVZG7EMXI56O6AU-2HHQWLD3DEXHTCGB\") camera =", "server.sendmail(\"<EMAIL>\",\"<EMAIL>\",mse) server.quit() def light_induced(ldr): \"\"\"This starts/stops video recording if ldr<=200 and ldr>200 respectively", "start_stop(online) elif online==True: online=False start_stop(online) @anvil.server.callable def takelightvideo(): \"\"\"Callable function to set/reset a", "to display appropriate alert on button press\"\"\" global message return message try: while", "elif online==True: online=False start_stop(online) @anvil.server.callable def takelightvideo(): \"\"\"Callable function to set/reset a flag", "message = \" \" message2=\" \" @anvil.server.callable def takevideo(): \"\"\"Callable function to set/reset", "online==True: online=False start_stop(online) @anvil.server.callable def takelightvideo(): \"\"\"Callable function to set/reset a flag to", "video recordings and enabling/disabling light triggered automatic recording \"\"\" import anvil.server import picamera", "camera.stop_recording() started=False server = 
smtplib.SMTP('smtp.gmail.com',587) server.starttls() server.login(\"<EMAIL>\",\"cam25project\") mse=\"Subject: {}\\n\\n{}\".format(\"NEW LIGHT INDUCED FOOTAGE CAPTURED\",", "enable/disable automatic recording based on light intensity \"\"\" global message global online2 if", "global message return message try: while True: if online2==True: light_induced(adc.readadc(0)) print \"light =" ]
[ "time, title, expt, col, path, recent): \"\"\"\\b _____ _____ ______ / ____| /", "return @gcf.command() @click.pass_context def statistic(ctx): \"\"\"Show results with statistic mode\"\"\" gm.stat_mode(ctx.obj['final_pd']) return @gcf.command()", "| | __| | |__| | | |____ | | \\_____| \\_____| |_|", "@click.pass_context def career(ctx, size, perc, ratio): \"\"\"Show results with the career mode\"\"\" gm.care_mode(ctx.obj['final_pd'],", "<reponame>pangjie/gcf<filename>gcf/gcf.py # !/usr/bin/env python # -*- coding: utf-8 -*- import click from .", "'-e', default=False, is_flag=True, help='Export data to a cvs file') @click.option('--col', '-c', default='date#title#dj', help='Show", "size, perc, ratio): \"\"\"Show results with the career mode\"\"\" gm.care_mode(ctx.obj['final_pd'], ctx.obj['path'], size, perc,", "# !/usr/bin/env python # -*- coding: utf-8 -*- import click from . import", "import click from . import gcf_mining as gm from . import gcf_scrape as", "@click.group(invoke_without_command=True, no_args_is_help=True) @click.option('--dj', '-d', default='', help=u'Search by dj, such as -d \\'西蒙#四十二\\' or", "is_flag=True, help='Reload the whole data.') @click.pass_context def update(ctx, deep): \"\"\"Update data to the", "\"\"\"Drop all radio information\"\"\" gs.drop_raido() return def main(): gcf(obj={}) if __name__ == '__main__':", "import gcf_scrape as gs @click.group(invoke_without_command=True, no_args_is_help=True) @click.option('--dj', '-d', default='', help=u'Search by dj, such", "default=False, is_flag=True, help='Export data to a cvs file') @click.option('--col', '-c', default='date#title#dj', help='Show data", "|_ | | | | __| | |__| | | |____ | |", "= gm.kw_mining(dj_pd, u'radio_program', filter(None, prog.split('#'))) title_pd = gm.kw_mining(prog_pd, u'radio_title', filter(None, title.split('#'))) time_pd =", "gm from . 
import gcf_scrape as gs @click.group(invoke_without_command=True, no_args_is_help=True) @click.option('--dj', '-d', default='', help=u'Search", "title, expt, col, path, recent): \"\"\"\\b _____ _____ ______ / ____| / ____|", "Tool \"\"\" if ctx.invoked_subcommand == 'update': return col = map((lambda x: 'radio_' +", "default=2, type=float, help='Graphic Ratio. Smaller value, more width graphic.') @click.pass_context def career(ctx, size,", "ctx.invoked_subcommand == 'statistic': ctx.obj['final_pd'] = final_pd return if ctx.invoked_subcommand is None: gm.df_print(final_pd, col)", "__| | |__| | | |____ | | \\_____| \\_____| |_| G-Cores(g-cores.com) Fans", "results with the career mode\"\"\" gm.care_mode(ctx.obj['final_pd'], ctx.obj['path'], size, perc, ratio) return @gcf.command() @click.pass_context", "\\'西蒙#四十二\\' or \\'西蒙\\'.\\ Use #, if you need a dj group.') @click.option('--prog', '-p',", "all radio information\"\"\" gs.drop_raido() return def main(): gcf(obj={}) if __name__ == '__main__': main()", "@click.pass_context def update(ctx, deep): \"\"\"Update data to the last gadio\"\"\" gs.scrape_radio_list(deep) return @gcf.command()", "if ctx.invoked_subcommand is None: gm.df_print(final_pd, col) return return @gcf.command() @click.option('--deep', default=False, is_flag=True, help='Reload", "as gm from . 
import gcf_scrape as gs @click.group(invoke_without_command=True, no_args_is_help=True) @click.option('--dj', '-d', default='',", "= final_pd return if ctx.invoked_subcommand is None: gm.df_print(final_pd, col) return return @gcf.command() @click.option('--deep',", "graphic.') @click.pass_context def career(ctx, size, perc, ratio): \"\"\"Show results with the career mode\"\"\"", "return @gcf.command() @click.pass_context def drop(ctx): \"\"\"Drop all radio information\"\"\" gs.drop_raido() return def main():", "default=9999, help='Show the most recent gadios.') @click.pass_context def gcf(ctx, dj, prog, time, title,", "= final_pd ctx.obj['path'] = path return if ctx.invoked_subcommand == 'statistic': ctx.obj['final_pd'] = final_pd", "final_pd.to_csv('./gcf.csv', encoding='utf-8', index=False) if ctx.invoked_subcommand == 'career': ctx.obj['final_pd'] = final_pd ctx.obj['path'] = path", "| ____| | | __ | | | |__ | | |_ |", "help='Choice the statistic size: Y(year),Q(quarter),M(month).') @click.option('--perc', '-p', default=False, is_flag=True, help='Show data with the", "'radio_' + str(x)), filter(None, col.split('#'))) dj_pd = gm.kw_mining(gm.df_pk(path), u'radio_dj', filter(None, dj.split('#'))) prog_pd =", "/ ____| | ____| | | __ | | | |__ | |", "default='Y', type=click.Choice(['Y', 'Q', 'M']), help='Choice the statistic size: Y(year),Q(quarter),M(month).') @click.option('--perc', '-p', default=False, is_flag=True,", "== 'statistic': ctx.obj['final_pd'] = final_pd return if ctx.invoked_subcommand is None: gm.df_print(final_pd, col) return", "by dj, such as -d \\'西蒙#四十二\\' or \\'西蒙\\'.\\ Use #, if you need", "return return @gcf.command() @click.option('--deep', default=False, is_flag=True, help='Reload the whole data.') @click.pass_context def update(ctx,", "specific columns as -tb \\'date#title#dj\\'') @click.option('--path', '-pt', default='', help='Give the path to loading", "default='', help='Search by program, such as -p \\'gadio\\'.\\ Use #, if you need", 
"ctx.invoked_subcommand == 'update': return col = map((lambda x: 'radio_' + str(x)), filter(None, col.split('#')))", "map((lambda x: 'radio_' + str(x)), filter(None, col.split('#'))) dj_pd = gm.kw_mining(gm.df_pk(path), u'radio_dj', filter(None, dj.split('#')))", "@gcf.command() @click.option('--deep', default=False, is_flag=True, help='Reload the whole data.') @click.pass_context def update(ctx, deep): \"\"\"Update", "help='Show data with the percentage of whole set.') @click.option('--ratio', '-r', default=2, type=float, help='Graphic", "default='', help=u'Search by dj, such as -d \\'西蒙#四十二\\' or \\'西蒙\\'.\\ Use #, if", "/ ____| / ____| | ____| | | __ | | | |__", "'update': return col = map((lambda x: 'radio_' + str(x)), filter(None, col.split('#'))) dj_pd =", "return if ctx.invoked_subcommand == 'statistic': ctx.obj['final_pd'] = final_pd return if ctx.invoked_subcommand is None:", "with the percentage of whole set.') @click.option('--ratio', '-r', default=2, type=float, help='Graphic Ratio. 
Smaller", "statistic size: Y(year),Q(quarter),M(month).') @click.option('--perc', '-p', default=False, is_flag=True, help='Show data with the percentage of", "loading another cvs data.') @click.option('--recent', '-r', default=9999, help='Show the most recent gadios.') @click.pass_context", "| |_ | | | | __| | |__| | | |____ |", "data with specific columns as -tb \\'date#title#dj\\'') @click.option('--path', '-pt', default='', help='Give the path", "cvs data.') @click.option('--recent', '-r', default=9999, help='Show the most recent gadios.') @click.pass_context def gcf(ctx,", "expt: final_pd.to_csv('./gcf.csv', encoding='utf-8', index=False) if ctx.invoked_subcommand == 'career': ctx.obj['final_pd'] = final_pd ctx.obj['path'] =", "#, if you need a dj group.') @click.option('--prog', '-p', default='', help='Search by program,", "type=click.Choice(['Y', 'Q', 'M']), help='Choice the statistic size: Y(year),Q(quarter),M(month).') @click.option('--perc', '-p', default=False, is_flag=True, help='Show", "--- A Gadio Info Tool \"\"\" if ctx.invoked_subcommand == 'update': return col =", "20180302') @click.option('--expt', '-e', default=False, is_flag=True, help='Export data to a cvs file') @click.option('--col', '-c',", "default='', help='Give the path to loading another cvs data.') @click.option('--recent', '-r', default=9999, help='Show", "\"\"\" if ctx.invoked_subcommand == 'update': return col = map((lambda x: 'radio_' + str(x)),", "return @gcf.command() @click.option('--deep', default=False, is_flag=True, help='Reload the whole data.') @click.pass_context def update(ctx, deep):", "drop(ctx): \"\"\"Drop all radio information\"\"\" gs.drop_raido() return def main(): gcf(obj={}) if __name__ ==", "| | | |__ | | |_ | | | | __| |", "'-t', nargs=2, default=('19850611', '20850611'), help='Search by release time, such as -t 20180101 20180302')", "= map((lambda x: 'radio_' + str(x)), filter(None, col.split('#'))) dj_pd = gm.kw_mining(gm.df_pk(path), u'radio_dj', filter(None,", 
"@click.pass_context def statistic(ctx): \"\"\"Show results with statistic mode\"\"\" gm.stat_mode(ctx.obj['final_pd']) return @gcf.command() @click.pass_context def", "program, such as -p \\'gadio\\'.\\ Use #, if you need multiple programs.') @click.option('--title',", "prog, time, title, expt, col, path, recent): \"\"\"\\b _____ _____ ______ / ____|", "col.split('#'))) dj_pd = gm.kw_mining(gm.df_pk(path), u'radio_dj', filter(None, dj.split('#'))) prog_pd = gm.kw_mining(dj_pd, u'radio_program', filter(None, prog.split('#')))", "size, perc, ratio) return @gcf.command() @click.pass_context def statistic(ctx): \"\"\"Show results with statistic mode\"\"\"", "default=False, is_flag=True, help='Reload the whole data.') @click.pass_context def update(ctx, deep): \"\"\"Update data to", "with specific columns as -tb \\'date#title#dj\\'') @click.option('--path', '-pt', default='', help='Give the path to", "import gcf_mining as gm from . import gcf_scrape as gs @click.group(invoke_without_command=True, no_args_is_help=True) @click.option('--dj',", "!/usr/bin/env python # -*- coding: utf-8 -*- import click from . import gcf_mining", "| __ | | | |__ | | |_ | | | |", "dj.split('#'))) prog_pd = gm.kw_mining(dj_pd, u'radio_program', filter(None, prog.split('#'))) title_pd = gm.kw_mining(prog_pd, u'radio_title', filter(None, title.split('#')))", "value, more width graphic.') @click.pass_context def career(ctx, size, perc, ratio): \"\"\"Show results with", "@click.option('--deep', default=False, is_flag=True, help='Reload the whole data.') @click.pass_context def update(ctx, deep): \"\"\"Update data", "percentage of whole set.') @click.option('--ratio', '-r', default=2, type=float, help='Graphic Ratio. 
Smaller value, more", "def update(ctx, deep): \"\"\"Update data to the last gadio\"\"\" gs.scrape_radio_list(deep) return @gcf.command() @click.option('--size',", "@click.option('--recent', '-r', default=9999, help='Show the most recent gadios.') @click.pass_context def gcf(ctx, dj, prog,", "# -*- coding: utf-8 -*- import click from . import gcf_mining as gm", "gm.timing_mining(title_pd, time[0], time[1]) final_pd = gm.recent(time_pd, recent) if expt: final_pd.to_csv('./gcf.csv', encoding='utf-8', index=False) if", "Y(year),Q(quarter),M(month).') @click.option('--perc', '-p', default=False, is_flag=True, help='Show data with the percentage of whole set.')", "\"\"\"\\b _____ _____ ______ / ____| / ____| | ____| | | __", "filter(None, col.split('#'))) dj_pd = gm.kw_mining(gm.df_pk(path), u'radio_dj', filter(None, dj.split('#'))) prog_pd = gm.kw_mining(dj_pd, u'radio_program', filter(None,", "gm.df_print(final_pd, col) return return @gcf.command() @click.option('--deep', default=False, is_flag=True, help='Reload the whole data.') @click.pass_context", "| | |____ | | \\_____| \\_____| |_| G-Cores(g-cores.com) Fans --- A Gadio", "is_flag=True, help='Show data with the percentage of whole set.') @click.option('--ratio', '-r', default=2, type=float,", "with statistic mode\"\"\" gm.stat_mode(ctx.obj['final_pd']) return @gcf.command() @click.pass_context def drop(ctx): \"\"\"Drop all radio information\"\"\"", "gcf_scrape as gs @click.group(invoke_without_command=True, no_args_is_help=True) @click.option('--dj', '-d', default='', help=u'Search by dj, such as", "results with statistic mode\"\"\" gm.stat_mode(ctx.obj['final_pd']) return @gcf.command() @click.pass_context def drop(ctx): \"\"\"Drop all radio", "'-tt', default='', help=u'Search by program, such as -tt \\'的\\'.') @click.option('--time', '-t', nargs=2, default=('19850611',", "-tt \\'的\\'.') @click.option('--time', '-t', nargs=2, default=('19850611', '20850611'), help='Search by release time, such as", "| | |_ | | | 
| __| | |__| | | |____", "gadios.') @click.pass_context def gcf(ctx, dj, prog, time, title, expt, col, path, recent): \"\"\"\\b", "if ctx.invoked_subcommand == 'update': return col = map((lambda x: 'radio_' + str(x)), filter(None,", "the percentage of whole set.') @click.option('--ratio', '-r', default=2, type=float, help='Graphic Ratio. Smaller value,", "another cvs data.') @click.option('--recent', '-r', default=9999, help='Show the most recent gadios.') @click.pass_context def", "== 'update': return col = map((lambda x: 'radio_' + str(x)), filter(None, col.split('#'))) dj_pd", "G-Cores(g-cores.com) Fans --- A Gadio Info Tool \"\"\" if ctx.invoked_subcommand == 'update': return", "+ str(x)), filter(None, col.split('#'))) dj_pd = gm.kw_mining(gm.df_pk(path), u'radio_dj', filter(None, dj.split('#'))) prog_pd = gm.kw_mining(dj_pd,", "recent) if expt: final_pd.to_csv('./gcf.csv', encoding='utf-8', index=False) if ctx.invoked_subcommand == 'career': ctx.obj['final_pd'] = final_pd", "def statistic(ctx): \"\"\"Show results with statistic mode\"\"\" gm.stat_mode(ctx.obj['final_pd']) return @gcf.command() @click.pass_context def drop(ctx):", "Gadio Info Tool \"\"\" if ctx.invoked_subcommand == 'update': return col = map((lambda x:", "prog.split('#'))) title_pd = gm.kw_mining(prog_pd, u'radio_title', filter(None, title.split('#'))) time_pd = gm.timing_mining(title_pd, time[0], time[1]) final_pd", "| |__ | | |_ | | | | __| | |__| |", "from . import gcf_scrape as gs @click.group(invoke_without_command=True, no_args_is_help=True) @click.option('--dj', '-d', default='', help=u'Search by", "you need a dj group.') @click.option('--prog', '-p', default='', help='Search by program, such as", "gcf_mining as gm from . 
import gcf_scrape as gs @click.group(invoke_without_command=True, no_args_is_help=True) @click.option('--dj', '-d',", "final_pd ctx.obj['path'] = path return if ctx.invoked_subcommand == 'statistic': ctx.obj['final_pd'] = final_pd return", "ctx.invoked_subcommand is None: gm.df_print(final_pd, col) return return @gcf.command() @click.option('--deep', default=False, is_flag=True, help='Reload the", "need multiple programs.') @click.option('--title', '-tt', default='', help=u'Search by program, such as -tt \\'的\\'.')", "return col = map((lambda x: 'radio_' + str(x)), filter(None, col.split('#'))) dj_pd = gm.kw_mining(gm.df_pk(path),", "dj group.') @click.option('--prog', '-p', default='', help='Search by program, such as -p \\'gadio\\'.\\ Use", "gm.recent(time_pd, recent) if expt: final_pd.to_csv('./gcf.csv', encoding='utf-8', index=False) if ctx.invoked_subcommand == 'career': ctx.obj['final_pd'] =", "'-p', default='', help='Search by program, such as -p \\'gadio\\'.\\ Use #, if you", "'-pt', default='', help='Give the path to loading another cvs data.') @click.option('--recent', '-r', default=9999,", "nargs=2, default=('19850611', '20850611'), help='Search by release time, such as -t 20180101 20180302') @click.option('--expt',", "data to the last gadio\"\"\" gs.scrape_radio_list(deep) return @gcf.command() @click.option('--size', '-s', default='Y', type=click.Choice(['Y', 'Q',", "gs.scrape_radio_list(deep) return @gcf.command() @click.option('--size', '-s', default='Y', type=click.Choice(['Y', 'Q', 'M']), help='Choice the statistic size:", "is_flag=True, help='Export data to a cvs file') @click.option('--col', '-c', default='date#title#dj', help='Show data with", "col) return return @gcf.command() @click.option('--deep', default=False, is_flag=True, help='Reload the whole data.') @click.pass_context def", "group.') @click.option('--prog', '-p', default='', help='Search by program, such as -p \\'gadio\\'.\\ Use #,", "| |__| | | |____ | | \\_____| \\_____| |_| 
G-Cores(g-cores.com) Fans ---", "filter(None, title.split('#'))) time_pd = gm.timing_mining(title_pd, time[0], time[1]) final_pd = gm.recent(time_pd, recent) if expt:", "____| | | __ | | | |__ | | |_ | |", "@click.option('--perc', '-p', default=False, is_flag=True, help='Show data with the percentage of whole set.') @click.option('--ratio',", "such as -d \\'西蒙#四十二\\' or \\'西蒙\\'.\\ Use #, if you need a dj", "dj, such as -d \\'西蒙#四十二\\' or \\'西蒙\\'.\\ Use #, if you need a", "or \\'西蒙\\'.\\ Use #, if you need a dj group.') @click.option('--prog', '-p', default='',", "whole data.') @click.pass_context def update(ctx, deep): \"\"\"Update data to the last gadio\"\"\" gs.scrape_radio_list(deep)", "such as -tt \\'的\\'.') @click.option('--time', '-t', nargs=2, default=('19850611', '20850611'), help='Search by release time,", "Use #, if you need multiple programs.') @click.option('--title', '-tt', default='', help=u'Search by program,", "'statistic': ctx.obj['final_pd'] = final_pd return if ctx.invoked_subcommand is None: gm.df_print(final_pd, col) return return", "default='date#title#dj', help='Show data with specific columns as -tb \\'date#title#dj\\'') @click.option('--path', '-pt', default='', help='Give", "if expt: final_pd.to_csv('./gcf.csv', encoding='utf-8', index=False) if ctx.invoked_subcommand == 'career': ctx.obj['final_pd'] = final_pd ctx.obj['path']", "def career(ctx, size, perc, ratio): \"\"\"Show results with the career mode\"\"\" gm.care_mode(ctx.obj['final_pd'], ctx.obj['path'],", "col, path, recent): \"\"\"\\b _____ _____ ______ / ____| / ____| | ____|", "-d \\'西蒙#四十二\\' or \\'西蒙\\'.\\ Use #, if you need a dj group.') @click.option('--prog',", "'career': ctx.obj['final_pd'] = final_pd ctx.obj['path'] = path return if ctx.invoked_subcommand == 'statistic': ctx.obj['final_pd']", "def gcf(ctx, dj, prog, time, title, expt, col, path, recent): \"\"\"\\b _____ _____", "== 'career': ctx.obj['final_pd'] = final_pd ctx.obj['path'] = path return if 
ctx.invoked_subcommand == 'statistic':", "a dj group.') @click.option('--prog', '-p', default='', help='Search by program, such as -p \\'gadio\\'.\\", "gm.kw_mining(gm.df_pk(path), u'radio_dj', filter(None, dj.split('#'))) prog_pd = gm.kw_mining(dj_pd, u'radio_program', filter(None, prog.split('#'))) title_pd = gm.kw_mining(prog_pd,", "the most recent gadios.') @click.pass_context def gcf(ctx, dj, prog, time, title, expt, col,", "dj, prog, time, title, expt, col, path, recent): \"\"\"\\b _____ _____ ______ /", "col = map((lambda x: 'radio_' + str(x)), filter(None, col.split('#'))) dj_pd = gm.kw_mining(gm.df_pk(path), u'radio_dj',", "career mode\"\"\" gm.care_mode(ctx.obj['final_pd'], ctx.obj['path'], size, perc, ratio) return @gcf.command() @click.pass_context def statistic(ctx): \"\"\"Show", "filter(None, prog.split('#'))) title_pd = gm.kw_mining(prog_pd, u'radio_title', filter(None, title.split('#'))) time_pd = gm.timing_mining(title_pd, time[0], time[1])", "| | \\_____| \\_____| |_| G-Cores(g-cores.com) Fans --- A Gadio Info Tool \"\"\"", ". import gcf_mining as gm from . 
import gcf_scrape as gs @click.group(invoke_without_command=True, no_args_is_help=True)", "= path return if ctx.invoked_subcommand == 'statistic': ctx.obj['final_pd'] = final_pd return if ctx.invoked_subcommand", "as -p \\'gadio\\'.\\ Use #, if you need multiple programs.') @click.option('--title', '-tt', default='',", "| | |__ | | |_ | | | | __| | |__|", "@click.option('--size', '-s', default='Y', type=click.Choice(['Y', 'Q', 'M']), help='Choice the statistic size: Y(year),Q(quarter),M(month).') @click.option('--perc', '-p',", "\\'gadio\\'.\\ Use #, if you need multiple programs.') @click.option('--title', '-tt', default='', help=u'Search by", "time[1]) final_pd = gm.recent(time_pd, recent) if expt: final_pd.to_csv('./gcf.csv', encoding='utf-8', index=False) if ctx.invoked_subcommand ==", "default=False, is_flag=True, help='Show data with the percentage of whole set.') @click.option('--ratio', '-r', default=2,", "with the career mode\"\"\" gm.care_mode(ctx.obj['final_pd'], ctx.obj['path'], size, perc, ratio) return @gcf.command() @click.pass_context def", "\"\"\"Update data to the last gadio\"\"\" gs.scrape_radio_list(deep) return @gcf.command() @click.option('--size', '-s', default='Y', type=click.Choice(['Y',", "time, such as -t 20180101 20180302') @click.option('--expt', '-e', default=False, is_flag=True, help='Export data to", "'-p', default=False, is_flag=True, help='Show data with the percentage of whole set.') @click.option('--ratio', '-r',", "@click.pass_context def gcf(ctx, dj, prog, time, title, expt, col, path, recent): \"\"\"\\b _____", "_____ _____ ______ / ____| / ____| | ____| | | __ |", "the last gadio\"\"\" gs.scrape_radio_list(deep) return @gcf.command() @click.option('--size', '-s', default='Y', type=click.Choice(['Y', 'Q', 'M']), help='Choice", "help='Reload the whole data.') @click.pass_context def update(ctx, deep): \"\"\"Update data to the last", "ctx.obj['final_pd'] = final_pd return if ctx.invoked_subcommand is None: gm.df_print(final_pd, 
col) return return @gcf.command()", "career(ctx, size, perc, ratio): \"\"\"Show results with the career mode\"\"\" gm.care_mode(ctx.obj['final_pd'], ctx.obj['path'], size,", "as gs @click.group(invoke_without_command=True, no_args_is_help=True) @click.option('--dj', '-d', default='', help=u'Search by dj, such as -d", "by program, such as -p \\'gadio\\'.\\ Use #, if you need multiple programs.')", "index=False) if ctx.invoked_subcommand == 'career': ctx.obj['final_pd'] = final_pd ctx.obj['path'] = path return if", "return @gcf.command() @click.option('--size', '-s', default='Y', type=click.Choice(['Y', 'Q', 'M']), help='Choice the statistic size: Y(year),Q(quarter),M(month).')", "\"\"\"Show results with the career mode\"\"\" gm.care_mode(ctx.obj['final_pd'], ctx.obj['path'], size, perc, ratio) return @gcf.command()", "gcf(ctx, dj, prog, time, title, expt, col, path, recent): \"\"\"\\b _____ _____ ______", "gm.kw_mining(prog_pd, u'radio_title', filter(None, title.split('#'))) time_pd = gm.timing_mining(title_pd, time[0], time[1]) final_pd = gm.recent(time_pd, recent)", "as -tb \\'date#title#dj\\'') @click.option('--path', '-pt', default='', help='Give the path to loading another cvs", "\\'的\\'.') @click.option('--time', '-t', nargs=2, default=('19850611', '20850611'), help='Search by release time, such as -t", "title.split('#'))) time_pd = gm.timing_mining(title_pd, time[0], time[1]) final_pd = gm.recent(time_pd, recent) if expt: final_pd.to_csv('./gcf.csv',", "'-r', default=2, type=float, help='Graphic Ratio. 
Smaller value, more width graphic.') @click.pass_context def career(ctx,", "program, such as -tt \\'的\\'.') @click.option('--time', '-t', nargs=2, default=('19850611', '20850611'), help='Search by release", "\"\"\"Show results with statistic mode\"\"\" gm.stat_mode(ctx.obj['final_pd']) return @gcf.command() @click.pass_context def drop(ctx): \"\"\"Drop all", "u'radio_program', filter(None, prog.split('#'))) title_pd = gm.kw_mining(prog_pd, u'radio_title', filter(None, title.split('#'))) time_pd = gm.timing_mining(title_pd, time[0],", "'-s', default='Y', type=click.Choice(['Y', 'Q', 'M']), help='Choice the statistic size: Y(year),Q(quarter),M(month).') @click.option('--perc', '-p', default=False,", "cvs file') @click.option('--col', '-c', default='date#title#dj', help='Show data with specific columns as -tb \\'date#title#dj\\'')", "release time, such as -t 20180101 20180302') @click.option('--expt', '-e', default=False, is_flag=True, help='Export data", "data to a cvs file') @click.option('--col', '-c', default='date#title#dj', help='Show data with specific columns", "gs @click.group(invoke_without_command=True, no_args_is_help=True) @click.option('--dj', '-d', default='', help=u'Search by dj, such as -d \\'西蒙#四十二\\'", "ctx.obj['path'] = path return if ctx.invoked_subcommand == 'statistic': ctx.obj['final_pd'] = final_pd return if", "____| / ____| | ____| | | __ | | | |__ |", "| |____ | | \\_____| \\_____| |_| G-Cores(g-cores.com) Fans --- A Gadio Info", "a cvs file') @click.option('--col', '-c', default='date#title#dj', help='Show data with specific columns as -tb", "'-c', default='date#title#dj', help='Show data with specific columns as -tb \\'date#title#dj\\'') @click.option('--path', '-pt', default='',", "utf-8 -*- import click from . import gcf_mining as gm from . 
import", "to the last gadio\"\"\" gs.scrape_radio_list(deep) return @gcf.command() @click.option('--size', '-s', default='Y', type=click.Choice(['Y', 'Q', 'M']),", "#, if you need multiple programs.') @click.option('--title', '-tt', default='', help=u'Search by program, such", "help='Search by release time, such as -t 20180101 20180302') @click.option('--expt', '-e', default=False, is_flag=True,", "to a cvs file') @click.option('--col', '-c', default='date#title#dj', help='Show data with specific columns as", "_____ ______ / ____| / ____| | ____| | | __ | |", "final_pd return if ctx.invoked_subcommand is None: gm.df_print(final_pd, col) return return @gcf.command() @click.option('--deep', default=False,", "default=('19850611', '20850611'), help='Search by release time, such as -t 20180101 20180302') @click.option('--expt', '-e',", "Info Tool \"\"\" if ctx.invoked_subcommand == 'update': return col = map((lambda x: 'radio_'", "deep): \"\"\"Update data to the last gadio\"\"\" gs.scrape_radio_list(deep) return @gcf.command() @click.option('--size', '-s', default='Y',", "multiple programs.') @click.option('--title', '-tt', default='', help=u'Search by program, such as -tt \\'的\\'.') @click.option('--time',", "ctx.invoked_subcommand == 'career': ctx.obj['final_pd'] = final_pd ctx.obj['path'] = path return if ctx.invoked_subcommand ==", "from . import gcf_mining as gm from . 
import gcf_scrape as gs @click.group(invoke_without_command=True,", "gm.kw_mining(dj_pd, u'radio_program', filter(None, prog.split('#'))) title_pd = gm.kw_mining(prog_pd, u'radio_title', filter(None, title.split('#'))) time_pd = gm.timing_mining(title_pd,", "help=u'Search by dj, such as -d \\'西蒙#四十二\\' or \\'西蒙\\'.\\ Use #, if you", "ratio) return @gcf.command() @click.pass_context def statistic(ctx): \"\"\"Show results with statistic mode\"\"\" gm.stat_mode(ctx.obj['final_pd']) return", "@gcf.command() @click.pass_context def statistic(ctx): \"\"\"Show results with statistic mode\"\"\" gm.stat_mode(ctx.obj['final_pd']) return @gcf.command() @click.pass_context", "help='Show the most recent gadios.') @click.pass_context def gcf(ctx, dj, prog, time, title, expt,", "None: gm.df_print(final_pd, col) return return @gcf.command() @click.option('--deep', default=False, is_flag=True, help='Reload the whole data.')", "help='Graphic Ratio. Smaller value, more width graphic.') @click.pass_context def career(ctx, size, perc, ratio):", "| | | __| | |__| | | |____ | | \\_____| \\_____|", "type=float, help='Graphic Ratio. 
Smaller value, more width graphic.') @click.pass_context def career(ctx, size, perc,", "as -tt \\'的\\'.') @click.option('--time', '-t', nargs=2, default=('19850611', '20850611'), help='Search by release time, such", "'M']), help='Choice the statistic size: Y(year),Q(quarter),M(month).') @click.option('--perc', '-p', default=False, is_flag=True, help='Show data with", "last gadio\"\"\" gs.scrape_radio_list(deep) return @gcf.command() @click.option('--size', '-s', default='Y', type=click.Choice(['Y', 'Q', 'M']), help='Choice the", "@click.option('--dj', '-d', default='', help=u'Search by dj, such as -d \\'西蒙#四十二\\' or \\'西蒙\\'.\\ Use", "gm.care_mode(ctx.obj['final_pd'], ctx.obj['path'], size, perc, ratio) return @gcf.command() @click.pass_context def statistic(ctx): \"\"\"Show results with", "size: Y(year),Q(quarter),M(month).') @click.option('--perc', '-p', default=False, is_flag=True, help='Show data with the percentage of whole", ". import gcf_scrape as gs @click.group(invoke_without_command=True, no_args_is_help=True) @click.option('--dj', '-d', default='', help=u'Search by dj,", "ctx.obj['path'], size, perc, ratio) return @gcf.command() @click.pass_context def statistic(ctx): \"\"\"Show results with statistic", "|_| G-Cores(g-cores.com) Fans --- A Gadio Info Tool \"\"\" if ctx.invoked_subcommand == 'update':", "gm.stat_mode(ctx.obj['final_pd']) return @gcf.command() @click.pass_context def drop(ctx): \"\"\"Drop all radio information\"\"\" gs.drop_raido() return def", "A Gadio Info Tool \"\"\" if ctx.invoked_subcommand == 'update': return col = map((lambda", "-*- coding: utf-8 -*- import click from . 
import gcf_mining as gm from", "@click.option('--expt', '-e', default=False, is_flag=True, help='Export data to a cvs file') @click.option('--col', '-c', default='date#title#dj',", "__ | | | |__ | | |_ | | | | __|", "is None: gm.df_print(final_pd, col) return return @gcf.command() @click.option('--deep', default=False, is_flag=True, help='Reload the whole", "@click.pass_context def drop(ctx): \"\"\"Drop all radio information\"\"\" gs.drop_raido() return def main(): gcf(obj={}) if", "set.') @click.option('--ratio', '-r', default=2, type=float, help='Graphic Ratio. Smaller value, more width graphic.') @click.pass_context", "statistic(ctx): \"\"\"Show results with statistic mode\"\"\" gm.stat_mode(ctx.obj['final_pd']) return @gcf.command() @click.pass_context def drop(ctx): \"\"\"Drop", "if you need a dj group.') @click.option('--prog', '-p', default='', help='Search by program, such", "programs.') @click.option('--title', '-tt', default='', help=u'Search by program, such as -tt \\'的\\'.') @click.option('--time', '-t',", "path to loading another cvs data.') @click.option('--recent', '-r', default=9999, help='Show the most recent", "u'radio_title', filter(None, title.split('#'))) time_pd = gm.timing_mining(title_pd, time[0], time[1]) final_pd = gm.recent(time_pd, recent) if", "time[0], time[1]) final_pd = gm.recent(time_pd, recent) if expt: final_pd.to_csv('./gcf.csv', encoding='utf-8', index=False) if ctx.invoked_subcommand", "filter(None, dj.split('#'))) prog_pd = gm.kw_mining(dj_pd, u'radio_program', filter(None, prog.split('#'))) title_pd = gm.kw_mining(prog_pd, u'radio_title', filter(None,", "| | __ | | | |__ | | |_ | | |", "the whole data.') @click.pass_context def update(ctx, deep): \"\"\"Update data to the last gadio\"\"\"", "= gm.recent(time_pd, recent) if expt: final_pd.to_csv('./gcf.csv', encoding='utf-8', index=False) if ctx.invoked_subcommand == 'career': ctx.obj['final_pd']", "______ / ____| / ____| | ____| | | __ | | |", "help='Export data to a cvs 
file') @click.option('--col', '-c', default='date#title#dj', help='Show data with specific", "|__ | | |_ | | | | __| | |__| | |", "perc, ratio) return @gcf.command() @click.pass_context def statistic(ctx): \"\"\"Show results with statistic mode\"\"\" gm.stat_mode(ctx.obj['final_pd'])", "def drop(ctx): \"\"\"Drop all radio information\"\"\" gs.drop_raido() return def main(): gcf(obj={}) if __name__", "by program, such as -tt \\'的\\'.') @click.option('--time', '-t', nargs=2, default=('19850611', '20850611'), help='Search by", "gadio\"\"\" gs.scrape_radio_list(deep) return @gcf.command() @click.option('--size', '-s', default='Y', type=click.Choice(['Y', 'Q', 'M']), help='Choice the statistic", "most recent gadios.') @click.pass_context def gcf(ctx, dj, prog, time, title, expt, col, path,", "as -d \\'西蒙#四十二\\' or \\'西蒙\\'.\\ Use #, if you need a dj group.')", "mode\"\"\" gm.care_mode(ctx.obj['final_pd'], ctx.obj['path'], size, perc, ratio) return @gcf.command() @click.pass_context def statistic(ctx): \"\"\"Show results", "| | | | __| | |__| | | |____ | | \\_____|", "recent gadios.') @click.pass_context def gcf(ctx, dj, prog, time, title, expt, col, path, recent):", "help='Show data with specific columns as -tb \\'date#title#dj\\'') @click.option('--path', '-pt', default='', help='Give the", "| __| | |__| | | |____ | | \\_____| \\_____| |_| G-Cores(g-cores.com)", "final_pd = gm.recent(time_pd, recent) if expt: final_pd.to_csv('./gcf.csv', encoding='utf-8', index=False) if ctx.invoked_subcommand == 'career':", "return if ctx.invoked_subcommand is None: gm.df_print(final_pd, col) return return @gcf.command() @click.option('--deep', default=False, is_flag=True,", "@click.option('--ratio', '-r', default=2, type=float, help='Graphic Ratio. 
Smaller value, more width graphic.') @click.pass_context def", "width graphic.') @click.pass_context def career(ctx, size, perc, ratio): \"\"\"Show results with the career", "you need multiple programs.') @click.option('--title', '-tt', default='', help=u'Search by program, such as -tt", "path return if ctx.invoked_subcommand == 'statistic': ctx.obj['final_pd'] = final_pd return if ctx.invoked_subcommand is", "____| | ____| | | __ | | | |__ | | |_", "data with the percentage of whole set.') @click.option('--ratio', '-r', default=2, type=float, help='Graphic Ratio.", "'Q', 'M']), help='Choice the statistic size: Y(year),Q(quarter),M(month).') @click.option('--perc', '-p', default=False, is_flag=True, help='Show data", "@click.option('--path', '-pt', default='', help='Give the path to loading another cvs data.') @click.option('--recent', '-r',", "@click.option('--time', '-t', nargs=2, default=('19850611', '20850611'), help='Search by release time, such as -t 20180101", "such as -t 20180101 20180302') @click.option('--expt', '-e', default=False, is_flag=True, help='Export data to a", "str(x)), filter(None, col.split('#'))) dj_pd = gm.kw_mining(gm.df_pk(path), u'radio_dj', filter(None, dj.split('#'))) prog_pd = gm.kw_mining(dj_pd, u'radio_program',", "click from . import gcf_mining as gm from . 
import gcf_scrape as gs", "\\'西蒙\\'.\\ Use #, if you need a dj group.') @click.option('--prog', '-p', default='', help='Search", "= gm.kw_mining(gm.df_pk(path), u'radio_dj', filter(None, dj.split('#'))) prog_pd = gm.kw_mining(dj_pd, u'radio_program', filter(None, prog.split('#'))) title_pd =", "ctx.obj['final_pd'] = final_pd ctx.obj['path'] = path return if ctx.invoked_subcommand == 'statistic': ctx.obj['final_pd'] =", "data.') @click.option('--recent', '-r', default=9999, help='Show the most recent gadios.') @click.pass_context def gcf(ctx, dj,", "\\'date#title#dj\\'') @click.option('--path', '-pt', default='', help='Give the path to loading another cvs data.') @click.option('--recent',", "help='Give the path to loading another cvs data.') @click.option('--recent', '-r', default=9999, help='Show the", "encoding='utf-8', index=False) if ctx.invoked_subcommand == 'career': ctx.obj['final_pd'] = final_pd ctx.obj['path'] = path return", "@click.option('--prog', '-p', default='', help='Search by program, such as -p \\'gadio\\'.\\ Use #, if", "of whole set.') @click.option('--ratio', '-r', default=2, type=float, help='Graphic Ratio. Smaller value, more width", "if ctx.invoked_subcommand == 'statistic': ctx.obj['final_pd'] = final_pd return if ctx.invoked_subcommand is None: gm.df_print(final_pd,", "| \\_____| \\_____| |_| G-Cores(g-cores.com) Fans --- A Gadio Info Tool \"\"\" if", "the career mode\"\"\" gm.care_mode(ctx.obj['final_pd'], ctx.obj['path'], size, perc, ratio) return @gcf.command() @click.pass_context def statistic(ctx):", "coding: utf-8 -*- import click from . 
import gcf_mining as gm from .", "|____ | | \\_____| \\_____| |_| G-Cores(g-cores.com) Fans --- A Gadio Info Tool", "no_args_is_help=True) @click.option('--dj', '-d', default='', help=u'Search by dj, such as -d \\'西蒙#四十二\\' or \\'西蒙\\'.\\", "20180101 20180302') @click.option('--expt', '-e', default=False, is_flag=True, help='Export data to a cvs file') @click.option('--col',", "\\_____| |_| G-Cores(g-cores.com) Fans --- A Gadio Info Tool \"\"\" if ctx.invoked_subcommand ==", "default='', help=u'Search by program, such as -tt \\'的\\'.') @click.option('--time', '-t', nargs=2, default=('19850611', '20850611'),", "@gcf.command() @click.option('--size', '-s', default='Y', type=click.Choice(['Y', 'Q', 'M']), help='Choice the statistic size: Y(year),Q(quarter),M(month).') @click.option('--perc',", "@gcf.command() @click.pass_context def drop(ctx): \"\"\"Drop all radio information\"\"\" gs.drop_raido() return def main(): gcf(obj={})", "title_pd = gm.kw_mining(prog_pd, u'radio_title', filter(None, title.split('#'))) time_pd = gm.timing_mining(title_pd, time[0], time[1]) final_pd =", "@click.option('--col', '-c', default='date#title#dj', help='Show data with specific columns as -tb \\'date#title#dj\\'') @click.option('--path', '-pt',", "expt, col, path, recent): \"\"\"\\b _____ _____ ______ / ____| / ____| |", "more width graphic.') @click.pass_context def career(ctx, size, perc, ratio): \"\"\"Show results with the", "python # -*- coding: utf-8 -*- import click from . 
import gcf_mining as", "time_pd = gm.timing_mining(title_pd, time[0], time[1]) final_pd = gm.recent(time_pd, recent) if expt: final_pd.to_csv('./gcf.csv', encoding='utf-8',", "-t 20180101 20180302') @click.option('--expt', '-e', default=False, is_flag=True, help='Export data to a cvs file')", "data.') @click.pass_context def update(ctx, deep): \"\"\"Update data to the last gadio\"\"\" gs.scrape_radio_list(deep) return", "file') @click.option('--col', '-c', default='date#title#dj', help='Show data with specific columns as -tb \\'date#title#dj\\'') @click.option('--path',", "such as -p \\'gadio\\'.\\ Use #, if you need multiple programs.') @click.option('--title', '-tt',", "dj_pd = gm.kw_mining(gm.df_pk(path), u'radio_dj', filter(None, dj.split('#'))) prog_pd = gm.kw_mining(dj_pd, u'radio_program', filter(None, prog.split('#'))) title_pd", "mode\"\"\" gm.stat_mode(ctx.obj['final_pd']) return @gcf.command() @click.pass_context def drop(ctx): \"\"\"Drop all radio information\"\"\" gs.drop_raido() return", "Use #, if you need a dj group.') @click.option('--prog', '-p', default='', help='Search by", "= gm.kw_mining(prog_pd, u'radio_title', filter(None, title.split('#'))) time_pd = gm.timing_mining(title_pd, time[0], time[1]) final_pd = gm.recent(time_pd,", "by release time, such as -t 20180101 20180302') @click.option('--expt', '-e', default=False, is_flag=True, help='Export", "Fans --- A Gadio Info Tool \"\"\" if ctx.invoked_subcommand == 'update': return col", "update(ctx, deep): \"\"\"Update data to the last gadio\"\"\" gs.scrape_radio_list(deep) return @gcf.command() @click.option('--size', '-s',", "u'radio_dj', filter(None, dj.split('#'))) prog_pd = gm.kw_mining(dj_pd, u'radio_program', filter(None, prog.split('#'))) title_pd = gm.kw_mining(prog_pd, u'radio_title',", "'-r', default=9999, help='Show the most recent gadios.') @click.pass_context def gcf(ctx, dj, prog, time,", "\\_____| \\_____| |_| G-Cores(g-cores.com) Fans --- A Gadio Info Tool \"\"\" if 
ctx.invoked_subcommand", "help=u'Search by program, such as -tt \\'的\\'.') @click.option('--time', '-t', nargs=2, default=('19850611', '20850611'), help='Search", "prog_pd = gm.kw_mining(dj_pd, u'radio_program', filter(None, prog.split('#'))) title_pd = gm.kw_mining(prog_pd, u'radio_title', filter(None, title.split('#'))) time_pd", "= gm.timing_mining(title_pd, time[0], time[1]) final_pd = gm.recent(time_pd, recent) if expt: final_pd.to_csv('./gcf.csv', encoding='utf-8', index=False)", "x: 'radio_' + str(x)), filter(None, col.split('#'))) dj_pd = gm.kw_mining(gm.df_pk(path), u'radio_dj', filter(None, dj.split('#'))) prog_pd", "help='Search by program, such as -p \\'gadio\\'.\\ Use #, if you need multiple", "-p \\'gadio\\'.\\ Use #, if you need multiple programs.') @click.option('--title', '-tt', default='', help=u'Search", "-tb \\'date#title#dj\\'') @click.option('--path', '-pt', default='', help='Give the path to loading another cvs data.')", "whole set.') @click.option('--ratio', '-r', default=2, type=float, help='Graphic Ratio. 
Smaller value, more width graphic.')", "need a dj group.') @click.option('--prog', '-p', default='', help='Search by program, such as -p", "ratio): \"\"\"Show results with the career mode\"\"\" gm.care_mode(ctx.obj['final_pd'], ctx.obj['path'], size, perc, ratio) return", "path, recent): \"\"\"\\b _____ _____ ______ / ____| / ____| | ____| |", "statistic mode\"\"\" gm.stat_mode(ctx.obj['final_pd']) return @gcf.command() @click.pass_context def drop(ctx): \"\"\"Drop all radio information\"\"\" gs.drop_raido()", "perc, ratio): \"\"\"Show results with the career mode\"\"\" gm.care_mode(ctx.obj['final_pd'], ctx.obj['path'], size, perc, ratio)", "if you need multiple programs.') @click.option('--title', '-tt', default='', help=u'Search by program, such as", "if ctx.invoked_subcommand == 'career': ctx.obj['final_pd'] = final_pd ctx.obj['path'] = path return if ctx.invoked_subcommand", "columns as -tb \\'date#title#dj\\'') @click.option('--path', '-pt', default='', help='Give the path to loading another", "to loading another cvs data.') @click.option('--recent', '-r', default=9999, help='Show the most recent gadios.')", "'-d', default='', help=u'Search by dj, such as -d \\'西蒙#四十二\\' or \\'西蒙\\'.\\ Use #,", "the path to loading another cvs data.') @click.option('--recent', '-r', default=9999, help='Show the most", "recent): \"\"\"\\b _____ _____ ______ / ____| / ____| | ____| | |", "Ratio. Smaller value, more width graphic.') @click.pass_context def career(ctx, size, perc, ratio): \"\"\"Show", "-*- import click from . import gcf_mining as gm from . 
import gcf_scrape", "|__| | | |____ | | \\_____| \\_____| |_| G-Cores(g-cores.com) Fans --- A", "'20850611'), help='Search by release time, such as -t 20180101 20180302') @click.option('--expt', '-e', default=False,", "@click.option('--title', '-tt', default='', help=u'Search by program, such as -tt \\'的\\'.') @click.option('--time', '-t', nargs=2,", "as -t 20180101 20180302') @click.option('--expt', '-e', default=False, is_flag=True, help='Export data to a cvs", "the statistic size: Y(year),Q(quarter),M(month).') @click.option('--perc', '-p', default=False, is_flag=True, help='Show data with the percentage", "Smaller value, more width graphic.') @click.pass_context def career(ctx, size, perc, ratio): \"\"\"Show results" ]
[ "response.status_code = 400 return response res = traffic.save_trafficObject(body) if (res['status'] == 200): response", "ALL RIGHTS RESERVED. # # Licensed under the Apache License, Version 2.0 (the", "KIND, either express or implied. # See the License for the specific language", "Unless required by applicable law or agreed to in writing, software # distributed", "methods=['POST']) def generate_tgo(): try: body = json.loads(request.data) except ValueError: response = jsonify(\"JSON format", "return response res = traffic.save_trafficObject(body) if (res['status'] == 200): response = jsonify({\"resource_uuid\": res['uuid']})", "import traffic LOG = logging.getLogger(os.path.basename(__file__)) app = Flask(__name__) # Generate traffic generation object", "traffic generation objects @app.route('/api/trafficgen/v1/trafficObject', methods=['GET']) def get_list(): res = traffic.list_trafficObjects() response = jsonify(res['data'])", "jsonify({\"resource_uuid\": res['uuid']}) else: response = jsonify(res['message']) response.status_code = res['status'] return response # Get", "get traffic flow status return \"Getting traffic flow status from id \" +", "error in request parameters\") response.status_code = 400 return response res = traffic.save_trafficObject(body) if", "TODO create a traffic flow from a traffic generation object return \"Creating traffic", "import logging import simplejson as json from flask import Flask, jsonify, request from", "this file except in compliance with the License. # You may obtain a", "funded by the European Commission under Grant number 761493 through # the Horizon", "def get_tgo(resource_uuid): res = traffic.get_trafficObject(resource_uuid) response = jsonify(res['data']) response.status_code = res['status'] return response", "2020 and 5G-PPP programmes. 
The authors would like to # acknowledge the contributions", "Start/Stops existing traffic flow @app.route('/api/trafficgen/v1/flows/<int:flow_uuid>', methods=['PUT']) def manage_flow(flow_uuid): # TODO start or stop", "# Get list of traffic generation objects @app.route('/api/trafficgen/v1/trafficObject', methods=['GET']) def get_list(): res =", "authors would like to # acknowledge the contributions of their colleagues of the", "ANY KIND, either express or implied. # See the License for the specific", "request from tngsdk.traffic import traffic LOG = logging.getLogger(os.path.basename(__file__)) app = Flask(__name__) # Generate", "to endorse or promote # products derived from this software without specific prior", "traffic flow from a traffic generation object return \"Creating traffic flow from existing", "generation object @app.route('/api/trafficgen/v1/flows/<int:resource_uuid>', methods=['POST']) def generate_flow(resource_uuid): # TODO create a traffic flow from", "response.status_code = res['status'] return response # Delete traffic generation object @app.route('/api/trafficgen/v1/trafficObject/<resource_uuid>', methods=['DELETE']) def", "(res['status'] == 200): response = jsonify({\"resource_uuid\": res['uuid']}) else: response = jsonify(res['message']) response.status_code =", "a traffic generation object return \"Creating traffic flow from existing traffic generation object", "@app.route('/api/trafficgen/v1/flows/<int:resource_uuid>', methods=['POST']) def generate_flow(resource_uuid): # TODO create a traffic flow from a traffic", "# Get traffic generation object @app.route('/api/trafficgen/v1/trafficObject/<resource_uuid>', methods=['GET']) def get_tgo(resource_uuid): res = traffic.get_trafficObject(resource_uuid) response", "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See", "# ALL RIGHTS RESERVED. 
# # Licensed under the Apache License, Version 2.0", "# Generate traffic generation object @app.route('/api/trafficgen/v1/trafficObject', methods=['POST']) def generate_tgo(): try: body = json.loads(request.data)", "IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "would like to # acknowledge the contributions of their colleagues of the SONATA", "colleagues of the SONATA # partner consortium (www.5gtango.eu). import os import logging import", "get_tgo(resource_uuid): res = traffic.get_trafficObject(resource_uuid) response = jsonify(res['data']) response.status_code = res['status'] return response #", "object return \"Creating traffic flow from existing traffic generation object \\ with id", "get_status(flow_uuid): # TODO get traffic flow status return \"Getting traffic flow status from", "+ str(flow_uuid) # Start/Stops existing traffic flow @app.route('/api/trafficgen/v1/flows/<int:flow_uuid>', methods=['PUT']) def manage_flow(flow_uuid): # TODO", "OF ANY KIND, either express or implied. # See the License for the", "response # Delete traffic generation object @app.route('/api/trafficgen/v1/trafficObject/<resource_uuid>', methods=['DELETE']) def delete_tgo(resource_uuid): res = traffic.delete_trafficObject(resource_uuid)", "methods=['POST']) def generate_flow(resource_uuid): # TODO create a traffic flow from a traffic generation", "QUOBIS SL. 
# nor the names of its contributors may be used to", "\"Deleting traffic flow with id \" + str(flow_uuid) def serve(args): app.run(host=args.service_address, port=int(args.service_port), debug=args.verbose)", "of the 5GTANGO project, # funded by the European Commission under Grant number", "def generate_flow(resource_uuid): # TODO create a traffic flow from a traffic generation object", "flow @app.route('/api/trafficgen/v1/flows/<int:flow_uuid>', methods=['PUT']) def manage_flow(flow_uuid): # TODO start or stop a traffic flow", "object \\ with id \" + str(resource_uuid) # Get traffic flow status @app.route('/api/trafficgen/v1/flows/<int:flow_uuid>',", "Delete traffic generation object @app.route('/api/trafficgen/v1/trafficObject/<resource_uuid>', methods=['DELETE']) def delete_tgo(resource_uuid): res = traffic.delete_trafficObject(resource_uuid) response =", "under Grant number 761493 through # the Horizon 2020 and 5G-PPP programmes. The", "flow from a traffic generation object return \"Creating traffic flow from existing traffic", "400 return response res = traffic.save_trafficObject(body) if (res['status'] == 200): response = jsonify({\"resource_uuid\":", "import Flask, jsonify, request from tngsdk.traffic import traffic LOG = logging.getLogger(os.path.basename(__file__)) app =", "remove_flow(flow_uuid): # TODO remove a traffic flow return \"Deleting traffic flow with id", "jsonify(res['message']) response.status_code = res['status'] return response # Get list of traffic generation objects", "methods=['GET']) def get_tgo(resource_uuid): res = traffic.get_trafficObject(resource_uuid) response = jsonify(res['data']) response.status_code = res['status'] return", "object @app.route('/api/trafficgen/v1/trafficObject/<resource_uuid>', methods=['GET']) def get_tgo(resource_uuid): res = traffic.get_trafficObject(resource_uuid) response = jsonify(res['data']) response.status_code =", "software # distributed under the License is distributed on an \"AS IS\" BASIS,", 
"delete_tgo(resource_uuid): res = traffic.delete_trafficObject(resource_uuid) response = jsonify(res['data']) response.status_code = res['status'] return response #", "traffic flow with id \" + str(flow_uuid) def serve(args): app.run(host=args.service_address, port=int(args.service_port), debug=args.verbose) return", "TODO remove a traffic flow return \"Deleting traffic flow with id \" +", "# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to", "response res = traffic.save_trafficObject(body) if (res['status'] == 200): response = jsonify({\"resource_uuid\": res['uuid']}) else:", "res = traffic.get_trafficObject(resource_uuid) response = jsonify(res['data']) response.status_code = res['status'] return response # Delete", "= logging.getLogger(os.path.basename(__file__)) app = Flask(__name__) # Generate traffic generation object @app.route('/api/trafficgen/v1/trafficObject', methods=['POST']) def", "@app.route('/api/trafficgen/v1/flows/<int:flow_uuid>', methods=['GET']) def get_status(flow_uuid): # TODO get traffic flow status return \"Getting traffic", "or stop a traffic flow return \"Starting/Stopping traffic flow with id \" +", "under the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES", "traffic flow status from id \" + str(flow_uuid) # Start/Stops existing traffic flow", "the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law", "import os import logging import simplejson as json from flask import Flask, jsonify,", "\"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express", "# TODO create a traffic flow from a traffic generation object return \"Creating", "= jsonify(res['data']) response.status_code = res['status'] return response # Delete traffic generation object @app.route('/api/trafficgen/v1/trafficObject/<resource_uuid>',", "traffic.save_trafficObject(body) if (res['status'] == 200): response = jsonify({\"resource_uuid\": 
res['uuid']}) else: response = jsonify(res['message'])", "+ str(resource_uuid) # Get traffic flow status @app.route('/api/trafficgen/v1/flows/<int:flow_uuid>', methods=['GET']) def get_status(flow_uuid): # TODO", "required by applicable law or agreed to in writing, software # distributed under", "LOG = logging.getLogger(os.path.basename(__file__)) app = Flask(__name__) # Generate traffic generation object @app.route('/api/trafficgen/v1/trafficObject', methods=['POST'])", "applicable law or agreed to in writing, software # distributed under the License", "traffic flow with id \" + str(flow_uuid) # Removes traffic flow @app.route('/api/trafficgen/v1/flows/<int:flow_uuid>', methods=['DELETE'])", "or agreed to in writing, software # distributed under the License is distributed", "specific prior written # permission. # # This work has been performed in", "str(flow_uuid) # Start/Stops existing traffic flow @app.route('/api/trafficgen/v1/flows/<int:flow_uuid>', methods=['PUT']) def manage_flow(flow_uuid): # TODO start", "This work has been performed in the framework of the 5GTANGO project, #", "jsonify(res['data']) response.status_code = res['status'] return response # Create traffic flow from existing traffic", "CONDITIONS OF ANY KIND, either express or implied. # See the License for", "under the License. # # Neither the name of the 5GTANGO, QUOBIS SL.", "partner consortium (www.5gtango.eu). import os import logging import simplejson as json from flask", "5GTANGO, QUOBIS SL. 
# nor the names of its contributors may be used", "response # Create traffic flow from existing traffic generation object @app.route('/api/trafficgen/v1/flows/<int:resource_uuid>', methods=['POST']) def", "response # Get list of traffic generation objects @app.route('/api/trafficgen/v1/trafficObject', methods=['GET']) def get_list(): res", "return \"Deleting traffic flow with id \" + str(flow_uuid) def serve(args): app.run(host=args.service_address, port=int(args.service_port),", "under the Apache License, Version 2.0 (the \"License\"); # you may not use", "create a traffic flow from a traffic generation object return \"Creating traffic flow", "prior written # permission. # # This work has been performed in the", "writing, software # distributed under the License is distributed on an \"AS IS\"", "traffic LOG = logging.getLogger(os.path.basename(__file__)) app = Flask(__name__) # Generate traffic generation object @app.route('/api/trafficgen/v1/trafficObject',", "You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #", "methods=['GET']) def get_list(): res = traffic.list_trafficObjects() response = jsonify(res['data']) response.status_code = res['status'] return", "License. # You may obtain a copy of the License at # #", "the 5GTANGO project, # funded by the European Commission under Grant number 761493", "the Horizon 2020 and 5G-PPP programmes. 
The authors would like to # acknowledge", "response.status_code = res['status'] return response # Get traffic generation object @app.route('/api/trafficgen/v1/trafficObject/<resource_uuid>', methods=['GET']) def", "return \"Starting/Stopping traffic flow with id \" + str(flow_uuid) # Removes traffic flow", "str(flow_uuid) # Removes traffic flow @app.route('/api/trafficgen/v1/flows/<int:flow_uuid>', methods=['DELETE']) def remove_flow(flow_uuid): # TODO remove a", "# TODO remove a traffic flow return \"Deleting traffic flow with id \"", "flow from existing traffic generation object \\ with id \" + str(resource_uuid) #", "its contributors may be used to endorse or promote # products derived from", "res['status'] return response # Get list of traffic generation objects @app.route('/api/trafficgen/v1/trafficObject', methods=['GET']) def", "response = jsonify(\"JSON format error in request parameters\") response.status_code = 400 return response", "compliance with the License. # You may obtain a copy of the License", "# # This work has been performed in the framework of the 5GTANGO", "and 5G-PPP programmes. The authors would like to # acknowledge the contributions of", "flow from existing traffic generation object @app.route('/api/trafficgen/v1/flows/<int:resource_uuid>', methods=['POST']) def generate_flow(resource_uuid): # TODO create", "of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable", "== 200): response = jsonify({\"resource_uuid\": res['uuid']}) else: response = jsonify(res['message']) response.status_code = res['status']", "Removes traffic flow @app.route('/api/trafficgen/v1/flows/<int:flow_uuid>', methods=['DELETE']) def remove_flow(flow_uuid): # TODO remove a traffic flow", "# products derived from this software without specific prior written # permission. 
#", "response = jsonify(res['data']) response.status_code = res['status'] return response # Delete traffic generation object", "# TODO get traffic flow status return \"Getting traffic flow status from id", "contributions of their colleagues of the SONATA # partner consortium (www.5gtango.eu). import os", "def delete_tgo(resource_uuid): res = traffic.delete_trafficObject(resource_uuid) response = jsonify(res['data']) response.status_code = res['status'] return response", "Generate traffic generation object @app.route('/api/trafficgen/v1/trafficObject', methods=['POST']) def generate_tgo(): try: body = json.loads(request.data) except", "response.status_code = res['status'] return response # Create traffic flow from existing traffic generation", "@app.route('/api/trafficgen/v1/flows/<int:flow_uuid>', methods=['PUT']) def manage_flow(flow_uuid): # TODO start or stop a traffic flow return", "name of the 5GTANGO, QUOBIS SL. # nor the names of its contributors", "flow return \"Starting/Stopping traffic flow with id \" + str(flow_uuid) # Removes traffic", "start or stop a traffic flow return \"Starting/Stopping traffic flow with id \"", "if (res['status'] == 200): response = jsonify({\"resource_uuid\": res['uuid']}) else: response = jsonify(res['message']) response.status_code", "remove a traffic flow return \"Deleting traffic flow with id \" + str(flow_uuid)", "not use this file except in compliance with the License. 
# You may", "traffic generation object return \"Creating traffic flow from existing traffic generation object \\", "by the European Commission under Grant number 761493 through # the Horizon 2020", "logging.getLogger(os.path.basename(__file__)) app = Flask(__name__) # Generate traffic generation object @app.route('/api/trafficgen/v1/trafficObject', methods=['POST']) def generate_tgo():", "generation object @app.route('/api/trafficgen/v1/trafficObject/<resource_uuid>', methods=['DELETE']) def delete_tgo(resource_uuid): res = traffic.delete_trafficObject(resource_uuid) response = jsonify(res['data']) response.status_code", "flow status from id \" + str(flow_uuid) # Start/Stops existing traffic flow @app.route('/api/trafficgen/v1/flows/<int:flow_uuid>',", "of the 5GTANGO, QUOBIS SL. # nor the names of its contributors may", "License, Version 2.0 (the \"License\"); # you may not use this file except", "flow status @app.route('/api/trafficgen/v1/flows/<int:flow_uuid>', methods=['GET']) def get_status(flow_uuid): # TODO get traffic flow status return", "RESERVED. 
# # Licensed under the Apache License, Version 2.0 (the \"License\"); #", "distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY", "@app.route('/api/trafficgen/v1/trafficObject/<resource_uuid>', methods=['DELETE']) def delete_tgo(resource_uuid): res = traffic.delete_trafficObject(resource_uuid) response = jsonify(res['data']) response.status_code = res['status']", "Get traffic flow status @app.route('/api/trafficgen/v1/flows/<int:flow_uuid>', methods=['GET']) def get_status(flow_uuid): # TODO get traffic flow", "= res['status'] return response # Get traffic generation object @app.route('/api/trafficgen/v1/trafficObject/<resource_uuid>', methods=['GET']) def get_tgo(resource_uuid):", "res = traffic.delete_trafficObject(resource_uuid) response = jsonify(res['data']) response.status_code = res['status'] return response # Create", "res = traffic.save_trafficObject(body) if (res['status'] == 200): response = jsonify({\"resource_uuid\": res['uuid']}) else: response", "# you may not use this file except in compliance with the License.", "# limitations under the License. # # Neither the name of the 5GTANGO,", "acknowledge the contributions of their colleagues of the SONATA # partner consortium (www.5gtango.eu).", "agreed to in writing, software # distributed under the License is distributed on", "like to # acknowledge the contributions of their colleagues of the SONATA #", "(the \"License\"); # you may not use this file except in compliance with", "json from flask import Flask, jsonify, request from tngsdk.traffic import traffic LOG =", "return response # Delete traffic generation object @app.route('/api/trafficgen/v1/trafficObject/<resource_uuid>', methods=['DELETE']) def delete_tgo(resource_uuid): res =", "Get traffic generation object @app.route('/api/trafficgen/v1/trafficObject/<resource_uuid>', methods=['GET']) def get_tgo(resource_uuid): res = traffic.get_trafficObject(resource_uuid) response =", "the 5GTANGO, QUOBIS SL. 
# nor the names of its contributors may be", "get_list(): res = traffic.list_trafficObjects() response = jsonify(res['data']) response.status_code = res['status'] return response #", "# Unless required by applicable law or agreed to in writing, software #", "status return \"Getting traffic flow status from id \" + str(flow_uuid) # Start/Stops", "response.status_code = res['status'] return response # Get list of traffic generation objects @app.route('/api/trafficgen/v1/trafficObject',", "by applicable law or agreed to in writing, software # distributed under the", "Flask(__name__) # Generate traffic generation object @app.route('/api/trafficgen/v1/trafficObject', methods=['POST']) def generate_tgo(): try: body =", "copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by", "res['uuid']}) else: response = jsonify(res['message']) response.status_code = res['status'] return response # Get list", "def remove_flow(flow_uuid): # TODO remove a traffic flow return \"Deleting traffic flow with", "traffic flow status return \"Getting traffic flow status from id \" + str(flow_uuid)", "# permission. # # This work has been performed in the framework of", "= jsonify(\"JSON format error in request parameters\") response.status_code = 400 return response res", "# Create traffic flow from existing traffic generation object @app.route('/api/trafficgen/v1/flows/<int:resource_uuid>', methods=['POST']) def generate_flow(resource_uuid):", "file except in compliance with the License. # You may obtain a copy", "Grant number 761493 through # the Horizon 2020 and 5G-PPP programmes. The authors", "TODO start or stop a traffic flow return \"Starting/Stopping traffic flow with id", "\\ with id \" + str(resource_uuid) # Get traffic flow status @app.route('/api/trafficgen/v1/flows/<int:flow_uuid>', methods=['GET'])", "governing permissions and # limitations under the License. 
# # Neither the name", "def get_list(): res = traffic.list_trafficObjects() response = jsonify(res['data']) response.status_code = res['status'] return response", "and # limitations under the License. # # Neither the name of the", "object @app.route('/api/trafficgen/v1/flows/<int:resource_uuid>', methods=['POST']) def generate_flow(resource_uuid): # TODO create a traffic flow from a", "\"Starting/Stopping traffic flow with id \" + str(flow_uuid) # Removes traffic flow @app.route('/api/trafficgen/v1/flows/<int:flow_uuid>',", "Flask, jsonify, request from tngsdk.traffic import traffic LOG = logging.getLogger(os.path.basename(__file__)) app = Flask(__name__)", "License for the specific language governing permissions and # limitations under the License.", "response = jsonify(res['message']) response.status_code = res['status'] return response # Get list of traffic", "from tngsdk.traffic import traffic LOG = logging.getLogger(os.path.basename(__file__)) app = Flask(__name__) # Generate traffic", "with id \" + str(resource_uuid) # Get traffic flow status @app.route('/api/trafficgen/v1/flows/<int:flow_uuid>', methods=['GET']) def", "to in writing, software # distributed under the License is distributed on an", "existing traffic flow @app.route('/api/trafficgen/v1/flows/<int:flow_uuid>', methods=['PUT']) def manage_flow(flow_uuid): # TODO start or stop a", "<gh_stars>0 # Copyright (c) 2018 5GTANGO, QUOBIS SL. # ALL RIGHTS RESERVED. #", "implied. 
# See the License for the specific language governing permissions and #", "= traffic.list_trafficObjects() response = jsonify(res['data']) response.status_code = res['status'] return response # Get traffic", "\"License\"); # you may not use this file except in compliance with the", "obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless", "promote # products derived from this software without specific prior written # permission.", "= traffic.save_trafficObject(body) if (res['status'] == 200): response = jsonify({\"resource_uuid\": res['uuid']}) else: response =", "5GTANGO, QUOBIS SL. # ALL RIGHTS RESERVED. # # Licensed under the Apache", "traffic.list_trafficObjects() response = jsonify(res['data']) response.status_code = res['status'] return response # Get traffic generation", "generation object @app.route('/api/trafficgen/v1/trafficObject', methods=['POST']) def generate_tgo(): try: body = json.loads(request.data) except ValueError: response", "5GTANGO project, # funded by the European Commission under Grant number 761493 through", "res['status'] return response # Delete traffic generation object @app.route('/api/trafficgen/v1/trafficObject/<resource_uuid>', methods=['DELETE']) def delete_tgo(resource_uuid): res", "@app.route('/api/trafficgen/v1/trafficObject', methods=['POST']) def generate_tgo(): try: body = json.loads(request.data) except ValueError: response = jsonify(\"JSON", "761493 through # the Horizon 2020 and 5G-PPP programmes. The authors would like", "status from id \" + str(flow_uuid) # Start/Stops existing traffic flow @app.route('/api/trafficgen/v1/flows/<int:flow_uuid>', methods=['PUT'])", "or implied. 
# See the License for the specific language governing permissions and", "Apache License, Version 2.0 (the \"License\"); # you may not use this file", "= res['status'] return response # Get list of traffic generation objects @app.route('/api/trafficgen/v1/trafficObject', methods=['GET'])", "OR CONDITIONS OF ANY KIND, either express or implied. # See the License", "may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #", "permissions and # limitations under the License. # # Neither the name of", "http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing,", "in writing, software # distributed under the License is distributed on an \"AS", "= res['status'] return response # Create traffic flow from existing traffic generation object", "methods=['PUT']) def manage_flow(flow_uuid): # TODO start or stop a traffic flow return \"Starting/Stopping", "try: body = json.loads(request.data) except ValueError: response = jsonify(\"JSON format error in request", "a traffic flow from a traffic generation object return \"Creating traffic flow from", "flow @app.route('/api/trafficgen/v1/flows/<int:flow_uuid>', methods=['DELETE']) def remove_flow(flow_uuid): # TODO remove a traffic flow return \"Deleting", "the European Commission under Grant number 761493 through # the Horizon 2020 and", "# See the License for the specific language governing permissions and # limitations", "the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR", "methods=['DELETE']) def remove_flow(flow_uuid): # TODO remove a traffic flow return \"Deleting traffic flow", "# Get traffic flow status @app.route('/api/trafficgen/v1/flows/<int:flow_uuid>', methods=['GET']) def get_status(flow_uuid): # TODO get traffic", "the name of the 5GTANGO, QUOBIS SL. 
# nor the names of its", "traffic generation object @app.route('/api/trafficgen/v1/trafficObject', methods=['POST']) def generate_tgo(): try: body = json.loads(request.data) except ValueError:", "= traffic.delete_trafficObject(resource_uuid) response = jsonify(res['data']) response.status_code = res['status'] return response # Create traffic", "# Copyright (c) 2018 5GTANGO, QUOBIS SL. # ALL RIGHTS RESERVED. # #", "the Apache License, Version 2.0 (the \"License\"); # you may not use this", "you may not use this file except in compliance with the License. #", "return response # Create traffic flow from existing traffic generation object @app.route('/api/trafficgen/v1/flows/<int:resource_uuid>', methods=['POST'])", "def manage_flow(flow_uuid): # TODO start or stop a traffic flow return \"Starting/Stopping traffic", "methods=['DELETE']) def delete_tgo(resource_uuid): res = traffic.delete_trafficObject(resource_uuid) response = jsonify(res['data']) response.status_code = res['status'] return", "SONATA # partner consortium (www.5gtango.eu). 
import os import logging import simplejson as json", "object @app.route('/api/trafficgen/v1/trafficObject/<resource_uuid>', methods=['DELETE']) def delete_tgo(resource_uuid): res = traffic.delete_trafficObject(resource_uuid) response = jsonify(res['data']) response.status_code =", "traffic generation object @app.route('/api/trafficgen/v1/trafficObject/<resource_uuid>', methods=['GET']) def get_tgo(resource_uuid): res = traffic.get_trafficObject(resource_uuid) response = jsonify(res['data'])", "the framework of the 5GTANGO project, # funded by the European Commission under", "from flask import Flask, jsonify, request from tngsdk.traffic import traffic LOG = logging.getLogger(os.path.basename(__file__))", "methods=['GET']) def get_status(flow_uuid): # TODO get traffic flow status return \"Getting traffic flow", "\"Creating traffic flow from existing traffic generation object \\ with id \" +", "use this file except in compliance with the License. # You may obtain", "# Removes traffic flow @app.route('/api/trafficgen/v1/flows/<int:flow_uuid>', methods=['DELETE']) def remove_flow(flow_uuid): # TODO remove a traffic", "= jsonify(res['data']) response.status_code = res['status'] return response # Create traffic flow from existing", "generation objects @app.route('/api/trafficgen/v1/trafficObject', methods=['GET']) def get_list(): res = traffic.list_trafficObjects() response = jsonify(res['data']) response.status_code", "+ str(flow_uuid) # Removes traffic flow @app.route('/api/trafficgen/v1/flows/<int:flow_uuid>', methods=['DELETE']) def remove_flow(flow_uuid): # TODO remove", "used to endorse or promote # products derived from this software without specific", "be used to endorse or promote # products derived from this software without", "# Licensed under the Apache License, Version 2.0 (the \"License\"); # you may", "from this software without specific prior written # permission. 
# # This work", "id \" + str(flow_uuid) # Start/Stops existing traffic flow @app.route('/api/trafficgen/v1/flows/<int:flow_uuid>', methods=['PUT']) def manage_flow(flow_uuid):", "contributors may be used to endorse or promote # products derived from this", "generation object \\ with id \" + str(resource_uuid) # Get traffic flow status", "2.0 (the \"License\"); # you may not use this file except in compliance", "License. # # Neither the name of the 5GTANGO, QUOBIS SL. # nor", "for the specific language governing permissions and # limitations under the License. #", "# Start/Stops existing traffic flow @app.route('/api/trafficgen/v1/flows/<int:flow_uuid>', methods=['PUT']) def manage_flow(flow_uuid): # TODO start or", "the contributions of their colleagues of the SONATA # partner consortium (www.5gtango.eu). import", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the", "format error in request parameters\") response.status_code = 400 return response res = traffic.save_trafficObject(body)", "traffic flow @app.route('/api/trafficgen/v1/flows/<int:flow_uuid>', methods=['PUT']) def manage_flow(flow_uuid): # TODO start or stop a traffic", "# nor the names of its contributors may be used to endorse or", "# # Unless required by applicable law or agreed to in writing, software", "app = Flask(__name__) # Generate traffic generation object @app.route('/api/trafficgen/v1/trafficObject', methods=['POST']) def generate_tgo(): try:", "express or implied. # See the License for the specific language governing permissions", "specific language governing permissions and # limitations under the License. # # Neither", "request parameters\") response.status_code = 400 return response res = traffic.save_trafficObject(body) if (res['status'] ==", "# Neither the name of the 5GTANGO, QUOBIS SL. # nor the names", "res = traffic.list_trafficObjects() response = jsonify(res['data']) response.status_code = res['status'] return response # Get", "either express or implied. 
# See the License for the specific language governing", "generation object return \"Creating traffic flow from existing traffic generation object \\ with", "limitations under the License. # # Neither the name of the 5GTANGO, QUOBIS", "framework of the 5GTANGO project, # funded by the European Commission under Grant", "Licensed under the Apache License, Version 2.0 (the \"License\"); # you may not", "an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either", "stop a traffic flow return \"Starting/Stopping traffic flow with id \" + str(flow_uuid)", "may be used to endorse or promote # products derived from this software", "a traffic flow return \"Deleting traffic flow with id \" + str(flow_uuid) def", "of traffic generation objects @app.route('/api/trafficgen/v1/trafficObject', methods=['GET']) def get_list(): res = traffic.list_trafficObjects() response =", "Neither the name of the 5GTANGO, QUOBIS SL. # nor the names of", "res['status'] return response # Get traffic generation object @app.route('/api/trafficgen/v1/trafficObject/<resource_uuid>', methods=['GET']) def get_tgo(resource_uuid): res", "traffic.delete_trafficObject(resource_uuid) response = jsonify(res['data']) response.status_code = res['status'] return response # Create traffic flow", "the names of its contributors may be used to endorse or promote #", "The authors would like to # acknowledge the contributions of their colleagues of", "flow status return \"Getting traffic flow status from id \" + str(flow_uuid) #", "has been performed in the framework of the 5GTANGO project, # funded by", "json.loads(request.data) except ValueError: response = jsonify(\"JSON format error in request parameters\") response.status_code =", "traffic flow status @app.route('/api/trafficgen/v1/flows/<int:flow_uuid>', methods=['GET']) def get_status(flow_uuid): # TODO get traffic flow status", "the License. 
# You may obtain a copy of the License at #", "# distributed under the License is distributed on an \"AS IS\" BASIS, #", "Create traffic flow from existing traffic generation object @app.route('/api/trafficgen/v1/flows/<int:resource_uuid>', methods=['POST']) def generate_flow(resource_uuid): #", "is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF", "their colleagues of the SONATA # partner consortium (www.5gtango.eu). import os import logging", "traffic flow from existing traffic generation object @app.route('/api/trafficgen/v1/flows/<int:resource_uuid>', methods=['POST']) def generate_flow(resource_uuid): # TODO", "manage_flow(flow_uuid): # TODO start or stop a traffic flow return \"Starting/Stopping traffic flow", "else: response = jsonify(res['message']) response.status_code = res['status'] return response # Get list of", "traffic flow from existing traffic generation object \\ with id \" + str(resource_uuid)", "existing traffic generation object @app.route('/api/trafficgen/v1/flows/<int:resource_uuid>', methods=['POST']) def generate_flow(resource_uuid): # TODO create a traffic", "number 761493 through # the Horizon 2020 and 5G-PPP programmes. 
The authors would", "response = jsonify({\"resource_uuid\": res['uuid']}) else: response = jsonify(res['message']) response.status_code = res['status'] return response", "from id \" + str(flow_uuid) # Start/Stops existing traffic flow @app.route('/api/trafficgen/v1/flows/<int:flow_uuid>', methods=['PUT']) def", "= jsonify({\"resource_uuid\": res['uuid']}) else: response = jsonify(res['message']) response.status_code = res['status'] return response #", "traffic generation object @app.route('/api/trafficgen/v1/flows/<int:resource_uuid>', methods=['POST']) def generate_flow(resource_uuid): # TODO create a traffic flow", "jsonify(res['data']) response.status_code = res['status'] return response # Delete traffic generation object @app.route('/api/trafficgen/v1/trafficObject/<resource_uuid>', methods=['DELETE'])", "logging import simplejson as json from flask import Flask, jsonify, request from tngsdk.traffic", "simplejson as json from flask import Flask, jsonify, request from tngsdk.traffic import traffic", "parameters\") response.status_code = 400 return response res = traffic.save_trafficObject(body) if (res['status'] == 200):", "to # acknowledge the contributions of their colleagues of the SONATA # partner", "nor the names of its contributors may be used to endorse or promote", "of its contributors may be used to endorse or promote # products derived", "European Commission under Grant number 761493 through # the Horizon 2020 and 5G-PPP", "= res['status'] return response # Delete traffic generation object @app.route('/api/trafficgen/v1/trafficObject/<resource_uuid>', methods=['DELETE']) def delete_tgo(resource_uuid):", "jsonify(res['data']) response.status_code = res['status'] return response # Get traffic generation object @app.route('/api/trafficgen/v1/trafficObject/<resource_uuid>', methods=['GET'])", "Get list of traffic generation objects @app.route('/api/trafficgen/v1/trafficObject', methods=['GET']) def get_list(): res = traffic.list_trafficObjects()", "of 
their colleagues of the SONATA # partner consortium (www.5gtango.eu). import os import", "@app.route('/api/trafficgen/v1/flows/<int:flow_uuid>', methods=['DELETE']) def remove_flow(flow_uuid): # TODO remove a traffic flow return \"Deleting traffic", "project, # funded by the European Commission under Grant number 761493 through #", "or promote # products derived from this software without specific prior written #", "(www.5gtango.eu). import os import logging import simplejson as json from flask import Flask,", "with the License. # You may obtain a copy of the License at", "return \"Getting traffic flow status from id \" + str(flow_uuid) # Start/Stops existing", "# partner consortium (www.5gtango.eu). import os import logging import simplejson as json from", "# # Licensed under the Apache License, Version 2.0 (the \"License\"); # you", "a traffic flow return \"Starting/Stopping traffic flow with id \" + str(flow_uuid) #", "res['status'] return response # Create traffic flow from existing traffic generation object @app.route('/api/trafficgen/v1/flows/<int:resource_uuid>',", "5G-PPP programmes. The authors would like to # acknowledge the contributions of their", "body = json.loads(request.data) except ValueError: response = jsonify(\"JSON format error in request parameters\")", "return response # Get traffic generation object @app.route('/api/trafficgen/v1/trafficObject/<resource_uuid>', methods=['GET']) def get_tgo(resource_uuid): res =", "work has been performed in the framework of the 5GTANGO project, # funded", "of the SONATA # partner consortium (www.5gtango.eu). 
import os import logging import simplejson", "= 400 return response res = traffic.save_trafficObject(body) if (res['status'] == 200): response =", "law or agreed to in writing, software # distributed under the License is", "list of traffic generation objects @app.route('/api/trafficgen/v1/trafficObject', methods=['GET']) def get_list(): res = traffic.list_trafficObjects() response", "the License for the specific language governing permissions and # limitations under the", "from existing traffic generation object \\ with id \" + str(resource_uuid) # Get", "from existing traffic generation object @app.route('/api/trafficgen/v1/flows/<int:resource_uuid>', methods=['POST']) def generate_flow(resource_uuid): # TODO create a", "= jsonify(res['message']) response.status_code = res['status'] return response # Get list of traffic generation", "generate_flow(resource_uuid): # TODO create a traffic flow from a traffic generation object return", "\" + str(flow_uuid) # Start/Stops existing traffic flow @app.route('/api/trafficgen/v1/flows/<int:flow_uuid>', methods=['PUT']) def manage_flow(flow_uuid): #", "return \"Creating traffic flow from existing traffic generation object \\ with id \"", "from a traffic generation object return \"Creating traffic flow from existing traffic generation", "on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,", "the License. # # Neither the name of the 5GTANGO, QUOBIS SL. #", "tngsdk.traffic import traffic LOG = logging.getLogger(os.path.basename(__file__)) app = Flask(__name__) # Generate traffic generation", "been performed in the framework of the 5GTANGO project, # funded by the", "Commission under Grant number 761493 through # the Horizon 2020 and 5G-PPP programmes.", "consortium (www.5gtango.eu). 
import os import logging import simplejson as json from flask import", "= jsonify(res['data']) response.status_code = res['status'] return response # Get traffic generation object @app.route('/api/trafficgen/v1/trafficObject/<resource_uuid>',", "id \" + str(flow_uuid) # Removes traffic flow @app.route('/api/trafficgen/v1/flows/<int:flow_uuid>', methods=['DELETE']) def remove_flow(flow_uuid): #", "SL. # nor the names of its contributors may be used to endorse", "@app.route('/api/trafficgen/v1/trafficObject/<resource_uuid>', methods=['GET']) def get_tgo(resource_uuid): res = traffic.get_trafficObject(resource_uuid) response = jsonify(res['data']) response.status_code = res['status']", "os import logging import simplejson as json from flask import Flask, jsonify, request", "traffic flow return \"Starting/Stopping traffic flow with id \" + str(flow_uuid) # Removes", "(c) 2018 5GTANGO, QUOBIS SL. # ALL RIGHTS RESERVED. # # Licensed under", "in compliance with the License. # You may obtain a copy of the", "Copyright (c) 2018 5GTANGO, QUOBIS SL. # ALL RIGHTS RESERVED. # # Licensed", "License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "as json from flask import Flask, jsonify, request from tngsdk.traffic import traffic LOG", "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
#", "= traffic.get_trafficObject(resource_uuid) response = jsonify(res['data']) response.status_code = res['status'] return response # Delete traffic", "@app.route('/api/trafficgen/v1/trafficObject', methods=['GET']) def get_list(): res = traffic.list_trafficObjects() response = jsonify(res['data']) response.status_code = res['status']", "at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed", "TODO get traffic flow status return \"Getting traffic flow status from id \"", "in request parameters\") response.status_code = 400 return response res = traffic.save_trafficObject(body) if (res['status']", "generate_tgo(): try: body = json.loads(request.data) except ValueError: response = jsonify(\"JSON format error in", "See the License for the specific language governing permissions and # limitations under", "BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "# TODO start or stop a traffic flow return \"Starting/Stopping traffic flow with", "Horizon 2020 and 5G-PPP programmes. The authors would like to # acknowledge the", "traffic flow return \"Deleting traffic flow with id \" + str(flow_uuid) def serve(args):", "objects @app.route('/api/trafficgen/v1/trafficObject', methods=['GET']) def get_list(): res = traffic.list_trafficObjects() response = jsonify(res['data']) response.status_code =", "RIGHTS RESERVED. # # Licensed under the Apache License, Version 2.0 (the \"License\");", "# Delete traffic generation object @app.route('/api/trafficgen/v1/trafficObject/<resource_uuid>', methods=['DELETE']) def delete_tgo(resource_uuid): res = traffic.delete_trafficObject(resource_uuid) response", "a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required", "traffic.get_trafficObject(resource_uuid) response = jsonify(res['data']) response.status_code = res['status'] return response # Delete traffic generation", "# # Neither the name of the 5GTANGO, QUOBIS SL. 
# nor the", "# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in", "language governing permissions and # limitations under the License. # # Neither the", "traffic generation object @app.route('/api/trafficgen/v1/trafficObject/<resource_uuid>', methods=['DELETE']) def delete_tgo(resource_uuid): res = traffic.delete_trafficObject(resource_uuid) response = jsonify(res['data'])", "object @app.route('/api/trafficgen/v1/trafficObject', methods=['POST']) def generate_tgo(): try: body = json.loads(request.data) except ValueError: response =", "jsonify(\"JSON format error in request parameters\") response.status_code = 400 return response res =", "flow with id \" + str(flow_uuid) # Removes traffic flow @app.route('/api/trafficgen/v1/flows/<int:flow_uuid>', methods=['DELETE']) def", "= json.loads(request.data) except ValueError: response = jsonify(\"JSON format error in request parameters\") response.status_code", "endorse or promote # products derived from this software without specific prior written", "programmes. 
The authors would like to # acknowledge the contributions of their colleagues", "str(resource_uuid) # Get traffic flow status @app.route('/api/trafficgen/v1/flows/<int:flow_uuid>', methods=['GET']) def get_status(flow_uuid): # TODO get", "response = jsonify(res['data']) response.status_code = res['status'] return response # Create traffic flow from", "import simplejson as json from flask import Flask, jsonify, request from tngsdk.traffic import", "= Flask(__name__) # Generate traffic generation object @app.route('/api/trafficgen/v1/trafficObject', methods=['POST']) def generate_tgo(): try: body", "response = jsonify(res['data']) response.status_code = res['status'] return response # Get traffic generation object", "flow return \"Deleting traffic flow with id \" + str(flow_uuid) def serve(args): app.run(host=args.service_address,", "ValueError: response = jsonify(\"JSON format error in request parameters\") response.status_code = 400 return", "without specific prior written # permission. # # This work has been performed", "status @app.route('/api/trafficgen/v1/flows/<int:flow_uuid>', methods=['GET']) def get_status(flow_uuid): # TODO get traffic flow status return \"Getting", "generation object @app.route('/api/trafficgen/v1/trafficObject/<resource_uuid>', methods=['GET']) def get_tgo(resource_uuid): res = traffic.get_trafficObject(resource_uuid) response = jsonify(res['data']) response.status_code", "Version 2.0 (the \"License\"); # you may not use this file except in", "software without specific prior written # permission. # # This work has been", "except in compliance with the License. # You may obtain a copy of", "response # Get traffic generation object @app.route('/api/trafficgen/v1/trafficObject/<resource_uuid>', methods=['GET']) def get_tgo(resource_uuid): res = traffic.get_trafficObject(resource_uuid)", "# the Horizon 2020 and 5G-PPP programmes. The authors would like to #", "the SONATA # partner consortium (www.5gtango.eu). 
import os import logging import simplejson as", "\"Getting traffic flow status from id \" + str(flow_uuid) # Start/Stops existing traffic", "return response # Get list of traffic generation objects @app.route('/api/trafficgen/v1/trafficObject', methods=['GET']) def get_list():", "2018 5GTANGO, QUOBIS SL. # ALL RIGHTS RESERVED. # # Licensed under the", "permission. # # This work has been performed in the framework of the", "# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0", "may not use this file except in compliance with the License. # You", "License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS", "# This work has been performed in the framework of the 5GTANGO project,", "in the framework of the 5GTANGO project, # funded by the European Commission", "SL. # ALL RIGHTS RESERVED. # # Licensed under the Apache License, Version", "def generate_tgo(): try: body = json.loads(request.data) except ValueError: response = jsonify(\"JSON format error", "performed in the framework of the 5GTANGO project, # funded by the European", "this software without specific prior written # permission. # # This work has", "with id \" + str(flow_uuid) # Removes traffic flow @app.route('/api/trafficgen/v1/flows/<int:flow_uuid>', methods=['DELETE']) def remove_flow(flow_uuid):", "traffic generation object \\ with id \" + str(resource_uuid) # Get traffic flow", "existing traffic generation object \\ with id \" + str(resource_uuid) # Get traffic", "flask import Flask, jsonify, request from tngsdk.traffic import traffic LOG = logging.getLogger(os.path.basename(__file__)) app", "products derived from this software without specific prior written # permission. # #", "the specific language governing permissions and # limitations under the License. 
# #", "names of its contributors may be used to endorse or promote # products", "200): response = jsonify({\"resource_uuid\": res['uuid']}) else: response = jsonify(res['message']) response.status_code = res['status'] return", "through # the Horizon 2020 and 5G-PPP programmes. The authors would like to", "id \" + str(resource_uuid) # Get traffic flow status @app.route('/api/trafficgen/v1/flows/<int:flow_uuid>', methods=['GET']) def get_status(flow_uuid):", "traffic flow @app.route('/api/trafficgen/v1/flows/<int:flow_uuid>', methods=['DELETE']) def remove_flow(flow_uuid): # TODO remove a traffic flow return", "\" + str(resource_uuid) # Get traffic flow status @app.route('/api/trafficgen/v1/flows/<int:flow_uuid>', methods=['GET']) def get_status(flow_uuid): #", "# funded by the European Commission under Grant number 761493 through # the", "\" + str(flow_uuid) # Removes traffic flow @app.route('/api/trafficgen/v1/flows/<int:flow_uuid>', methods=['DELETE']) def remove_flow(flow_uuid): # TODO", "QUOBIS SL. # ALL RIGHTS RESERVED. # # Licensed under the Apache License,", "written # permission. # # This work has been performed in the framework", "distributed under the License is distributed on an \"AS IS\" BASIS, # WITHOUT", "def get_status(flow_uuid): # TODO get traffic flow status return \"Getting traffic flow status", "except ValueError: response = jsonify(\"JSON format error in request parameters\") response.status_code = 400", "jsonify, request from tngsdk.traffic import traffic LOG = logging.getLogger(os.path.basename(__file__)) app = Flask(__name__) #", "# acknowledge the contributions of their colleagues of the SONATA # partner consortium", "derived from this software without specific prior written # permission. # # This" ]
[ "./100-my_calculator.py <a> <operator> <b>\") sys.exit(1) ops = {\"+\": add, \"-\": sub, \"*\": mul,", "sub, mul, div import sys if len(sys.argv) - 1 != 3: print(\"Usage: ./100-my_calculator.py", "div import sys if len(sys.argv) - 1 != 3: print(\"Usage: ./100-my_calculator.py <a> <operator>", "\"-\": sub, \"*\": mul, \"/\": div} if sys.argv[2] not in list(ops.keys()): print(\"Unknown operator.", "div} if sys.argv[2] not in list(ops.keys()): print(\"Unknown operator. Available operators: +, -, *", "if __name__ == \"__main__\": \"\"\"Handle basic arithmetic operations.\"\"\" from calculator_1 import add, sub,", "\"*\": mul, \"/\": div} if sys.argv[2] not in list(ops.keys()): print(\"Unknown operator. Available operators:", "\"/\": div} if sys.argv[2] not in list(ops.keys()): print(\"Unknown operator. Available operators: +, -,", "sys.exit(1) a = int(sys.argv[1]) b = int(sys.argv[3]) print(\"{} {} {} = {}\".format(a, sys.argv[2],", "list(ops.keys()): print(\"Unknown operator. Available operators: +, -, * and /\") sys.exit(1) a =", "== \"__main__\": \"\"\"Handle basic arithmetic operations.\"\"\" from calculator_1 import add, sub, mul, div", "{\"+\": add, \"-\": sub, \"*\": mul, \"/\": div} if sys.argv[2] not in list(ops.keys()):", "operations.\"\"\" from calculator_1 import add, sub, mul, div import sys if len(sys.argv) -", "- 1 != 3: print(\"Usage: ./100-my_calculator.py <a> <operator> <b>\") sys.exit(1) ops = {\"+\":", "#!/usr/bin/python3 # 100-my_calculator.py if __name__ == \"__main__\": \"\"\"Handle basic arithmetic operations.\"\"\" from calculator_1", "not in list(ops.keys()): print(\"Unknown operator. Available operators: +, -, * and /\") sys.exit(1)", "ops = {\"+\": add, \"-\": sub, \"*\": mul, \"/\": div} if sys.argv[2] not", "+, -, * and /\") sys.exit(1) a = int(sys.argv[1]) b = int(sys.argv[3]) print(\"{}", "sub, \"*\": mul, \"/\": div} if sys.argv[2] not in list(ops.keys()): print(\"Unknown operator. 
Available", "* and /\") sys.exit(1) a = int(sys.argv[1]) b = int(sys.argv[3]) print(\"{} {} {}", "\"\"\"Handle basic arithmetic operations.\"\"\" from calculator_1 import add, sub, mul, div import sys", "int(sys.argv[1]) b = int(sys.argv[3]) print(\"{} {} {} = {}\".format(a, sys.argv[2], b, ops[sys.argv[2]](a, b)))", "# 100-my_calculator.py if __name__ == \"__main__\": \"\"\"Handle basic arithmetic operations.\"\"\" from calculator_1 import", "mul, \"/\": div} if sys.argv[2] not in list(ops.keys()): print(\"Unknown operator. Available operators: +,", "operator. Available operators: +, -, * and /\") sys.exit(1) a = int(sys.argv[1]) b", "mul, div import sys if len(sys.argv) - 1 != 3: print(\"Usage: ./100-my_calculator.py <a>", "-, * and /\") sys.exit(1) a = int(sys.argv[1]) b = int(sys.argv[3]) print(\"{} {}", "add, sub, mul, div import sys if len(sys.argv) - 1 != 3: print(\"Usage:", "1 != 3: print(\"Usage: ./100-my_calculator.py <a> <operator> <b>\") sys.exit(1) ops = {\"+\": add,", "arithmetic operations.\"\"\" from calculator_1 import add, sub, mul, div import sys if len(sys.argv)", "3: print(\"Usage: ./100-my_calculator.py <a> <operator> <b>\") sys.exit(1) ops = {\"+\": add, \"-\": sub,", "calculator_1 import add, sub, mul, div import sys if len(sys.argv) - 1 !=", "from calculator_1 import add, sub, mul, div import sys if len(sys.argv) - 1", "__name__ == \"__main__\": \"\"\"Handle basic arithmetic operations.\"\"\" from calculator_1 import add, sub, mul,", "100-my_calculator.py if __name__ == \"__main__\": \"\"\"Handle basic arithmetic operations.\"\"\" from calculator_1 import add,", "len(sys.argv) - 1 != 3: print(\"Usage: ./100-my_calculator.py <a> <operator> <b>\") sys.exit(1) ops =", "<a> <operator> <b>\") sys.exit(1) ops = {\"+\": add, \"-\": sub, \"*\": mul, \"/\":", "print(\"Usage: ./100-my_calculator.py <a> <operator> <b>\") sys.exit(1) ops = {\"+\": add, \"-\": sub, \"*\":", "add, \"-\": sub, \"*\": mul, \"/\": div} if sys.argv[2] not in 
list(ops.keys()): print(\"Unknown", "in list(ops.keys()): print(\"Unknown operator. Available operators: +, -, * and /\") sys.exit(1) a", "<reponame>oluwaseun-ebenezer/holbertonschool-higher_level_programming #!/usr/bin/python3 # 100-my_calculator.py if __name__ == \"__main__\": \"\"\"Handle basic arithmetic operations.\"\"\" from", "= {\"+\": add, \"-\": sub, \"*\": mul, \"/\": div} if sys.argv[2] not in", "operators: +, -, * and /\") sys.exit(1) a = int(sys.argv[1]) b = int(sys.argv[3])", "!= 3: print(\"Usage: ./100-my_calculator.py <a> <operator> <b>\") sys.exit(1) ops = {\"+\": add, \"-\":", "\"__main__\": \"\"\"Handle basic arithmetic operations.\"\"\" from calculator_1 import add, sub, mul, div import", "if sys.argv[2] not in list(ops.keys()): print(\"Unknown operator. Available operators: +, -, * and", "print(\"Unknown operator. Available operators: +, -, * and /\") sys.exit(1) a = int(sys.argv[1])", "and /\") sys.exit(1) a = int(sys.argv[1]) b = int(sys.argv[3]) print(\"{} {} {} =", "sys.exit(1) ops = {\"+\": add, \"-\": sub, \"*\": mul, \"/\": div} if sys.argv[2]", "a = int(sys.argv[1]) b = int(sys.argv[3]) print(\"{} {} {} = {}\".format(a, sys.argv[2], b,", "<b>\") sys.exit(1) ops = {\"+\": add, \"-\": sub, \"*\": mul, \"/\": div} if", "if len(sys.argv) - 1 != 3: print(\"Usage: ./100-my_calculator.py <a> <operator> <b>\") sys.exit(1) ops", "sys if len(sys.argv) - 1 != 3: print(\"Usage: ./100-my_calculator.py <a> <operator> <b>\") sys.exit(1)", "import add, sub, mul, div import sys if len(sys.argv) - 1 != 3:", "import sys if len(sys.argv) - 1 != 3: print(\"Usage: ./100-my_calculator.py <a> <operator> <b>\")", "Available operators: +, -, * and /\") sys.exit(1) a = int(sys.argv[1]) b =", "basic arithmetic operations.\"\"\" from calculator_1 import add, sub, mul, div import sys if", "/\") sys.exit(1) a = int(sys.argv[1]) b = int(sys.argv[3]) print(\"{} {} {} = {}\".format(a,", "sys.argv[2] not in list(ops.keys()): print(\"Unknown operator. 
Available operators: +, -, * and /\")", "<operator> <b>\") sys.exit(1) ops = {\"+\": add, \"-\": sub, \"*\": mul, \"/\": div}", "= int(sys.argv[1]) b = int(sys.argv[3]) print(\"{} {} {} = {}\".format(a, sys.argv[2], b, ops[sys.argv[2]](a," ]
[ "models class Migration(migrations.Migration): dependencies = [ ('Africa', '0005_auto_20200731_1203'), ] operations = [ migrations.AddField(", "migrations, models class Migration(migrations.Migration): dependencies = [ ('Africa', '0005_auto_20200731_1203'), ] operations = [", "3.0.8 on 2020-07-31 12:04 from django.db import migrations, models class Migration(migrations.Migration): dependencies =", "by Django 3.0.8 on 2020-07-31 12:04 from django.db import migrations, models class Migration(migrations.Migration):", "# Generated by Django 3.0.8 on 2020-07-31 12:04 from django.db import migrations, models", "('Africa', '0005_auto_20200731_1203'), ] operations = [ migrations.AddField( model_name='dish', name='images01', field=models.ImageField(default=1, upload_to='main_product/'), preserve_default=False, ),", "Django 3.0.8 on 2020-07-31 12:04 from django.db import migrations, models class Migration(migrations.Migration): dependencies", "django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('Africa', '0005_auto_20200731_1203'), ] operations", "Generated by Django 3.0.8 on 2020-07-31 12:04 from django.db import migrations, models class", "[ ('Africa', '0005_auto_20200731_1203'), ] operations = [ migrations.AddField( model_name='dish', name='images01', field=models.ImageField(default=1, upload_to='main_product/'), preserve_default=False,", "dependencies = [ ('Africa', '0005_auto_20200731_1203'), ] operations = [ migrations.AddField( model_name='dish', name='images01', field=models.ImageField(default=1,", "Migration(migrations.Migration): dependencies = [ ('Africa', '0005_auto_20200731_1203'), ] operations = [ migrations.AddField( model_name='dish', name='images01',", "= [ ('Africa', '0005_auto_20200731_1203'), ] operations = [ migrations.AddField( model_name='dish', name='images01', field=models.ImageField(default=1, upload_to='main_product/'),", "on 2020-07-31 12:04 from django.db import migrations, models class 
Migration(migrations.Migration): dependencies = [", "2020-07-31 12:04 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('Africa',", "class Migration(migrations.Migration): dependencies = [ ('Africa', '0005_auto_20200731_1203'), ] operations = [ migrations.AddField( model_name='dish',", "'0005_auto_20200731_1203'), ] operations = [ migrations.AddField( model_name='dish', name='images01', field=models.ImageField(default=1, upload_to='main_product/'), preserve_default=False, ), ]", "from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('Africa', '0005_auto_20200731_1203'), ]", "12:04 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('Africa', '0005_auto_20200731_1203'),", "import migrations, models class Migration(migrations.Migration): dependencies = [ ('Africa', '0005_auto_20200731_1203'), ] operations =" ]
"""handles request for user gchart widget."""
from apps.managers.challenge_mgr import challenge_mgr
from apps.widgets.status.models import DailyStatus


def supply(request, page_name):
    """supply view_objects for user status."""
    # Both arguments are required by the widget framework but unused here.
    _ = page_name
    _ = request

    rounds_info = challenge_mgr.get_all_round_info()
    start = rounds_info["competition_start"]
    daily_status = DailyStatus.objects.filter(
        short_date__gte=start).order_by('short_date')

    # Annotate every day with a short "month/day" label and the number of
    # users who completed setup on that day (delta against the previous day).
    previous_total = 0
    for day in daily_status:
        day.display_date = "%d/%d" % (day.short_date.month, day.short_date.day)
        day.new_users = day.setup_users - previous_total
        previous_total = day.setup_users

    return {
        "daily_status": daily_status,
    }
[ "um valor: ')) n2 = int(input('digite um valor: ')) s = n1 +", "=====') n1 = int(input('digite um valor: ')) n2 = int(input('digite um valor: '))", "DESAFIO 003 =====') n1 = int(input('digite um valor: ')) n2 = int(input('digite um", "n2 = int(input('digite um valor: ')) s = n1 + n2 print(f'a soma", "s = n1 + n2 print(f'a soma entre {n1} e {n2} é {s}')", "valor: ')) s = n1 + n2 print(f'a soma entre {n1} e {n2}", "valor: ')) n2 = int(input('digite um valor: ')) s = n1 + n2", "um valor: ')) s = n1 + n2 print(f'a soma entre {n1} e", "')) n2 = int(input('digite um valor: ')) s = n1 + n2 print(f'a", "003 =====') n1 = int(input('digite um valor: ')) n2 = int(input('digite um valor:", "int(input('digite um valor: ')) n2 = int(input('digite um valor: ')) s = n1", "int(input('digite um valor: ')) s = n1 + n2 print(f'a soma entre {n1}", "= int(input('digite um valor: ')) s = n1 + n2 print(f'a soma entre", "')) s = n1 + n2 print(f'a soma entre {n1} e {n2} é", "= int(input('digite um valor: ')) n2 = int(input('digite um valor: ')) s =", "print('===== DESAFIO 003 =====') n1 = int(input('digite um valor: ')) n2 = int(input('digite", "n1 = int(input('digite um valor: ')) n2 = int(input('digite um valor: ')) s" ]
from webthing import Thing, Property, Value, SingleThing, WebThingServer
import logging
import tornado.ioloop
from sensor import DS18B20


def run_server():
    """Expose a DS18B20 temperature sensor as a WebThings 'Thing' over HTTP."""
    # Hardware sensor, addressed by its 1-Wire device id.
    ds18 = DS18B20('28-03199779f5a1')
    # Observable value seeded with the current reading, in degrees Celsius.
    celsius = Value(ds18.temperature().C)

    thing = Thing(
        'urn:dev:ops:temperature-sensor',
        'DS18B20',
        ['TemperatureSensor'])
    thing.add_property(
        Property(
            thing,
            'celsius',
            celsius,
            metadata={
                '@type': 'TemperatureProperty',
                'title': 'Celsius',
                'type': 'number',
                'unit': '°C',
                'readOnly': True
            }))

    server = WebThingServer(SingleThing(thing), port=8888)

    def update():
        # Re-read the sensor and push the fresh value to subscribed clients.
        t = ds18.temperature()
        celsius.notify_of_external_update(t.C)

    # Poll the sensor every 3000 ms on the Tornado IO loop.
    timer = tornado.ioloop.PeriodicCallback(update, 3000)
    timer.start()

    try:
        logging.info('starting the server')
        server.start()  # blocks until interrupted
    except KeyboardInterrupt:
        # Ctrl-C: stop the polling task first, then shut the HTTP server down.
        logging.debug('stopping update task')
        timer.stop()
        logging.info('stopping the server')
        server.stop()
        logging.info('done')


if __name__ == '__main__':
    # level=10 is logging.DEBUG.
    logging.basicConfig(
        level=10,
        format="%(asctime)s %(filename)s:%(lineno)s %(levelname)s %(message)s"
    )
    run_server()
# Demo: build keypoint heatmaps for one image and display them.
import cv2
from src.utils.heatmap import getHeatmaps
from src.visualization.visualize import visualizeAllHeatmap, visualizeBackgroundHeatmap

# One person with two keypoints; each keypoint is [x, y, visibility].
# NOTE(review): visibility flag 2 presumably follows the COCO convention
# ("labeled and visible") — confirm against getHeatmaps.
keypoints = [
    [[100, 100, 2], [105,105, 2]]
]
image = cv2.imread('images/person.jpg')
# Third argument (7) is passed straight to getHeatmaps; presumably the
# Gaussian sigma/kernel size — TODO confirm in src.utils.heatmap.
hmaps = getHeatmaps(image, keypoints, 7)
visualizeAllHeatmap(image, hmaps)
visualizeBackgroundHeatmap(image, hmaps)
# Keep the OpenCV windows open until a key is pressed.
cv2.waitKey(0)
'''
i actually didn't write this.
credit to https://github.com/lipsumar/meme-caption
'''
from PIL import Image
from PIL import ImageFont
from PIL import ImageDraw
import sys

# Usage: script.py <input-image> <top-text> <bottom-text> <output-image>
img = Image.open(sys.argv[1])
draw = ImageDraw.Draw(img)


def drawText(msg, pos):
    """Draw `msg` on the module-level `img`, word-wrapped and centered.

    `pos` selects the vertical anchor: "bottom" places the text block near
    the image bottom; any other value (e.g. "top") starts at the top.
    """
    # Font size starts at 1/10 of the image width and may shrink below.
    fontSize = img.width//10
    lines = []
    font = ImageFont.truetype("impact.ttf", fontSize)
    w, h = draw.textsize(msg, font)
    imgWidthWithPadding = img.width * 0.99

    # 1. how many lines for the msg to fit ?
    lineCount = 1
    if(w > imgWidthWithPadding):
        lineCount = int(round((w / imgWidthWithPadding) + 1))

    # Shrink the font until the text fits in at most 2 lines (or the font
    # gets too small to shrink further).
    if lineCount > 2:
        while 1:
            fontSize -= 2
            font = ImageFont.truetype("impact.ttf", fontSize)
            w, h = draw.textsize(msg, font)
            lineCount = int(round((w / imgWidthWithPadding) + 1))
            print("try again with fontSize={} => {}".format(fontSize, lineCount))
            if lineCount < 3 or fontSize < 10:
                break

    print("img.width: {}, text width: {}".format(img.width, w))
    print("Text length: {}".format(len(msg)))
    print("Lines: {}".format(lineCount))

    # 2. divide text in X lines
    lastCut = 0
    isLast = False
    for i in range(0,lineCount):
        # Tentative cut points at equal character fractions of the message.
        if lastCut == 0:
            cut = (len(msg) / lineCount) * i
        else:
            cut = lastCut
        if i < lineCount-1:
            nextCut = (len(msg) / lineCount) * (i+1)
        else:
            nextCut = len(msg)
            isLast = True
        print("cut: {} -> {}".format(cut, nextCut))

        # make sure we don't cut words in half
        if nextCut == len(msg) or msg[int(nextCut)] == " ":
            print("may cut")
        else:
            print("may not cut")
            # Advance the cut to the next space so words stay whole.
            while msg[int(nextCut)] != " ":
                nextCut += 1
            print("new cut: {}".format(nextCut))

        line = msg[int(cut):int(nextCut)].strip()

        # is line still fitting ?
        w, h = draw.textsize(line, font)
        if not isLast and w > imgWidthWithPadding:
            print("overshot")
            # Back up to the previous space instead.
            # NOTE(review): nextCut may be a float here (len(msg)/lineCount
            # division above), so msg[nextCut] would raise TypeError on
            # Python 3 — the int() wrapper used elsewhere is missing. Confirm.
            nextCut -= 1
            while msg[nextCut] != " ":
                nextCut -= 1
            print("new cut: {}".format(nextCut))
        lastCut = nextCut
        lines.append(msg[int(cut):int(nextCut)].strip())

    print(lines)

    # 3. print each line centered
    # lastY tracks the bottom of the previously drawn line; seeding it with
    # -h makes the first line land at y=0 (or near the bottom for pos=="bottom").
    lastY = -h
    if pos == "bottom":
        lastY = img.height - h * (lineCount+1) - 10
    for i in range(0,lineCount):
        w, h = draw.textsize(lines[i], font)
        textX = img.width/2 - w/2
        #if pos == "top":
        #    textY = h * i
        #else:
        #    textY = img.height - h * i
        textY = lastY + h
        # Fake a black outline by drawing the text 4 times offset diagonally,
        # then the white fill on top.
        offset = fontSize//28
        draw.text((textX-offset, textY-offset),lines[i],(0,0,0),font=font)
        draw.text((textX+offset, textY-offset),lines[i],(0,0,0),font=font)
        draw.text((textX+offset, textY+offset),lines[i],(0,0,0),font=font)
        draw.text((textX-offset, textY+offset),lines[i],(0,0,0),font=font)
        draw.text((textX, textY),lines[i],(255,255,255),font=font)
        lastY = textY
    return


drawText(sys.argv[2].upper(), "top")
drawText(sys.argv[3].upper(), "bottom")
img.save(sys.argv[4])
draw.textsize(msg, font) lineCount = int(round((w / imgWidthWithPadding) + 1)) print(\"try", "lineCount-1: nextCut = (len(msg) / lineCount) * (i+1) else: nextCut = len(msg) isLast", "centered lastY = -h if pos == \"bottom\": lastY = img.height - h", "-= 1 while msg[nextCut] != \" \": nextCut -= 1 print(\"new cut: {}\".format(nextCut))", "font = ImageFont.truetype(\"impact.ttf\", fontSize) w, h = draw.textsize(msg, font) lineCount = int(round((w /", "if lineCount < 3 or fontSize < 10: break print(\"img.width: {}, text width:", "= h * i #else: # textY = img.height - h * i", "or msg[int(nextCut)] == \" \": print(\"may cut\") else: print(\"may not cut\") while msg[int(nextCut)]", "= nextCut lines.append(msg[int(cut):int(nextCut)].strip()) print(lines) # 3. print each line centered lastY = -h", "not isLast and w > imgWidthWithPadding: print(\"overshot\") nextCut -= 1 while msg[nextCut] !=", "for the msg to fit ? lineCount = 1 if(w > imgWidthWithPadding): lineCount", "imgWidthWithPadding = img.width * 0.99 # 1. how many lines for the msg", "= len(msg) isLast = True print(\"cut: {} -> {}\".format(cut, nextCut)) # make sure", "ImageFont.truetype(\"impact.ttf\", fontSize) w, h = draw.textsize(msg, font) imgWidthWithPadding = img.width * 0.99 #", "how many lines for the msg to fit ? lineCount = 1 if(w", "w > imgWidthWithPadding: print(\"overshot\") nextCut -= 1 while msg[nextCut] != \" \": nextCut", "lineCount)) if lineCount < 3 or fontSize < 10: break print(\"img.width: {}, text", "in X lines lastCut = 0 isLast = False for i in range(0,lineCount):", "draw.textsize(msg, font) imgWidthWithPadding = img.width * 0.99 # 1. how many lines for", "cut = lastCut if i < lineCount-1: nextCut = (len(msg) / lineCount) *", "2. divide text in X lines lastCut = 0 isLast = False for", "print(\"Text length: {}\".format(len(msg))) print(\"Lines: {}\".format(lineCount)) # 2. 
divide text in X lines lastCut", "print(\"try again with fontSize={} => {}\".format(fontSize, lineCount)) if lineCount < 3 or fontSize", "lines.append(msg[int(cut):int(nextCut)].strip()) print(lines) # 3. print each line centered lastY = -h if pos", "* (i+1) else: nextCut = len(msg) isLast = True print(\"cut: {} -> {}\".format(cut,", "textY+offset),lines[i],(0,0,0),font=font) draw.text((textX-offset, textY+offset),lines[i],(0,0,0),font=font) draw.text((textX, textY),lines[i],(255,255,255),font=font) lastY = textY return drawText(sys.argv[2].upper(), \"top\") drawText(sys.argv[3].upper(), \"bottom\")", "h = draw.textsize(lines[i], font) textX = img.width/2 - w/2 #if pos == \"top\":", "draw.text((textX+offset, textY-offset),lines[i],(0,0,0),font=font) draw.text((textX+offset, textY+offset),lines[i],(0,0,0),font=font) draw.text((textX-offset, textY+offset),lines[i],(0,0,0),font=font) draw.text((textX, textY),lines[i],(255,255,255),font=font) lastY = textY return drawText(sys.argv[2].upper(),", "\"bottom\": lastY = img.height - h * (lineCount+1) - 10 for i in", "1 while msg[nextCut] != \" \": nextCut -= 1 print(\"new cut: {}\".format(nextCut)) lastCut", "{}\".format(cut, nextCut)) # make sure we don't cut words in half if nextCut", "cut words in half if nextCut == len(msg) or msg[int(nextCut)] == \" \":", "w, h = draw.textsize(lines[i], font) textX = img.width/2 - w/2 #if pos ==", "- h * (lineCount+1) - 10 for i in range(0,lineCount): w, h =", "fontSize -= 2 font = ImageFont.truetype(\"impact.ttf\", fontSize) w, h = draw.textsize(msg, font) lineCount", "lineCount) * i else: cut = lastCut if i < lineCount-1: nextCut =", "+ h offset = fontSize//28 draw.text((textX-offset, textY-offset),lines[i],(0,0,0),font=font) draw.text((textX+offset, textY-offset),lines[i],(0,0,0),font=font) draw.text((textX+offset, textY+offset),lines[i],(0,0,0),font=font) draw.text((textX-offset, textY+offset),lines[i],(0,0,0),font=font)", "+= 1 print(\"new cut: {}\".format(nextCut)) line = 
msg[int(cut):int(nextCut)].strip() # is line still fitting", "= img.width/2 - w/2 #if pos == \"top\": # textY = h *", "2 font = ImageFont.truetype(\"impact.ttf\", fontSize) w, h = draw.textsize(msg, font) lineCount = int(round((w", "lines for the msg to fit ? lineCount = 1 if(w > imgWidthWithPadding):", "imgWidthWithPadding: print(\"overshot\") nextCut -= 1 while msg[nextCut] != \" \": nextCut -= 1", "lastY = img.height - h * (lineCount+1) - 10 for i in range(0,lineCount):", "pos): fontSize = img.width//10 lines = [] font = ImageFont.truetype(\"impact.ttf\", fontSize) w, h", "actually didn't write this. credit to https://github.com/lipsumar/meme-caption ''' from PIL import Image from", "w/2 #if pos == \"top\": # textY = h * i #else: #", "fontSize//28 draw.text((textX-offset, textY-offset),lines[i],(0,0,0),font=font) draw.text((textX+offset, textY-offset),lines[i],(0,0,0),font=font) draw.text((textX+offset, textY+offset),lines[i],(0,0,0),font=font) draw.text((textX-offset, textY+offset),lines[i],(0,0,0),font=font) draw.text((textX, textY),lines[i],(255,255,255),font=font) lastY =", "width: {}\".format(img.width, w)) print(\"Text length: {}\".format(len(msg))) print(\"Lines: {}\".format(lineCount)) # 2. divide text in", "cut = (len(msg) / lineCount) * i else: cut = lastCut if i", "pos == \"top\": # textY = h * i #else: # textY =", "line centered lastY = -h if pos == \"bottom\": lastY = img.height -", "lines = [] font = ImageFont.truetype(\"impact.ttf\", fontSize) w, h = draw.textsize(msg, font) imgWidthWithPadding", "{}\".format(nextCut)) lastCut = nextCut lines.append(msg[int(cut):int(nextCut)].strip()) print(lines) # 3. 
print each line centered lastY", "half if nextCut == len(msg) or msg[int(nextCut)] == \" \": print(\"may cut\") else:", "10: break print(\"img.width: {}, text width: {}\".format(img.width, w)) print(\"Text length: {}\".format(len(msg))) print(\"Lines: {}\".format(lineCount))", "i textY = lastY + h offset = fontSize//28 draw.text((textX-offset, textY-offset),lines[i],(0,0,0),font=font) draw.text((textX+offset, textY-offset),lines[i],(0,0,0),font=font)", "ImageFont.truetype(\"impact.ttf\", fontSize) w, h = draw.textsize(msg, font) lineCount = int(round((w / imgWidthWithPadding) +", "msg[int(nextCut)] != \" \": nextCut += 1 print(\"new cut: {}\".format(nextCut)) line = msg[int(cut):int(nextCut)].strip()", "fontSize) w, h = draw.textsize(msg, font) imgWidthWithPadding = img.width * 0.99 # 1.", "cut: {}\".format(nextCut)) lastCut = nextCut lines.append(msg[int(cut):int(nextCut)].strip()) print(lines) # 3. print each line centered", "else: nextCut = len(msg) isLast = True print(\"cut: {} -> {}\".format(cut, nextCut)) #", "in half if nextCut == len(msg) or msg[int(nextCut)] == \" \": print(\"may cut\")", "in range(0,lineCount): if lastCut == 0: cut = (len(msg) / lineCount) * i", "# is line still fitting ? 
w, h = draw.textsize(line, font) if not", "< 3 or fontSize < 10: break print(\"img.width: {}, text width: {}\".format(img.width, w))", "sys img = Image.open(sys.argv[1]) draw = ImageDraw.Draw(img) def drawText(msg, pos): fontSize = img.width//10", "= int(round((w / imgWidthWithPadding) + 1)) print(\"try again with fontSize={} => {}\".format(fontSize, lineCount))", "int(round((w / imgWidthWithPadding) + 1)) if lineCount > 2: while 1: fontSize -=", "drawText(msg, pos): fontSize = img.width//10 lines = [] font = ImageFont.truetype(\"impact.ttf\", fontSize) w,", "divide text in X lines lastCut = 0 isLast = False for i", "font) if not isLast and w > imgWidthWithPadding: print(\"overshot\") nextCut -= 1 while", "nextCut += 1 print(\"new cut: {}\".format(nextCut)) line = msg[int(cut):int(nextCut)].strip() # is line still", "= draw.textsize(msg, font) imgWidthWithPadding = img.width * 0.99 # 1. how many lines", "img.height - h * i textY = lastY + h offset = fontSize//28", "import Image from PIL import ImageFont from PIL import ImageDraw import sys img", "* i else: cut = lastCut if i < lineCount-1: nextCut = (len(msg)", "1: fontSize -= 2 font = ImageFont.truetype(\"impact.ttf\", fontSize) w, h = draw.textsize(msg, font)", "# textY = img.height - h * i textY = lastY + h", "{}\".format(nextCut)) line = msg[int(cut):int(nextCut)].strip() # is line still fitting ? w, h =", "cut\") while msg[int(nextCut)] != \" \": nextCut += 1 print(\"new cut: {}\".format(nextCut)) line", "? 
w, h = draw.textsize(line, font) if not isLast and w > imgWidthWithPadding:", "= draw.textsize(msg, font) lineCount = int(round((w / imgWidthWithPadding) + 1)) print(\"try again with", "ImageDraw import sys img = Image.open(sys.argv[1]) draw = ImageDraw.Draw(img) def drawText(msg, pos): fontSize", "len(msg) or msg[int(nextCut)] == \" \": print(\"may cut\") else: print(\"may not cut\") while", "= draw.textsize(line, font) if not isLast and w > imgWidthWithPadding: print(\"overshot\") nextCut -=", "print(\"may cut\") else: print(\"may not cut\") while msg[int(nextCut)] != \" \": nextCut +=", "lineCount < 3 or fontSize < 10: break print(\"img.width: {}, text width: {}\".format(img.width,", "/ lineCount) * (i+1) else: nextCut = len(msg) isLast = True print(\"cut: {}", "= 1 if(w > imgWidthWithPadding): lineCount = int(round((w / imgWidthWithPadding) + 1)) if", "draw.text((textX+offset, textY+offset),lines[i],(0,0,0),font=font) draw.text((textX-offset, textY+offset),lines[i],(0,0,0),font=font) draw.text((textX, textY),lines[i],(255,255,255),font=font) lastY = textY return drawText(sys.argv[2].upper(), \"top\") drawText(sys.argv[3].upper(),", "font) textX = img.width/2 - w/2 #if pos == \"top\": # textY =", "if nextCut == len(msg) or msg[int(nextCut)] == \" \": print(\"may cut\") else: print(\"may", "- h * i textY = lastY + h offset = fontSize//28 draw.text((textX-offset,", "> 2: while 1: fontSize -= 2 font = ImageFont.truetype(\"impact.ttf\", fontSize) w, h", "msg[int(nextCut)] == \" \": print(\"may cut\") else: print(\"may not cut\") while msg[int(nextCut)] !=", "# 3. 
print each line centered lastY = -h if pos == \"bottom\":", "lastY = -h if pos == \"bottom\": lastY = img.height - h *", "= True print(\"cut: {} -> {}\".format(cut, nextCut)) # make sure we don't cut", "lineCount) * (i+1) else: nextCut = len(msg) isLast = True print(\"cut: {} ->", "i else: cut = lastCut if i < lineCount-1: nextCut = (len(msg) /", "each line centered lastY = -h if pos == \"bottom\": lastY = img.height", "10 for i in range(0,lineCount): w, h = draw.textsize(lines[i], font) textX = img.width/2", "print each line centered lastY = -h if pos == \"bottom\": lastY =", "and w > imgWidthWithPadding: print(\"overshot\") nextCut -= 1 while msg[nextCut] != \" \":", "3. print each line centered lastY = -h if pos == \"bottom\": lastY", "{}\".format(img.width, w)) print(\"Text length: {}\".format(len(msg))) print(\"Lines: {}\".format(lineCount)) # 2. divide text in X", "\": print(\"may cut\") else: print(\"may not cut\") while msg[int(nextCut)] != \" \": nextCut", "break print(\"img.width: {}, text width: {}\".format(img.width, w)) print(\"Text length: {}\".format(len(msg))) print(\"Lines: {}\".format(lineCount)) #", "/ imgWidthWithPadding) + 1)) if lineCount > 2: while 1: fontSize -= 2", "= img.width * 0.99 # 1. how many lines for the msg to", "nextCut = len(msg) isLast = True print(\"cut: {} -> {}\".format(cut, nextCut)) # make", "< 10: break print(\"img.width: {}, text width: {}\".format(img.width, w)) print(\"Text length: {}\".format(len(msg))) print(\"Lines:", "(len(msg) / lineCount) * (i+1) else: nextCut = len(msg) isLast = True print(\"cut:", "font) imgWidthWithPadding = img.width * 0.99 # 1. how many lines for the", "== \" \": print(\"may cut\") else: print(\"may not cut\") while msg[int(nextCut)] != \"", "1 print(\"new cut: {}\".format(nextCut)) lastCut = nextCut lines.append(msg[int(cut):int(nextCut)].strip()) print(lines) # 3. 
print each", "= [] font = ImageFont.truetype(\"impact.ttf\", fontSize) w, h = draw.textsize(msg, font) imgWidthWithPadding =", "print(\"may not cut\") while msg[int(nextCut)] != \" \": nextCut += 1 print(\"new cut:", "lines lastCut = 0 isLast = False for i in range(0,lineCount): if lastCut", "else: print(\"may not cut\") while msg[int(nextCut)] != \" \": nextCut += 1 print(\"new", "still fitting ? w, h = draw.textsize(line, font) if not isLast and w", "= img.height - h * i textY = lastY + h offset =", "is line still fitting ? w, h = draw.textsize(line, font) if not isLast", "= lastCut if i < lineCount-1: nextCut = (len(msg) / lineCount) * (i+1)", "w, h = draw.textsize(msg, font) imgWidthWithPadding = img.width * 0.99 # 1. how", "PIL import Image from PIL import ImageFont from PIL import ImageDraw import sys", "= 0 isLast = False for i in range(0,lineCount): if lastCut == 0:", "to fit ? lineCount = 1 if(w > imgWidthWithPadding): lineCount = int(round((w /", "=> {}\".format(fontSize, lineCount)) if lineCount < 3 or fontSize < 10: break print(\"img.width:", "img.width * 0.99 # 1. how many lines for the msg to fit", "= draw.textsize(lines[i], font) textX = img.width/2 - w/2 #if pos == \"top\": #", "we don't cut words in half if nextCut == len(msg) or msg[int(nextCut)] ==", "nextCut lines.append(msg[int(cut):int(nextCut)].strip()) print(lines) # 3. 
print each line centered lastY = -h if", "if i < lineCount-1: nextCut = (len(msg) / lineCount) * (i+1) else: nextCut", "img.width//10 lines = [] font = ImageFont.truetype(\"impact.ttf\", fontSize) w, h = draw.textsize(msg, font)", "- w/2 #if pos == \"top\": # textY = h * i #else:", "if(w > imgWidthWithPadding): lineCount = int(round((w / imgWidthWithPadding) + 1)) if lineCount >", "nextCut = (len(msg) / lineCount) * (i+1) else: nextCut = len(msg) isLast =", "while msg[int(nextCut)] != \" \": nextCut += 1 print(\"new cut: {}\".format(nextCut)) line =", "lineCount = int(round((w / imgWidthWithPadding) + 1)) print(\"try again with fontSize={} => {}\".format(fontSize,", "* i #else: # textY = img.height - h * i textY =", "textY = h * i #else: # textY = img.height - h *", "again with fontSize={} => {}\".format(fontSize, lineCount)) if lineCount < 3 or fontSize <", "text width: {}\".format(img.width, w)) print(\"Text length: {}\".format(len(msg))) print(\"Lines: {}\".format(lineCount)) # 2. divide text", "h = draw.textsize(line, font) if not isLast and w > imgWidthWithPadding: print(\"overshot\") nextCut", "\"top\": # textY = h * i #else: # textY = img.height -", "offset = fontSize//28 draw.text((textX-offset, textY-offset),lines[i],(0,0,0),font=font) draw.text((textX+offset, textY-offset),lines[i],(0,0,0),font=font) draw.text((textX+offset, textY+offset),lines[i],(0,0,0),font=font) draw.text((textX-offset, textY+offset),lines[i],(0,0,0),font=font) draw.text((textX, textY),lines[i],(255,255,255),font=font)", "# make sure we don't cut words in half if nextCut == len(msg)", "= Image.open(sys.argv[1]) draw = ImageDraw.Draw(img) def drawText(msg, pos): fontSize = img.width//10 lines =", "#if pos == \"top\": # textY = h * i #else: # textY", "or fontSize < 10: break print(\"img.width: {}, text width: {}\".format(img.width, w)) print(\"Text length:", "1. how many lines for the msg to fit ? 
lineCount = 1", "= ImageFont.truetype(\"impact.ttf\", fontSize) w, h = draw.textsize(msg, font) imgWidthWithPadding = img.width * 0.99", "lastCut = nextCut lines.append(msg[int(cut):int(nextCut)].strip()) print(lines) # 3. print each line centered lastY =", "h offset = fontSize//28 draw.text((textX-offset, textY-offset),lines[i],(0,0,0),font=font) draw.text((textX+offset, textY-offset),lines[i],(0,0,0),font=font) draw.text((textX+offset, textY+offset),lines[i],(0,0,0),font=font) draw.text((textX-offset, textY+offset),lines[i],(0,0,0),font=font) draw.text((textX,", "if lineCount > 2: while 1: fontSize -= 2 font = ImageFont.truetype(\"impact.ttf\", fontSize)", "i #else: # textY = img.height - h * i textY = lastY", "h * i textY = lastY + h offset = fontSize//28 draw.text((textX-offset, textY-offset),lines[i],(0,0,0),font=font)", "* 0.99 # 1. how many lines for the msg to fit ?", "== \"bottom\": lastY = img.height - h * (lineCount+1) - 10 for i", "\": nextCut += 1 print(\"new cut: {}\".format(nextCut)) line = msg[int(cut):int(nextCut)].strip() # is line", "print(\"overshot\") nextCut -= 1 while msg[nextCut] != \" \": nextCut -= 1 print(\"new", "this. credit to https://github.com/lipsumar/meme-caption ''' from PIL import Image from PIL import ImageFont", "pos == \"bottom\": lastY = img.height - h * (lineCount+1) - 10 for", "didn't write this. credit to https://github.com/lipsumar/meme-caption ''' from PIL import Image from PIL", "{}, text width: {}\".format(img.width, w)) print(\"Text length: {}\".format(len(msg))) print(\"Lines: {}\".format(lineCount)) # 2. divide", "w)) print(\"Text length: {}\".format(len(msg))) print(\"Lines: {}\".format(lineCount)) # 2. 
divide text in X lines", "print(\"cut: {} -> {}\".format(cut, nextCut)) # make sure we don't cut words in", "* (lineCount+1) - 10 for i in range(0,lineCount): w, h = draw.textsize(lines[i], font)", "0 isLast = False for i in range(0,lineCount): if lastCut == 0: cut", "fontSize = img.width//10 lines = [] font = ImageFont.truetype(\"impact.ttf\", fontSize) w, h =", "= lastY + h offset = fontSize//28 draw.text((textX-offset, textY-offset),lines[i],(0,0,0),font=font) draw.text((textX+offset, textY-offset),lines[i],(0,0,0),font=font) draw.text((textX+offset, textY+offset),lines[i],(0,0,0),font=font)", "* i textY = lastY + h offset = fontSize//28 draw.text((textX-offset, textY-offset),lines[i],(0,0,0),font=font) draw.text((textX+offset,", "img.width/2 - w/2 #if pos == \"top\": # textY = h * i", "img = Image.open(sys.argv[1]) draw = ImageDraw.Draw(img) def drawText(msg, pos): fontSize = img.width//10 lines", "= img.height - h * (lineCount+1) - 10 for i in range(0,lineCount): w,", "-= 1 print(\"new cut: {}\".format(nextCut)) lastCut = nextCut lines.append(msg[int(cut):int(nextCut)].strip()) print(lines) # 3. print", "/ lineCount) * i else: cut = lastCut if i < lineCount-1: nextCut", "print(\"img.width: {}, text width: {}\".format(img.width, w)) print(\"Text length: {}\".format(len(msg))) print(\"Lines: {}\".format(lineCount)) # 2.", "i < lineCount-1: nextCut = (len(msg) / lineCount) * (i+1) else: nextCut =", "nextCut -= 1 print(\"new cut: {}\".format(nextCut)) lastCut = nextCut lines.append(msg[int(cut):int(nextCut)].strip()) print(lines) # 3.", "h = draw.textsize(msg, font) lineCount = int(round((w / imgWidthWithPadding) + 1)) print(\"try again", "== 0: cut = (len(msg) / lineCount) * i else: cut = lastCut", "line = msg[int(cut):int(nextCut)].strip() # is line still fitting ? w, h = draw.textsize(line,", "# 1. how many lines for the msg to fit ? 
lineCount =", "fontSize={} => {}\".format(fontSize, lineCount)) if lineCount < 3 or fontSize < 10: break", "i actually didn't write this. credit to https://github.com/lipsumar/meme-caption ''' from PIL import Image", "0: cut = (len(msg) / lineCount) * i else: cut = lastCut if", "lastCut == 0: cut = (len(msg) / lineCount) * i else: cut =", "''' from PIL import Image from PIL import ImageFont from PIL import ImageDraw", "imgWidthWithPadding) + 1)) print(\"try again with fontSize={} => {}\".format(fontSize, lineCount)) if lineCount <", "-h if pos == \"bottom\": lastY = img.height - h * (lineCount+1) -", "X lines lastCut = 0 isLast = False for i in range(0,lineCount): if", "if not isLast and w > imgWidthWithPadding: print(\"overshot\") nextCut -= 1 while msg[nextCut]", "lastCut = 0 isLast = False for i in range(0,lineCount): if lastCut ==", "{} -> {}\".format(cut, nextCut)) # make sure we don't cut words in half", "not cut\") while msg[int(nextCut)] != \" \": nextCut += 1 print(\"new cut: {}\".format(nextCut))", "import sys img = Image.open(sys.argv[1]) draw = ImageDraw.Draw(img) def drawText(msg, pos): fontSize =", "make sure we don't cut words in half if nextCut == len(msg) or", "!= \" \": nextCut -= 1 print(\"new cut: {}\".format(nextCut)) lastCut = nextCut lines.append(msg[int(cut):int(nextCut)].strip())", "(len(msg) / lineCount) * i else: cut = lastCut if i < lineCount-1:", "draw = ImageDraw.Draw(img) def drawText(msg, pos): fontSize = img.width//10 lines = [] font", "textX = img.width/2 - w/2 #if pos == \"top\": # textY = h", "- 10 for i in range(0,lineCount): w, h = draw.textsize(lines[i], font) textX =" ]
[ "* # noqa from .ft_stemming_trainer import FastTextStemmingTrainer # noqa from .w2v_stemming_trainer import Word2VecStemmingTrainer", ".corpus_stemmer import * # noqa from .ft_stemming_trainer import FastTextStemmingTrainer # noqa from .w2v_stemming_trainer", ".stem_generator import StemDict, StemGenerator, reduce_stem_dict # noqa from .stemming_trainer import StemmingTrainer, get_stats_path #", "from .corpus_stemmer import * # noqa from .ft_stemming_trainer import FastTextStemmingTrainer # noqa from", "# noqa from .ft_stemming_trainer import FastTextStemmingTrainer # noqa from .w2v_stemming_trainer import Word2VecStemmingTrainer #", "noqa from .corpus_stemmer import * # noqa from .ft_stemming_trainer import FastTextStemmingTrainer # noqa", "from .stem_generator import StemDict, StemGenerator, reduce_stem_dict # noqa from .stemming_trainer import StemmingTrainer, get_stats_path", "get_stats_path # noqa from .corpus_stemmer import * # noqa from .ft_stemming_trainer import FastTextStemmingTrainer", "import StemmingTrainer, get_stats_path # noqa from .corpus_stemmer import * # noqa from .ft_stemming_trainer", "import StemDict, StemGenerator, reduce_stem_dict # noqa from .stemming_trainer import StemmingTrainer, get_stats_path # noqa", ".stemming_trainer import StemmingTrainer, get_stats_path # noqa from .corpus_stemmer import * # noqa from", "# noqa from .stemming_trainer import StemmingTrainer, get_stats_path # noqa from .corpus_stemmer import *", "StemDict, StemGenerator, reduce_stem_dict # noqa from .stemming_trainer import StemmingTrainer, get_stats_path # noqa from", "StemmingTrainer, get_stats_path # noqa from .corpus_stemmer import * # noqa from .ft_stemming_trainer import", "# noqa from .corpus_stemmer import * # noqa from .ft_stemming_trainer import FastTextStemmingTrainer #", "noqa from .stemming_trainer import StemmingTrainer, get_stats_path # noqa from .corpus_stemmer import * #", "from .stemming_trainer import StemmingTrainer, get_stats_path # noqa from 
.corpus_stemmer import * # noqa", "noqa from .ft_stemming_trainer import FastTextStemmingTrainer # noqa from .w2v_stemming_trainer import Word2VecStemmingTrainer # noqa", "reduce_stem_dict # noqa from .stemming_trainer import StemmingTrainer, get_stats_path # noqa from .corpus_stemmer import", "import * # noqa from .ft_stemming_trainer import FastTextStemmingTrainer # noqa from .w2v_stemming_trainer import", "StemGenerator, reduce_stem_dict # noqa from .stemming_trainer import StemmingTrainer, get_stats_path # noqa from .corpus_stemmer" ]
[ "for i in range(ninputs): s.connect(s.arb.grant_reqs[i], s.index_peek_rdy[i]) # call an input if granted and", "import * from lizard.util.rtl.interface import UseInterface from lizard.util.rtl.method import MethodSpec from lizard.util.rtl.case_mux import", "[] for client in clients: reqs.extend([ MethodSpec( '{}_peek'.format(client), args=None, rets={ 'msg': s.interface.MsgType, },", "lizard.util.rtl.case_mux import CaseMux, CaseMuxInterface from lizard.util.rtl.arbiters import ArbiterInterface, PriorityArbiter from lizard.util.rtl.pipeline_stage import PipelineStageInterface", "'{}_take'.format(client)).call, s.index_take_call[i]) s.arb = PriorityArbiter(ArbiterInterface(ninputs)) s.mux = CaseMux( CaseMuxInterface(s.interface.MsgType, Bits(ninputs), ninputs), [1 <<", "rdy=True, ), MethodSpec( '{}_take'.format(client), args=None, rets=None, call=True, rdy=False, ), ]) s.require(*reqs) ninputs =", "s.index_take_call[i].v = s.arb.grant_grant[i] & s.take_call s.connect(s.mux.mux_in_[i], s.index_peek_msg[i]) s.connect(s.mux.mux_default, 0) s.connect(s.mux.mux_select, s.arb.grant_grant) s.connect(s.peek_msg, s.mux.mux_out)", "client in clients: reqs.extend([ MethodSpec( '{}_peek'.format(client), args=None, rets={ 'msg': s.interface.MsgType, }, call=False, rdy=True,", "def compute_ready(): s.peek_rdy.v = (s.arb.grant_grant != 0) for i in range(ninputs): s.connect(s.arb.grant_reqs[i], s.index_peek_rdy[i])", "i, client in enumerate(clients): s.connect(s.index_peek_msg[i], getattr(s, '{}_peek'.format(client)).msg) s.connect(s.index_peek_rdy[i], getattr(s, '{}_peek'.format(client)).rdy) s.connect(getattr(s, '{}_take'.format(client)).call, s.index_take_call[i])", "in range(ninputs)]) @s.combinational def compute_ready(): s.peek_rdy.v = (s.arb.grant_grant != 0) for i in", "in enumerate(clients): s.connect(s.index_peek_msg[i], getattr(s, '{}_peek'.format(client)).msg) s.connect(s.index_peek_rdy[i], getattr(s, '{}_peek'.format(client)).rdy) s.connect(getattr(s, 
'{}_take'.format(client)).call, s.index_take_call[i]) s.arb =", "import PipelineStageInterface def PipelineArbiterInterface(OutType): return PipelineStageInterface(OutType, None) class PipelineArbiter(Model): def __init__(s, interface, clients):", "PipelineArbiterInterface(OutType): return PipelineStageInterface(OutType, None) class PipelineArbiter(Model): def __init__(s, interface, clients): UseInterface(s, interface) reqs", "for client in clients: reqs.extend([ MethodSpec( '{}_peek'.format(client), args=None, rets={ 'msg': s.interface.MsgType, }, call=False,", "lizard.util.rtl.interface import UseInterface from lizard.util.rtl.method import MethodSpec from lizard.util.rtl.case_mux import CaseMux, CaseMuxInterface from", "UseInterface from lizard.util.rtl.method import MethodSpec from lizard.util.rtl.case_mux import CaseMux, CaseMuxInterface from lizard.util.rtl.arbiters import", "range(ninputs)] s.index_take_call = [Wire(1) for _ in range(ninputs)] for i, client in enumerate(clients):", "in range(ninputs): s.connect(s.arb.grant_reqs[i], s.index_peek_rdy[i]) # call an input if granted and we are", "@s.combinational def compute_call(i=i): s.index_take_call[i].v = s.arb.grant_grant[i] & s.take_call s.connect(s.mux.mux_in_[i], s.index_peek_msg[i]) s.connect(s.mux.mux_default, 0) s.connect(s.mux.mux_select,", "rets=None, call=True, rdy=False, ), ]) s.require(*reqs) ninputs = len(clients) s.index_peek_msg = [Wire(s.interface.MsgType) for", "s.connect(s.index_peek_msg[i], getattr(s, '{}_peek'.format(client)).msg) s.connect(s.index_peek_rdy[i], getattr(s, '{}_peek'.format(client)).rdy) s.connect(getattr(s, '{}_take'.format(client)).call, s.index_take_call[i]) s.arb = PriorityArbiter(ArbiterInterface(ninputs)) s.mux", "call an input if granted and we are being called @s.combinational def compute_call(i=i):", "import MethodSpec from lizard.util.rtl.case_mux import CaseMux, CaseMuxInterface from lizard.util.rtl.arbiters import ArbiterInterface, PriorityArbiter from", 
"s.index_peek_rdy = [Wire(1) for _ in range(ninputs)] s.index_take_call = [Wire(1) for _ in", "* from lizard.util.rtl.interface import UseInterface from lizard.util.rtl.method import MethodSpec from lizard.util.rtl.case_mux import CaseMux,", "s.index_take_call = [Wire(1) for _ in range(ninputs)] for i, client in enumerate(clients): s.connect(s.index_peek_msg[i],", "s.index_take_call[i]) s.arb = PriorityArbiter(ArbiterInterface(ninputs)) s.mux = CaseMux( CaseMuxInterface(s.interface.MsgType, Bits(ninputs), ninputs), [1 << i", "s.mux = CaseMux( CaseMuxInterface(s.interface.MsgType, Bits(ninputs), ninputs), [1 << i for i in range(ninputs)])", "}, call=False, rdy=True, ), MethodSpec( '{}_take'.format(client), args=None, rets=None, call=True, rdy=False, ), ]) s.require(*reqs)", "granted and we are being called @s.combinational def compute_call(i=i): s.index_take_call[i].v = s.arb.grant_grant[i] &", "# call an input if granted and we are being called @s.combinational def", "reqs = [] for client in clients: reqs.extend([ MethodSpec( '{}_peek'.format(client), args=None, rets={ 'msg':", "_ in range(ninputs)] for i, client in enumerate(clients): s.connect(s.index_peek_msg[i], getattr(s, '{}_peek'.format(client)).msg) s.connect(s.index_peek_rdy[i], getattr(s,", "args=None, rets=None, call=True, rdy=False, ), ]) s.require(*reqs) ninputs = len(clients) s.index_peek_msg = [Wire(s.interface.MsgType)", "_ in range(ninputs)] s.index_take_call = [Wire(1) for _ in range(ninputs)] for i, client", "import CaseMux, CaseMuxInterface from lizard.util.rtl.arbiters import ArbiterInterface, PriorityArbiter from lizard.util.rtl.pipeline_stage import PipelineStageInterface def", "if granted and we are being called @s.combinational def compute_call(i=i): s.index_take_call[i].v = s.arb.grant_grant[i]", "'{}_peek'.format(client)).rdy) s.connect(getattr(s, '{}_take'.format(client)).call, s.index_take_call[i]) s.arb = PriorityArbiter(ArbiterInterface(ninputs)) s.mux = CaseMux( 
CaseMuxInterface(s.interface.MsgType, Bits(ninputs), ninputs),", "PriorityArbiter from lizard.util.rtl.pipeline_stage import PipelineStageInterface def PipelineArbiterInterface(OutType): return PipelineStageInterface(OutType, None) class PipelineArbiter(Model): def", "for _ in range(ninputs)] s.index_take_call = [Wire(1) for _ in range(ninputs)] for i,", "'{}_peek'.format(client)).msg) s.connect(s.index_peek_rdy[i], getattr(s, '{}_peek'.format(client)).rdy) s.connect(getattr(s, '{}_take'.format(client)).call, s.index_take_call[i]) s.arb = PriorityArbiter(ArbiterInterface(ninputs)) s.mux = CaseMux(", "lizard.util.rtl.pipeline_stage import PipelineStageInterface def PipelineArbiterInterface(OutType): return PipelineStageInterface(OutType, None) class PipelineArbiter(Model): def __init__(s, interface,", "from pymtl import * from lizard.util.rtl.interface import UseInterface from lizard.util.rtl.method import MethodSpec from", "0) for i in range(ninputs): s.connect(s.arb.grant_reqs[i], s.index_peek_rdy[i]) # call an input if granted", "i in range(ninputs): s.connect(s.arb.grant_reqs[i], s.index_peek_rdy[i]) # call an input if granted and we", "[Wire(s.interface.MsgType) for _ in range(ninputs)] s.index_peek_rdy = [Wire(1) for _ in range(ninputs)] s.index_take_call", "for _ in range(ninputs)] s.index_peek_rdy = [Wire(1) for _ in range(ninputs)] s.index_take_call =", "called @s.combinational def compute_call(i=i): s.index_take_call[i].v = s.arb.grant_grant[i] & s.take_call s.connect(s.mux.mux_in_[i], s.index_peek_msg[i]) s.connect(s.mux.mux_default, 0)", "), ]) s.require(*reqs) ninputs = len(clients) s.index_peek_msg = [Wire(s.interface.MsgType) for _ in range(ninputs)]", "args=None, rets={ 'msg': s.interface.MsgType, }, call=False, rdy=True, ), MethodSpec( '{}_take'.format(client), args=None, rets=None, call=True,", "[Wire(1) for _ in range(ninputs)] for i, client in enumerate(clients): s.connect(s.index_peek_msg[i], getattr(s, '{}_peek'.format(client)).msg)", "[Wire(1) 
for _ in range(ninputs)] s.index_take_call = [Wire(1) for _ in range(ninputs)] for", "in clients: reqs.extend([ MethodSpec( '{}_peek'.format(client), args=None, rets={ 'msg': s.interface.MsgType, }, call=False, rdy=True, ),", "@s.combinational def compute_ready(): s.peek_rdy.v = (s.arb.grant_grant != 0) for i in range(ninputs): s.connect(s.arb.grant_reqs[i],", "class PipelineArbiter(Model): def __init__(s, interface, clients): UseInterface(s, interface) reqs = [] for client", "= [Wire(1) for _ in range(ninputs)] for i, client in enumerate(clients): s.connect(s.index_peek_msg[i], getattr(s,", "and we are being called @s.combinational def compute_call(i=i): s.index_take_call[i].v = s.arb.grant_grant[i] & s.take_call", "'{}_peek'.format(client), args=None, rets={ 'msg': s.interface.MsgType, }, call=False, rdy=True, ), MethodSpec( '{}_take'.format(client), args=None, rets=None,", "enumerate(clients): s.connect(s.index_peek_msg[i], getattr(s, '{}_peek'.format(client)).msg) s.connect(s.index_peek_rdy[i], getattr(s, '{}_peek'.format(client)).rdy) s.connect(getattr(s, '{}_take'.format(client)).call, s.index_take_call[i]) s.arb = PriorityArbiter(ArbiterInterface(ninputs))", "PipelineStageInterface def PipelineArbiterInterface(OutType): return PipelineStageInterface(OutType, None) class PipelineArbiter(Model): def __init__(s, interface, clients): UseInterface(s,", "), MethodSpec( '{}_take'.format(client), args=None, rets=None, call=True, rdy=False, ), ]) s.require(*reqs) ninputs = len(clients)", "CaseMuxInterface(s.interface.MsgType, Bits(ninputs), ninputs), [1 << i for i in range(ninputs)]) @s.combinational def compute_ready():", "import ArbiterInterface, PriorityArbiter from lizard.util.rtl.pipeline_stage import PipelineStageInterface def PipelineArbiterInterface(OutType): return PipelineStageInterface(OutType, None) class", "for i in range(ninputs)]) @s.combinational def compute_ready(): s.peek_rdy.v = (s.arb.grant_grant != 0) for", "range(ninputs)] s.index_peek_rdy = 
[Wire(1) for _ in range(ninputs)] s.index_take_call = [Wire(1) for _", "rdy=False, ), ]) s.require(*reqs) ninputs = len(clients) s.index_peek_msg = [Wire(s.interface.MsgType) for _ in", "MethodSpec from lizard.util.rtl.case_mux import CaseMux, CaseMuxInterface from lizard.util.rtl.arbiters import ArbiterInterface, PriorityArbiter from lizard.util.rtl.pipeline_stage", "interface) reqs = [] for client in clients: reqs.extend([ MethodSpec( '{}_peek'.format(client), args=None, rets={", "ArbiterInterface, PriorityArbiter from lizard.util.rtl.pipeline_stage import PipelineStageInterface def PipelineArbiterInterface(OutType): return PipelineStageInterface(OutType, None) class PipelineArbiter(Model):", "range(ninputs)] for i, client in enumerate(clients): s.connect(s.index_peek_msg[i], getattr(s, '{}_peek'.format(client)).msg) s.connect(s.index_peek_rdy[i], getattr(s, '{}_peek'.format(client)).rdy) s.connect(getattr(s,", "from lizard.util.rtl.arbiters import ArbiterInterface, PriorityArbiter from lizard.util.rtl.pipeline_stage import PipelineStageInterface def PipelineArbiterInterface(OutType): return PipelineStageInterface(OutType,", "PipelineArbiter(Model): def __init__(s, interface, clients): UseInterface(s, interface) reqs = [] for client in", "!= 0) for i in range(ninputs): s.connect(s.arb.grant_reqs[i], s.index_peek_rdy[i]) # call an input if", "(s.arb.grant_grant != 0) for i in range(ninputs): s.connect(s.arb.grant_reqs[i], s.index_peek_rdy[i]) # call an input", "UseInterface(s, interface) reqs = [] for client in clients: reqs.extend([ MethodSpec( '{}_peek'.format(client), args=None,", "CaseMux( CaseMuxInterface(s.interface.MsgType, Bits(ninputs), ninputs), [1 << i for i in range(ninputs)]) @s.combinational def", "being called @s.combinational def compute_call(i=i): s.index_take_call[i].v = s.arb.grant_grant[i] & s.take_call s.connect(s.mux.mux_in_[i], s.index_peek_msg[i]) s.connect(s.mux.mux_default,", "'{}_take'.format(client), args=None, rets=None, call=True, 
rdy=False, ), ]) s.require(*reqs) ninputs = len(clients) s.index_peek_msg =", "MethodSpec( '{}_take'.format(client), args=None, rets=None, call=True, rdy=False, ), ]) s.require(*reqs) ninputs = len(clients) s.index_peek_msg", "s.connect(getattr(s, '{}_take'.format(client)).call, s.index_take_call[i]) s.arb = PriorityArbiter(ArbiterInterface(ninputs)) s.mux = CaseMux( CaseMuxInterface(s.interface.MsgType, Bits(ninputs), ninputs), [1", "MethodSpec( '{}_peek'.format(client), args=None, rets={ 'msg': s.interface.MsgType, }, call=False, rdy=True, ), MethodSpec( '{}_take'.format(client), args=None,", "are being called @s.combinational def compute_call(i=i): s.index_take_call[i].v = s.arb.grant_grant[i] & s.take_call s.connect(s.mux.mux_in_[i], s.index_peek_msg[i])", "PriorityArbiter(ArbiterInterface(ninputs)) s.mux = CaseMux( CaseMuxInterface(s.interface.MsgType, Bits(ninputs), ninputs), [1 << i for i in", "len(clients) s.index_peek_msg = [Wire(s.interface.MsgType) for _ in range(ninputs)] s.index_peek_rdy = [Wire(1) for _", "import UseInterface from lizard.util.rtl.method import MethodSpec from lizard.util.rtl.case_mux import CaseMux, CaseMuxInterface from lizard.util.rtl.arbiters", "= CaseMux( CaseMuxInterface(s.interface.MsgType, Bits(ninputs), ninputs), [1 << i for i in range(ninputs)]) @s.combinational", "= [Wire(1) for _ in range(ninputs)] s.index_take_call = [Wire(1) for _ in range(ninputs)]", "s.arb = PriorityArbiter(ArbiterInterface(ninputs)) s.mux = CaseMux( CaseMuxInterface(s.interface.MsgType, Bits(ninputs), ninputs), [1 << i for", "]) s.require(*reqs) ninputs = len(clients) s.index_peek_msg = [Wire(s.interface.MsgType) for _ in range(ninputs)] s.index_peek_rdy", "ninputs = len(clients) s.index_peek_msg = [Wire(s.interface.MsgType) for _ in range(ninputs)] s.index_peek_rdy = [Wire(1)", "<filename>lizard/core/rtl/pipeline_arbiter.py from pymtl import * from lizard.util.rtl.interface import UseInterface from lizard.util.rtl.method import MethodSpec", "i in 
range(ninputs)]) @s.combinational def compute_ready(): s.peek_rdy.v = (s.arb.grant_grant != 0) for i", "PipelineStageInterface(OutType, None) class PipelineArbiter(Model): def __init__(s, interface, clients): UseInterface(s, interface) reqs = []", "None) class PipelineArbiter(Model): def __init__(s, interface, clients): UseInterface(s, interface) reqs = [] for", "we are being called @s.combinational def compute_call(i=i): s.index_take_call[i].v = s.arb.grant_grant[i] & s.take_call s.connect(s.mux.mux_in_[i],", "s.connect(s.index_peek_rdy[i], getattr(s, '{}_peek'.format(client)).rdy) s.connect(getattr(s, '{}_take'.format(client)).call, s.index_take_call[i]) s.arb = PriorityArbiter(ArbiterInterface(ninputs)) s.mux = CaseMux( CaseMuxInterface(s.interface.MsgType,", "in range(ninputs)] s.index_take_call = [Wire(1) for _ in range(ninputs)] for i, client in", "def __init__(s, interface, clients): UseInterface(s, interface) reqs = [] for client in clients:", "s.peek_rdy.v = (s.arb.grant_grant != 0) for i in range(ninputs): s.connect(s.arb.grant_reqs[i], s.index_peek_rdy[i]) # call", "reqs.extend([ MethodSpec( '{}_peek'.format(client), args=None, rets={ 'msg': s.interface.MsgType, }, call=False, rdy=True, ), MethodSpec( '{}_take'.format(client),", "pymtl import * from lizard.util.rtl.interface import UseInterface from lizard.util.rtl.method import MethodSpec from lizard.util.rtl.case_mux", "an input if granted and we are being called @s.combinational def compute_call(i=i): s.index_take_call[i].v", "in range(ninputs)] for i, client in enumerate(clients): s.connect(s.index_peek_msg[i], getattr(s, '{}_peek'.format(client)).msg) s.connect(s.index_peek_rdy[i], getattr(s, '{}_peek'.format(client)).rdy)", "lizard.util.rtl.arbiters import ArbiterInterface, PriorityArbiter from lizard.util.rtl.pipeline_stage import PipelineStageInterface def PipelineArbiterInterface(OutType): return PipelineStageInterface(OutType, None)", "from lizard.util.rtl.interface import UseInterface from 
lizard.util.rtl.method import MethodSpec from lizard.util.rtl.case_mux import CaseMux, CaseMuxInterface", "s.index_peek_rdy[i]) # call an input if granted and we are being called @s.combinational", "in range(ninputs)] s.index_peek_rdy = [Wire(1) for _ in range(ninputs)] s.index_take_call = [Wire(1) for", "from lizard.util.rtl.case_mux import CaseMux, CaseMuxInterface from lizard.util.rtl.arbiters import ArbiterInterface, PriorityArbiter from lizard.util.rtl.pipeline_stage import", "from lizard.util.rtl.method import MethodSpec from lizard.util.rtl.case_mux import CaseMux, CaseMuxInterface from lizard.util.rtl.arbiters import ArbiterInterface,", "_ in range(ninputs)] s.index_peek_rdy = [Wire(1) for _ in range(ninputs)] s.index_take_call = [Wire(1)", "= (s.arb.grant_grant != 0) for i in range(ninputs): s.connect(s.arb.grant_reqs[i], s.index_peek_rdy[i]) # call an", "CaseMuxInterface from lizard.util.rtl.arbiters import ArbiterInterface, PriorityArbiter from lizard.util.rtl.pipeline_stage import PipelineStageInterface def PipelineArbiterInterface(OutType): return", "clients): UseInterface(s, interface) reqs = [] for client in clients: reqs.extend([ MethodSpec( '{}_peek'.format(client),", "getattr(s, '{}_peek'.format(client)).rdy) s.connect(getattr(s, '{}_take'.format(client)).call, s.index_take_call[i]) s.arb = PriorityArbiter(ArbiterInterface(ninputs)) s.mux = CaseMux( CaseMuxInterface(s.interface.MsgType, Bits(ninputs),", "range(ninputs)]) @s.combinational def compute_ready(): s.peek_rdy.v = (s.arb.grant_grant != 0) for i in range(ninputs):", "getattr(s, '{}_peek'.format(client)).msg) s.connect(s.index_peek_rdy[i], getattr(s, '{}_peek'.format(client)).rdy) s.connect(getattr(s, '{}_take'.format(client)).call, s.index_take_call[i]) s.arb = PriorityArbiter(ArbiterInterface(ninputs)) s.mux =", "def PipelineArbiterInterface(OutType): return PipelineStageInterface(OutType, None) class PipelineArbiter(Model): def __init__(s, interface, clients): UseInterface(s, 
interface)", "= [] for client in clients: reqs.extend([ MethodSpec( '{}_peek'.format(client), args=None, rets={ 'msg': s.interface.MsgType,", "Bits(ninputs), ninputs), [1 << i for i in range(ninputs)]) @s.combinational def compute_ready(): s.peek_rdy.v", "def compute_call(i=i): s.index_take_call[i].v = s.arb.grant_grant[i] & s.take_call s.connect(s.mux.mux_in_[i], s.index_peek_msg[i]) s.connect(s.mux.mux_default, 0) s.connect(s.mux.mux_select, s.arb.grant_grant)", "ninputs), [1 << i for i in range(ninputs)]) @s.combinational def compute_ready(): s.peek_rdy.v =", "= PriorityArbiter(ArbiterInterface(ninputs)) s.mux = CaseMux( CaseMuxInterface(s.interface.MsgType, Bits(ninputs), ninputs), [1 << i for i", "clients: reqs.extend([ MethodSpec( '{}_peek'.format(client), args=None, rets={ 'msg': s.interface.MsgType, }, call=False, rdy=True, ), MethodSpec(", "lizard.util.rtl.method import MethodSpec from lizard.util.rtl.case_mux import CaseMux, CaseMuxInterface from lizard.util.rtl.arbiters import ArbiterInterface, PriorityArbiter", "CaseMux, CaseMuxInterface from lizard.util.rtl.arbiters import ArbiterInterface, PriorityArbiter from lizard.util.rtl.pipeline_stage import PipelineStageInterface def PipelineArbiterInterface(OutType):", "for i, client in enumerate(clients): s.connect(s.index_peek_msg[i], getattr(s, '{}_peek'.format(client)).msg) s.connect(s.index_peek_rdy[i], getattr(s, '{}_peek'.format(client)).rdy) s.connect(getattr(s, '{}_take'.format(client)).call,", "client in enumerate(clients): s.connect(s.index_peek_msg[i], getattr(s, '{}_peek'.format(client)).msg) s.connect(s.index_peek_rdy[i], getattr(s, '{}_peek'.format(client)).rdy) s.connect(getattr(s, '{}_take'.format(client)).call, s.index_take_call[i]) s.arb", "'msg': s.interface.MsgType, }, call=False, rdy=True, ), MethodSpec( '{}_take'.format(client), args=None, rets=None, call=True, rdy=False, ),", "range(ninputs): s.connect(s.arb.grant_reqs[i], s.index_peek_rdy[i]) # call an input if granted and we are 
being", "interface, clients): UseInterface(s, interface) reqs = [] for client in clients: reqs.extend([ MethodSpec(", "__init__(s, interface, clients): UseInterface(s, interface) reqs = [] for client in clients: reqs.extend([", "for _ in range(ninputs)] for i, client in enumerate(clients): s.connect(s.index_peek_msg[i], getattr(s, '{}_peek'.format(client)).msg) s.connect(s.index_peek_rdy[i],", "call=False, rdy=True, ), MethodSpec( '{}_take'.format(client), args=None, rets=None, call=True, rdy=False, ), ]) s.require(*reqs) ninputs", "compute_ready(): s.peek_rdy.v = (s.arb.grant_grant != 0) for i in range(ninputs): s.connect(s.arb.grant_reqs[i], s.index_peek_rdy[i]) #", "s.connect(s.arb.grant_reqs[i], s.index_peek_rdy[i]) # call an input if granted and we are being called", "input if granted and we are being called @s.combinational def compute_call(i=i): s.index_take_call[i].v =", "<< i for i in range(ninputs)]) @s.combinational def compute_ready(): s.peek_rdy.v = (s.arb.grant_grant !=", "[1 << i for i in range(ninputs)]) @s.combinational def compute_ready(): s.peek_rdy.v = (s.arb.grant_grant", "rets={ 'msg': s.interface.MsgType, }, call=False, rdy=True, ), MethodSpec( '{}_take'.format(client), args=None, rets=None, call=True, rdy=False,", "i for i in range(ninputs)]) @s.combinational def compute_ready(): s.peek_rdy.v = (s.arb.grant_grant != 0)", "return PipelineStageInterface(OutType, None) class PipelineArbiter(Model): def __init__(s, interface, clients): UseInterface(s, interface) reqs =", "= len(clients) s.index_peek_msg = [Wire(s.interface.MsgType) for _ in range(ninputs)] s.index_peek_rdy = [Wire(1) for", "= [Wire(s.interface.MsgType) for _ in range(ninputs)] s.index_peek_rdy = [Wire(1) for _ in range(ninputs)]", "s.interface.MsgType, }, call=False, rdy=True, ), MethodSpec( '{}_take'.format(client), args=None, rets=None, call=True, rdy=False, ), ])", "from lizard.util.rtl.pipeline_stage import PipelineStageInterface def PipelineArbiterInterface(OutType): return 
PipelineStageInterface(OutType, None) class PipelineArbiter(Model): def __init__(s,", "s.require(*reqs) ninputs = len(clients) s.index_peek_msg = [Wire(s.interface.MsgType) for _ in range(ninputs)] s.index_peek_rdy =", "call=True, rdy=False, ), ]) s.require(*reqs) ninputs = len(clients) s.index_peek_msg = [Wire(s.interface.MsgType) for _", "compute_call(i=i): s.index_take_call[i].v = s.arb.grant_grant[i] & s.take_call s.connect(s.mux.mux_in_[i], s.index_peek_msg[i]) s.connect(s.mux.mux_default, 0) s.connect(s.mux.mux_select, s.arb.grant_grant) s.connect(s.peek_msg,", "s.index_peek_msg = [Wire(s.interface.MsgType) for _ in range(ninputs)] s.index_peek_rdy = [Wire(1) for _ in" ]
[ "= F.relu(self.fc1(x)) x = self.dropout2(x) x = self.fc2(x) output = F.log_softmax(x, dim=1) return", "= self.fc2(x) output = F.log_softmax(x, dim=1) return output def log_weights(self, step, writer): writer.add_histogram('weights/conv1/weight',", "Net() def create_parallel_model(): return DataParallelPassthrough(Net()) class Net(nn.Module): def __init__(self): super(Net, self).__init__() self.conv1 =", "nn.Dropout2d(0.25) self.fc1 = nn.Linear(9216, 128) self.dropout2 = nn.Dropout2d(0.25) self.fc2 = nn.Linear(128, 10) def", "solves https://github.com/pytorch/pytorch/issues/16885 Basically, to allow the access of a model wrapped under DataParallel", "step) writer.add_histogram('weights/fc1/weight', self.fc1.weight.data, step) writer.add_histogram('weights/fc1/bias', self.fc1.bias.data, step) writer.add_histogram('weights/fc2/weight', self.fc2.weight.data, step) writer.add_histogram('weights/fc2/bias', self.fc2.bias.data, step)", "Net(nn.Module): def __init__(self): super(Net, self).__init__() self.conv1 = nn.Conv2d(1, 32, 3, 1) self.conv2 =", "<gh_stars>1-10 import torch import torch.nn as nn import torch.nn.functional as F def create_model():", "torch.flatten(self.dropout1(x), 1) x = F.relu(self.fc1(x)) x = self.dropout2(x) x = self.fc2(x) output =", "step, writer): writer.add_histogram('weights/conv1/weight', self.conv1.weight.data, step) writer.add_histogram('weights/conv1/bias', self.conv1.bias.data, step) writer.add_histogram('weights/conv2/weight', self.conv2.weight.data, step) writer.add_histogram('weights/conv2/bias', self.conv2.bias.data,", "access the underlying attributes with .module (e.g. 
model.module.someattr) \"\"\" def __getattr__(self, name): try:", "= F.relu(self.conv2(x)) x = F.max_pool2d(x, 2) x = torch.flatten(self.dropout1(x), 1) x = F.relu(self.fc1(x))", "DataParallelPassthrough(Net()) class Net(nn.Module): def __init__(self): super(Net, self).__init__() self.conv1 = nn.Conv2d(1, 32, 3, 1)", "3, 1) self.conv2 = nn.Conv2d(32, 64, 3, 1) self.dropout1 = nn.Dropout2d(0.25) self.fc1 =", "class Net(nn.Module): def __init__(self): super(Net, self).__init__() self.conv1 = nn.Conv2d(1, 32, 3, 1) self.conv2", "self.conv2.bias.data, step) writer.add_histogram('weights/fc1/weight', self.fc1.weight.data, step) writer.add_histogram('weights/fc1/bias', self.fc1.bias.data, step) writer.add_histogram('weights/fc2/weight', self.fc2.weight.data, step) writer.add_histogram('weights/fc2/bias', self.fc2.bias.data,", "__init__(self): super(Net, self).__init__() self.conv1 = nn.Conv2d(1, 32, 3, 1) self.conv2 = nn.Conv2d(32, 64,", "access of a model wrapped under DataParallel one needs to always access the", "allow the access of a model wrapped under DataParallel one needs to always", "return DataParallelPassthrough(Net()) class Net(nn.Module): def __init__(self): super(Net, self).__init__() self.conv1 = nn.Conv2d(1, 32, 3,", "step) writer.add_histogram('weights/fc1/bias', self.fc1.bias.data, step) writer.add_histogram('weights/fc2/weight', self.fc2.weight.data, step) writer.add_histogram('weights/fc2/bias', self.fc2.bias.data, step) class DataParallelPassthrough(torch.nn.DataParallel): \"\"\"", "= self.dropout2(x) x = self.fc2(x) output = F.log_softmax(x, dim=1) return output def log_weights(self,", "F.relu(self.fc1(x)) x = self.dropout2(x) x = self.fc2(x) output = F.log_softmax(x, dim=1) return output", "def log_weights(self, step, writer): writer.add_histogram('weights/conv1/weight', self.conv1.weight.data, step) writer.add_histogram('weights/conv1/bias', self.conv1.bias.data, step) writer.add_histogram('weights/conv2/weight', self.conv2.weight.data, step)", 
"self).__init__() self.conv1 = nn.Conv2d(1, 32, 3, 1) self.conv2 = nn.Conv2d(32, 64, 3, 1)", "dim=1) return output def log_weights(self, step, writer): writer.add_histogram('weights/conv1/weight', self.conv1.weight.data, step) writer.add_histogram('weights/conv1/bias', self.conv1.bias.data, step)", "as F def create_model(): return Net() def create_parallel_model(): return DataParallelPassthrough(Net()) class Net(nn.Module): def", "F.relu(self.conv2(x)) x = F.max_pool2d(x, 2) x = torch.flatten(self.dropout1(x), 1) x = F.relu(self.fc1(x)) x", "2) x = torch.flatten(self.dropout1(x), 1) x = F.relu(self.fc1(x)) x = self.dropout2(x) x =", "self.conv2 = nn.Conv2d(32, 64, 3, 1) self.dropout1 = nn.Dropout2d(0.25) self.fc1 = nn.Linear(9216, 128)", "self.dropout2 = nn.Dropout2d(0.25) self.fc2 = nn.Linear(128, 10) def forward(self, x): x = F.relu(self.conv1(x))", "def create_model(): return Net() def create_parallel_model(): return DataParallelPassthrough(Net()) class Net(nn.Module): def __init__(self): super(Net,", "writer.add_histogram('weights/fc2/bias', self.fc2.bias.data, step) class DataParallelPassthrough(torch.nn.DataParallel): \"\"\" This class solves https://github.com/pytorch/pytorch/issues/16885 Basically, to allow", "self.conv1 = nn.Conv2d(1, 32, 3, 1) self.conv2 = nn.Conv2d(32, 64, 3, 1) self.dropout1", "1) self.conv2 = nn.Conv2d(32, 64, 3, 1) self.dropout1 = nn.Dropout2d(0.25) self.fc1 = nn.Linear(9216,", "x): x = F.relu(self.conv1(x)) x = F.relu(self.conv2(x)) x = F.max_pool2d(x, 2) x =", "writer.add_histogram('weights/conv1/weight', self.conv1.weight.data, step) writer.add_histogram('weights/conv1/bias', self.conv1.bias.data, step) writer.add_histogram('weights/conv2/weight', self.conv2.weight.data, step) writer.add_histogram('weights/conv2/bias', self.conv2.bias.data, step) writer.add_histogram('weights/fc1/weight',", "F.relu(self.conv1(x)) x = F.relu(self.conv2(x)) x = F.max_pool2d(x, 2) x = torch.flatten(self.dropout1(x), 1) x", "attributes with .module 
(e.g. model.module.someattr) \"\"\" def __getattr__(self, name): try: return super().__getattr__(name) except", "(e.g. model.module.someattr) \"\"\" def __getattr__(self, name): try: return super().__getattr__(name) except AttributeError: return getattr(self.module,", "= nn.Dropout2d(0.25) self.fc1 = nn.Linear(9216, 128) self.dropout2 = nn.Dropout2d(0.25) self.fc2 = nn.Linear(128, 10)", "= nn.Linear(128, 10) def forward(self, x): x = F.relu(self.conv1(x)) x = F.relu(self.conv2(x)) x", "output = F.log_softmax(x, dim=1) return output def log_weights(self, step, writer): writer.add_histogram('weights/conv1/weight', self.conv1.weight.data, step)", "class DataParallelPassthrough(torch.nn.DataParallel): \"\"\" This class solves https://github.com/pytorch/pytorch/issues/16885 Basically, to allow the access of", "= nn.Dropout2d(0.25) self.fc2 = nn.Linear(128, 10) def forward(self, x): x = F.relu(self.conv1(x)) x", "x = F.relu(self.fc1(x)) x = self.dropout2(x) x = self.fc2(x) output = F.log_softmax(x, dim=1)", "\"\"\" This class solves https://github.com/pytorch/pytorch/issues/16885 Basically, to allow the access of a model", "writer): writer.add_histogram('weights/conv1/weight', self.conv1.weight.data, step) writer.add_histogram('weights/conv1/bias', self.conv1.bias.data, step) writer.add_histogram('weights/conv2/weight', self.conv2.weight.data, step) writer.add_histogram('weights/conv2/bias', self.conv2.bias.data, step)", "https://github.com/pytorch/pytorch/issues/16885 Basically, to allow the access of a model wrapped under DataParallel one", "10) def forward(self, x): x = F.relu(self.conv1(x)) x = F.relu(self.conv2(x)) x = F.max_pool2d(x,", "import torch.nn as nn import torch.nn.functional as F def create_model(): return Net() def", "the access of a model wrapped under DataParallel one needs to always access", "self.fc1 = nn.Linear(9216, 128) self.dropout2 = nn.Dropout2d(0.25) self.fc2 = nn.Linear(128, 10) def forward(self,", "self.dropout1 = nn.Dropout2d(0.25) self.fc1 
= nn.Linear(9216, 128) self.dropout2 = nn.Dropout2d(0.25) self.fc2 = nn.Linear(128,", "create_model(): return Net() def create_parallel_model(): return DataParallelPassthrough(Net()) class Net(nn.Module): def __init__(self): super(Net, self).__init__()", "self.fc2.weight.data, step) writer.add_histogram('weights/fc2/bias', self.fc2.bias.data, step) class DataParallelPassthrough(torch.nn.DataParallel): \"\"\" This class solves https://github.com/pytorch/pytorch/issues/16885 Basically,", "nn.Conv2d(1, 32, 3, 1) self.conv2 = nn.Conv2d(32, 64, 3, 1) self.dropout1 = nn.Dropout2d(0.25)", "model.module.someattr) \"\"\" def __getattr__(self, name): try: return super().__getattr__(name) except AttributeError: return getattr(self.module, name)", "step) writer.add_histogram('weights/fc2/bias', self.fc2.bias.data, step) class DataParallelPassthrough(torch.nn.DataParallel): \"\"\" This class solves https://github.com/pytorch/pytorch/issues/16885 Basically, to", "return Net() def create_parallel_model(): return DataParallelPassthrough(Net()) class Net(nn.Module): def __init__(self): super(Net, self).__init__() self.conv1", "step) writer.add_histogram('weights/conv2/weight', self.conv2.weight.data, step) writer.add_histogram('weights/conv2/bias', self.conv2.bias.data, step) writer.add_histogram('weights/fc1/weight', self.fc1.weight.data, step) writer.add_histogram('weights/fc1/bias', self.fc1.bias.data, step)", "x = F.relu(self.conv2(x)) x = F.max_pool2d(x, 2) x = torch.flatten(self.dropout1(x), 1) x =", "self.conv1.weight.data, step) writer.add_histogram('weights/conv1/bias', self.conv1.bias.data, step) writer.add_histogram('weights/conv2/weight', self.conv2.weight.data, step) writer.add_histogram('weights/conv2/bias', self.conv2.bias.data, step) writer.add_histogram('weights/fc1/weight', self.fc1.weight.data,", "def create_parallel_model(): return DataParallelPassthrough(Net()) class Net(nn.Module): def __init__(self): super(Net, self).__init__() self.conv1 = nn.Conv2d(1,", 
"needs to always access the underlying attributes with .module (e.g. model.module.someattr) \"\"\" def", "underlying attributes with .module (e.g. model.module.someattr) \"\"\" def __getattr__(self, name): try: return super().__getattr__(name)", "self.dropout2(x) x = self.fc2(x) output = F.log_softmax(x, dim=1) return output def log_weights(self, step,", "64, 3, 1) self.dropout1 = nn.Dropout2d(0.25) self.fc1 = nn.Linear(9216, 128) self.dropout2 = nn.Dropout2d(0.25)", "step) writer.add_histogram('weights/conv1/bias', self.conv1.bias.data, step) writer.add_histogram('weights/conv2/weight', self.conv2.weight.data, step) writer.add_histogram('weights/conv2/bias', self.conv2.bias.data, step) writer.add_histogram('weights/fc1/weight', self.fc1.weight.data, step)", "import torch import torch.nn as nn import torch.nn.functional as F def create_model(): return", "class solves https://github.com/pytorch/pytorch/issues/16885 Basically, to allow the access of a model wrapped under", "def forward(self, x): x = F.relu(self.conv1(x)) x = F.relu(self.conv2(x)) x = F.max_pool2d(x, 2)", "output def log_weights(self, step, writer): writer.add_histogram('weights/conv1/weight', self.conv1.weight.data, step) writer.add_histogram('weights/conv1/bias', self.conv1.bias.data, step) writer.add_histogram('weights/conv2/weight', self.conv2.weight.data,", "x = self.dropout2(x) x = self.fc2(x) output = F.log_softmax(x, dim=1) return output def", "3, 1) self.dropout1 = nn.Dropout2d(0.25) self.fc1 = nn.Linear(9216, 128) self.dropout2 = nn.Dropout2d(0.25) self.fc2", "to allow the access of a model wrapped under DataParallel one needs to", "writer.add_histogram('weights/conv2/bias', self.conv2.bias.data, step) writer.add_histogram('weights/fc1/weight', self.fc1.weight.data, step) writer.add_histogram('weights/fc1/bias', self.fc1.bias.data, step) writer.add_histogram('weights/fc2/weight', self.fc2.weight.data, step) writer.add_histogram('weights/fc2/bias',", "x = torch.flatten(self.dropout1(x), 1) x 
= F.relu(self.fc1(x)) x = self.dropout2(x) x = self.fc2(x)", "This class solves https://github.com/pytorch/pytorch/issues/16885 Basically, to allow the access of a model wrapped", "self.fc2.bias.data, step) class DataParallelPassthrough(torch.nn.DataParallel): \"\"\" This class solves https://github.com/pytorch/pytorch/issues/16885 Basically, to allow the", "torch.nn as nn import torch.nn.functional as F def create_model(): return Net() def create_parallel_model():", "self.fc1.bias.data, step) writer.add_histogram('weights/fc2/weight', self.fc2.weight.data, step) writer.add_histogram('weights/fc2/bias', self.fc2.bias.data, step) class DataParallelPassthrough(torch.nn.DataParallel): \"\"\" This class", "F def create_model(): return Net() def create_parallel_model(): return DataParallelPassthrough(Net()) class Net(nn.Module): def __init__(self):", "model wrapped under DataParallel one needs to always access the underlying attributes with", "super(Net, self).__init__() self.conv1 = nn.Conv2d(1, 32, 3, 1) self.conv2 = nn.Conv2d(32, 64, 3,", "self.conv1.bias.data, step) writer.add_histogram('weights/conv2/weight', self.conv2.weight.data, step) writer.add_histogram('weights/conv2/bias', self.conv2.bias.data, step) writer.add_histogram('weights/fc1/weight', self.fc1.weight.data, step) writer.add_histogram('weights/fc1/bias', self.fc1.bias.data,", "= F.log_softmax(x, dim=1) return output def log_weights(self, step, writer): writer.add_histogram('weights/conv1/weight', self.conv1.weight.data, step) writer.add_histogram('weights/conv1/bias',", "step) writer.add_histogram('weights/fc2/weight', self.fc2.weight.data, step) writer.add_histogram('weights/fc2/bias', self.fc2.bias.data, step) class DataParallelPassthrough(torch.nn.DataParallel): \"\"\" This class solves", "self.fc2 = nn.Linear(128, 10) def forward(self, x): x = F.relu(self.conv1(x)) x = F.relu(self.conv2(x))", "x = F.relu(self.conv1(x)) x = F.relu(self.conv2(x)) x = F.max_pool2d(x, 2) x = 
torch.flatten(self.dropout1(x),", "self.fc2(x) output = F.log_softmax(x, dim=1) return output def log_weights(self, step, writer): writer.add_histogram('weights/conv1/weight', self.conv1.weight.data,", "nn.Conv2d(32, 64, 3, 1) self.dropout1 = nn.Dropout2d(0.25) self.fc1 = nn.Linear(9216, 128) self.dropout2 =", "the underlying attributes with .module (e.g. model.module.someattr) \"\"\" def __getattr__(self, name): try: return", "= torch.flatten(self.dropout1(x), 1) x = F.relu(self.fc1(x)) x = self.dropout2(x) x = self.fc2(x) output", "self.conv2.weight.data, step) writer.add_histogram('weights/conv2/bias', self.conv2.bias.data, step) writer.add_histogram('weights/fc1/weight', self.fc1.weight.data, step) writer.add_histogram('weights/fc1/bias', self.fc1.bias.data, step) writer.add_histogram('weights/fc2/weight', self.fc2.weight.data,", "DataParallelPassthrough(torch.nn.DataParallel): \"\"\" This class solves https://github.com/pytorch/pytorch/issues/16885 Basically, to allow the access of a", "= nn.Linear(9216, 128) self.dropout2 = nn.Dropout2d(0.25) self.fc2 = nn.Linear(128, 10) def forward(self, x):", "torch import torch.nn as nn import torch.nn.functional as F def create_model(): return Net()", "nn.Linear(9216, 128) self.dropout2 = nn.Dropout2d(0.25) self.fc2 = nn.Linear(128, 10) def forward(self, x): x", "F.max_pool2d(x, 2) x = torch.flatten(self.dropout1(x), 1) x = F.relu(self.fc1(x)) x = self.dropout2(x) x", "a model wrapped under DataParallel one needs to always access the underlying attributes", "log_weights(self, step, writer): writer.add_histogram('weights/conv1/weight', self.conv1.weight.data, step) writer.add_histogram('weights/conv1/bias', self.conv1.bias.data, step) writer.add_histogram('weights/conv2/weight', self.conv2.weight.data, step) writer.add_histogram('weights/conv2/bias',", ".module (e.g. 
model.module.someattr) \"\"\" def __getattr__(self, name): try: return super().__getattr__(name) except AttributeError: return", "import torch.nn.functional as F def create_model(): return Net() def create_parallel_model(): return DataParallelPassthrough(Net()) class", "writer.add_histogram('weights/fc1/weight', self.fc1.weight.data, step) writer.add_histogram('weights/fc1/bias', self.fc1.bias.data, step) writer.add_histogram('weights/fc2/weight', self.fc2.weight.data, step) writer.add_histogram('weights/fc2/bias', self.fc2.bias.data, step) class", "writer.add_histogram('weights/fc1/bias', self.fc1.bias.data, step) writer.add_histogram('weights/fc2/weight', self.fc2.weight.data, step) writer.add_histogram('weights/fc2/bias', self.fc2.bias.data, step) class DataParallelPassthrough(torch.nn.DataParallel): \"\"\" This", "as nn import torch.nn.functional as F def create_model(): return Net() def create_parallel_model(): return", "self.fc1.weight.data, step) writer.add_histogram('weights/fc1/bias', self.fc1.bias.data, step) writer.add_histogram('weights/fc2/weight', self.fc2.weight.data, step) writer.add_histogram('weights/fc2/bias', self.fc2.bias.data, step) class DataParallelPassthrough(torch.nn.DataParallel):", "to always access the underlying attributes with .module (e.g. model.module.someattr) \"\"\" def __getattr__(self,", "1) self.dropout1 = nn.Dropout2d(0.25) self.fc1 = nn.Linear(9216, 128) self.dropout2 = nn.Dropout2d(0.25) self.fc2 =", "Basically, to allow the access of a model wrapped under DataParallel one needs", "= nn.Conv2d(1, 32, 3, 1) self.conv2 = nn.Conv2d(32, 64, 3, 1) self.dropout1 =", "always access the underlying attributes with .module (e.g. 
model.module.someattr) \"\"\" def __getattr__(self, name):", "writer.add_histogram('weights/conv2/weight', self.conv2.weight.data, step) writer.add_histogram('weights/conv2/bias', self.conv2.bias.data, step) writer.add_histogram('weights/fc1/weight', self.fc1.weight.data, step) writer.add_histogram('weights/fc1/bias', self.fc1.bias.data, step) writer.add_histogram('weights/fc2/weight',", "create_parallel_model(): return DataParallelPassthrough(Net()) class Net(nn.Module): def __init__(self): super(Net, self).__init__() self.conv1 = nn.Conv2d(1, 32,", "step) writer.add_histogram('weights/conv2/bias', self.conv2.bias.data, step) writer.add_histogram('weights/fc1/weight', self.fc1.weight.data, step) writer.add_histogram('weights/fc1/bias', self.fc1.bias.data, step) writer.add_histogram('weights/fc2/weight', self.fc2.weight.data, step)", "nn import torch.nn.functional as F def create_model(): return Net() def create_parallel_model(): return DataParallelPassthrough(Net())", "of a model wrapped under DataParallel one needs to always access the underlying", "return output def log_weights(self, step, writer): writer.add_histogram('weights/conv1/weight', self.conv1.weight.data, step) writer.add_histogram('weights/conv1/bias', self.conv1.bias.data, step) writer.add_histogram('weights/conv2/weight',", "writer.add_histogram('weights/fc2/weight', self.fc2.weight.data, step) writer.add_histogram('weights/fc2/bias', self.fc2.bias.data, step) class DataParallelPassthrough(torch.nn.DataParallel): \"\"\" This class solves https://github.com/pytorch/pytorch/issues/16885", "x = F.max_pool2d(x, 2) x = torch.flatten(self.dropout1(x), 1) x = F.relu(self.fc1(x)) x =", "nn.Dropout2d(0.25) self.fc2 = nn.Linear(128, 10) def forward(self, x): x = F.relu(self.conv1(x)) x =", "under DataParallel one needs to always access the underlying attributes with .module (e.g.", "def __init__(self): super(Net, self).__init__() self.conv1 = nn.Conv2d(1, 32, 3, 1) self.conv2 = nn.Conv2d(32,", "one needs to 
always access the underlying attributes with .module (e.g. model.module.someattr) \"\"\"", "1) x = F.relu(self.fc1(x)) x = self.dropout2(x) x = self.fc2(x) output = F.log_softmax(x,", "= nn.Conv2d(32, 64, 3, 1) self.dropout1 = nn.Dropout2d(0.25) self.fc1 = nn.Linear(9216, 128) self.dropout2", "= F.max_pool2d(x, 2) x = torch.flatten(self.dropout1(x), 1) x = F.relu(self.fc1(x)) x = self.dropout2(x)", "step) class DataParallelPassthrough(torch.nn.DataParallel): \"\"\" This class solves https://github.com/pytorch/pytorch/issues/16885 Basically, to allow the access", "128) self.dropout2 = nn.Dropout2d(0.25) self.fc2 = nn.Linear(128, 10) def forward(self, x): x =", "F.log_softmax(x, dim=1) return output def log_weights(self, step, writer): writer.add_histogram('weights/conv1/weight', self.conv1.weight.data, step) writer.add_histogram('weights/conv1/bias', self.conv1.bias.data,", "with .module (e.g. model.module.someattr) \"\"\" def __getattr__(self, name): try: return super().__getattr__(name) except AttributeError:", "32, 3, 1) self.conv2 = nn.Conv2d(32, 64, 3, 1) self.dropout1 = nn.Dropout2d(0.25) self.fc1", "x = self.fc2(x) output = F.log_softmax(x, dim=1) return output def log_weights(self, step, writer):", "nn.Linear(128, 10) def forward(self, x): x = F.relu(self.conv1(x)) x = F.relu(self.conv2(x)) x =", "wrapped under DataParallel one needs to always access the underlying attributes with .module", "= F.relu(self.conv1(x)) x = F.relu(self.conv2(x)) x = F.max_pool2d(x, 2) x = torch.flatten(self.dropout1(x), 1)", "forward(self, x): x = F.relu(self.conv1(x)) x = F.relu(self.conv2(x)) x = F.max_pool2d(x, 2) x", "writer.add_histogram('weights/conv1/bias', self.conv1.bias.data, step) writer.add_histogram('weights/conv2/weight', self.conv2.weight.data, step) writer.add_histogram('weights/conv2/bias', self.conv2.bias.data, step) writer.add_histogram('weights/fc1/weight', self.fc1.weight.data, step) writer.add_histogram('weights/fc1/bias',", "torch.nn.functional as F def 
create_model(): return Net() def create_parallel_model(): return DataParallelPassthrough(Net()) class Net(nn.Module):", "DataParallel one needs to always access the underlying attributes with .module (e.g. model.module.someattr)" ]
[ "subnet fullyconnected_subnet = '24.0.0/5' # disabled layer fields disabled_fields = { 'disabled': 19,", "str_subnet is None: str_subnet = FULLYCONNECTED_SUBNET if fields is None: fields = FULLYCONNECTED_FIELDS", "interface): \"\"\" decode an IP interface to field values :param interface: an IP", "= '24.0/5' # disabled layer fields disabled_fields = { 'disabled': 11, } #", "subnet string, e.g. 127.0.0.1/24 :type str_subnet: string :param fields: a dict of (field_name,", "ipec.ip.decoder import Decoder from ipec.ip.core import max_decimal_value_of_binary # convolutional layer fields CONV_FIELDS =", "interface: an IP interface :type interface: Interface :return: boolean :rtype: bool \"\"\" return", "POOLING_FIELDS super(PoolingLayer, self).__init__(str_subnet, fields) class FullyConnectedLayer(BaseCNNLayer): \"\"\" FullyConnectedLayer class \"\"\" def __init__(self, str_subnet=None,", "#128 'stride_size': 2, #4 'mean': 4, #(0~15-7)/8 'std_dev': 4 # 0~16/16 #total bits:", "127.0.0.1/24 :type str_subnet: string :param fields: a dict of (field_name, num_of_bits) pair :type", "layer subnet fullyconnected_subnet = '24.0/5' # disabled layer fields disabled_fields = { 'disabled':", "\"\"\" PoolingLayer class \"\"\" def __init__(self, str_subnet=None, fields=None): \"\"\" constructor :param str_subnet: subnet", "interface :param field_values: field values :type field_values: a dict of (field_name, field_value) pairs", "= '0.0/4' # pooling layer fields pooling_fields = { 'kernel_size': 2, 'stride_size': 2,", "field_value) pairs :return: the layer interface :rtype: Interface \"\"\" interface = self.encoder.encode_2_interface(field_values) return", "cnn layers with xavier weight initialisation :return: \"\"\" # convolutional layer fields conv_fields", "7, 'stride_size': 4, 'mean': 9, 'std_dev': 9 } # convolutional layer subnet CONV_SUBNET", "return self.encode_2_interface(field_values) def check_interface_in_type(self, interface): \"\"\" check whether the interface belongs to 
this", "} # disabled layer subnet disabled_subnet = '32.0.0/5' return { 'conv': ConvLayer(conv_subnet,conv_fields), 'pooling':", "# disabled layer fields DISABLED_FIELDS = { 'disabled': 10, } # disabled layer", "return field_values def generate_random_interface(self): \"\"\" generate an IP interface with random settings :rtype:", "fields disabled_fields = { 'disabled': 19, } # disabled layer subnet disabled_subnet =", "num_of_bits) pair :type fields: dict \"\"\" if str_subnet is None: str_subnet = POOLING_SUBNET", "FULLYCONNECTED_SUBNET if fields is None: fields = FULLYCONNECTED_FIELDS super(FullyConnectedLayer, self).__init__(str_subnet, fields) class DisabledLayer(BaseCNNLayer):", "interface :type interface: Interface :return: a dict of (field_name, field_value) pairs :rtype: dict", "\"\"\" # convolutional layer fields conv_fields = { 'filter_size': 3, #8 'num_of_feature_maps': 7,", "fullyconnected_fields = { 'num_of_neurons': 11, 'mean': 4, 'std_dev': 4 # total bits: 19", "11, # total bits: 11 } # fully-connected layer subnet fullyconnected_subnet = '24.0/5'", "9 } # fully-connected layer subnet FULLYCONNECTED_SUBNET = '4.0.0.0.0/11' # disabled layer fields", "'mean': 4, #(0~15-7)/8 'std_dev': 4 # 0~16/16 #total bits: 20 } # convolutional", "fields CONV_FIELDS = { 'filter_size': 5, 'num_of_feature_maps': 7, 'stride_size': 4, 'mean': 9, 'std_dev':", "None: fields = POOLING_FIELDS super(PoolingLayer, self).__init__(str_subnet, fields) class FullyConnectedLayer(BaseCNNLayer): \"\"\" FullyConnectedLayer class \"\"\"", ":param interface: an IP interface :type interface: Interface :return: boolean :rtype: bool \"\"\"", "from ipec.ip.decoder import Decoder from ipec.ip.core import max_decimal_value_of_binary # convolutional layer fields CONV_FIELDS", "POOLING_SUBNET = '4.32.0.0.0/30' # fully-connected layer fields FULLYCONNECTED_FIELDS = { 'num_of_neurons': 11, 'mean':", "bytes IP :return: \"\"\" # convolutional layer fields conv_fields = { 'filter_size': 3,", "} # 
fully-connected layer subnet fullyconnected_subnet = '24.0.0/5' # disabled layer fields disabled_fields", "dict of (field_name, field_value) pairs :rtype: dict \"\"\" field_values = self.decoder.decode_2_field_values(interface) return field_values", "\"\"\" DisabledLayer class \"\"\" def __init__(self, str_subnet=None, fields=None): \"\"\" constructor :param str_subnet: subnet", "'type': 1, 'placeholder': 6 # total bits: 11 } # pooling layer subnet", "4 # total bits: 19 } # fully-connected layer subnet fullyconnected_subnet = '24.0.0/5'", "'placeholder': 14 # total bits: 19 } # pooling layer subnet pooling_subnet =", "fields pooling_fields = { 'kernel_size': 2, 'stride_size': 2, 'type': 1, 'placeholder': 14 #", "rand_value return self.encode_2_interface(field_values) def check_interface_in_type(self, interface): \"\"\" check whether the interface belongs to", "def initialise_cnn_layers_3_bytes(): \"\"\" initialise cnn layers with 3 bytes IP :return: \"\"\" #", "max_value = max_decimal_value_of_binary(num_of_bits) rand_value = np.random.randint(0, max_value+1) field_values[field_name] = rand_value return self.encode_2_interface(field_values) def", "= '16.0/5' # fully-connected layer fields fullyconnected_fields = { 'num_of_neurons': 11, # total", "fully-connected layer subnet FULLYCONNECTED_SUBNET = '4.0.0.0.0/11' # disabled layer fields DISABLED_FIELDS = {", "field_values: a dict of (field_name, field_value) pairs :return: the layer interface :rtype: Interface", "decode_2_field_values(self, interface): \"\"\" decode an IP interface to field values :param interface: an", "num_of_bits) pair :type fields: dict \"\"\" self.str_subnet = str_subnet self.fields = fields self.subnet", "def decode_2_field_values(self, interface): \"\"\" decode an IP interface to field values :param interface:", "if fields is None: fields = CONV_FIELDS super(ConvLayer, self).__init__(str_subnet, fields) class PoolingLayer(BaseCNNLayer): \"\"\"", "= { 'filter_size': 5, 'num_of_feature_maps': 
7, 'stride_size': 4, 'mean': 9, 'std_dev': 9 }", "field values :param interface: an IP interface :type interface: Interface :return: a dict", "with xavier weight initialisation :return: \"\"\" # convolutional layer fields conv_fields = {", "{ 'num_of_neurons': 11, 'mean': 9, 'std_dev': 9 } # fully-connected layer subnet FULLYCONNECTED_SUBNET", "num_of_bits) pair :type fields: dict \"\"\" if str_subnet is None: str_subnet = CONV_SUBNET", "if fields is None: fields = POOLING_FIELDS super(PoolingLayer, self).__init__(str_subnet, fields) class FullyConnectedLayer(BaseCNNLayer): \"\"\"", "str_subnet = FULLYCONNECTED_SUBNET if fields is None: fields = FULLYCONNECTED_FIELDS super(FullyConnectedLayer, self).__init__(str_subnet, fields)", "11, 'mean': 4, 'std_dev': 4 # total bits: 19 } # fully-connected layer", "} # convolutional layer subnet conv_subnet = '0.0.0/4' # pooling layer fields pooling_fields", "pair :type fields: dict \"\"\" self.str_subnet = str_subnet self.fields = fields self.subnet =", "fields: a dict of (field_name, num_of_bits) pair :type fields: dict \"\"\" self.str_subnet =", "interface to field values :param interface: an IP interface :type interface: Interface :return:", "self).__init__(str_subnet, fields) class PoolingLayer(BaseCNNLayer): \"\"\" PoolingLayer class \"\"\" def __init__(self, str_subnet=None, fields=None): \"\"\"", "self.fields[field_name] max_value = max_decimal_value_of_binary(num_of_bits) rand_value = np.random.randint(0, max_value+1) field_values[field_name] = rand_value return self.encode_2_interface(field_values)", "\"\"\" if str_subnet is None: str_subnet = FULLYCONNECTED_SUBNET if fields is None: fields", "str_subnet: string :param fields: a dict of (field_name, num_of_bits) pair :type fields: dict", "'kernel_size': 5, 'stride_size': 4, 'type': 1 } # pooling layer subnet POOLING_SUBNET =", "bool \"\"\" return self.subnet.check_ip_in_subnet(interface.ip) class ConvLayer(BaseCNNLayer): \"\"\" ConvLayer class \"\"\" def 
__init__(self, str_subnet=None,", "3, #8 'num_of_feature_maps': 7, #128 'stride_size': 2, #4 'mean': 4, #(0~15-7)/8 'std_dev': 4", "dict \"\"\" if str_subnet is None: str_subnet = DISABLED_SUBNET if fields is None:", "type :param interface: an IP interface :type interface: Interface :return: boolean :rtype: bool", ":param fields: a dict of (field_name, num_of_bits) pair :type fields: dict \"\"\" if", "'disabled': 11, } # disabled layer subnet disabled_subnet = '32.0/5' return { 'conv':", "layer fields FULLYCONNECTED_FIELDS = { 'num_of_neurons': 11, 'mean': 9, 'std_dev': 9 } #", "random settings :rtype: Interface :return: an IP interface \"\"\" field_values = {} for", "\"\"\" constructor :param str_subnet: subnet string, e.g. 127.0.0.1/24 :type str_subnet: string :param fields:", "'32.0/5' return { 'conv': ConvLayer(conv_subnet,conv_fields), 'pooling': PoolingLayer(pooling_subnet, pooling_fields), 'full': FullyConnectedLayer(fullyconnected_subnet, fullyconnected_fields), 'disabled': DisabledLayer(disabled_subnet,", "encode filed values to an IP interface :param field_values: field values :type field_values:", "initialise cnn layers with 3 bytes IP :return: \"\"\" # convolutional layer fields", "field_values): \"\"\" encode filed values to an IP interface :param field_values: field values", "\"\"\" def __init__(self, str_subnet, fields): \"\"\" constructor :param str_subnet: subnet string, e.g. 
127.0.0.1/24", "CONV_SUBNET = '0.0.0.0.0/6' # pooling layer fields POOLING_FIELDS = { 'kernel_size': 5, 'stride_size':", "class BaseCNNLayer: \"\"\" BaseCNNLayer class \"\"\" def __init__(self, str_subnet, fields): \"\"\" constructor :param", "fields is None: fields = FULLYCONNECTED_FIELDS super(FullyConnectedLayer, self).__init__(str_subnet, fields) class DisabledLayer(BaseCNNLayer): \"\"\" DisabledLayer", "disabled layer fields disabled_fields = { 'disabled': 19, } # disabled layer subnet", "convolutional layer subnet conv_subnet = '0.0/4' # pooling layer fields pooling_fields = {", "# convolutional layer fields conv_fields = { 'filter_size': 3, #8 'num_of_feature_maps': 7, #128", "as np from ipec.ip.core import parse_subnet_str from ipec.ip.core import IPStructure from ipec.ip.core import", "total bits: 11 } # fully-connected layer subnet fullyconnected_subnet = '24.0/5' # disabled", "9, 'std_dev': 9 } # convolutional layer subnet CONV_SUBNET = '0.0.0.0.0/6' # pooling", "layer subnet conv_subnet = '0.0/4' # pooling layer fields pooling_fields = { 'kernel_size':", "fullyconnected_subnet = '24.0.0/5' # disabled layer fields disabled_fields = { 'disabled': 19, }", "class PoolingLayer(BaseCNNLayer): \"\"\" PoolingLayer class \"\"\" def __init__(self, str_subnet=None, fields=None): \"\"\" constructor :param", "str_subnet is None: str_subnet = DISABLED_SUBNET if fields is None: fields = DISABLED_FIELDS", ":return: boolean :rtype: bool \"\"\" return self.subnet.check_ip_in_subnet(interface.ip) class ConvLayer(BaseCNNLayer): \"\"\" ConvLayer class \"\"\"", "'filter_size': 3, #8 'num_of_feature_maps': 7, #128 'stride_size': 2, #4 'mean': 4, #(0~15-7)/8 'std_dev':", "= { 'disabled': 11, } # disabled layer subnet disabled_subnet = '32.0/5' return", "from ipec.ip.core import parse_subnet_str from ipec.ip.core import IPStructure from ipec.ip.core import Interface from", "'num_of_feature_maps': 7, 'stride_size': 4, 'mean': 9, 'std_dev': 9 } # convolutional layer subnet", 
"#total bits: 12 } # convolutional layer subnet conv_subnet = '0.0/4' # pooling", "# 0~16/16 #total bits: 20 } # convolutional layer subnet conv_subnet = '0.0.0/4'", "return { 'conv': ConvLayer(conv_subnet,conv_fields), 'pooling': PoolingLayer(pooling_subnet, pooling_fields), 'full': FullyConnectedLayer(fullyconnected_subnet, fullyconnected_fields), 'disabled': DisabledLayer(disabled_subnet, disabled_fields)", "5, 'num_of_feature_maps': 7, 'stride_size': 4, 'mean': 9, 'std_dev': 9 } # convolutional layer", "FullyConnectedLayer(BaseCNNLayer): \"\"\" FullyConnectedLayer class \"\"\" def __init__(self, str_subnet=None, fields=None): \"\"\" constructor :param str_subnet:", "= FULLYCONNECTED_SUBNET if fields is None: fields = FULLYCONNECTED_FIELDS super(FullyConnectedLayer, self).__init__(str_subnet, fields) class", "the layer interface :rtype: Interface \"\"\" interface = self.encoder.encode_2_interface(field_values) return interface def decode_2_field_values(self,", "'16.0/5' # fully-connected layer fields fullyconnected_fields = { 'num_of_neurons': 11, # total bits:", "{} for field_name in self.fields: num_of_bits = self.fields[field_name] max_value = max_decimal_value_of_binary(num_of_bits) rand_value =", "'type': 1 } # pooling layer subnet POOLING_SUBNET = '4.32.0.0.0/30' # fully-connected layer", "(field_name, field_value) pairs :return: the layer interface :rtype: Interface \"\"\" interface = self.encoder.encode_2_interface(field_values)", "= '24.0.0/5' # disabled layer fields disabled_fields = { 'disabled': 19, } #", "{ 'filter_size': 3, #8 'num_of_feature_maps': 7, #128 'stride_size': 2, #4 'mean': 4, #(0~15-7)/8", "an IP interface :type interface: Interface :return: boolean :rtype: bool \"\"\" return self.subnet.check_ip_in_subnet(interface.ip)", "interface def decode_2_field_values(self, interface): \"\"\" decode an IP interface to field values :param", "'16.0.0/5' # fully-connected layer fields fullyconnected_fields = { 'num_of_neurons': 11, 'mean': 4, 
'std_dev':", "# pooling layer fields pooling_fields = { 'kernel_size': 2, 'stride_size': 2, 'type': 1,", "= '0.0.0.0.0/6' # pooling layer fields POOLING_FIELDS = { 'kernel_size': 5, 'stride_size': 4,", "{ 'disabled': 19, } # disabled layer subnet disabled_subnet = '32.0.0/5' return {", "= POOLING_SUBNET if fields is None: fields = POOLING_FIELDS super(PoolingLayer, self).__init__(str_subnet, fields) class", "ConvLayer(BaseCNNLayer): \"\"\" ConvLayer class \"\"\" def __init__(self, str_subnet=None, fields=None): \"\"\" constructor :param str_subnet:", "pair :type fields: dict \"\"\" if str_subnet is None: str_subnet = CONV_SUBNET if", "None: str_subnet = DISABLED_SUBNET if fields is None: fields = DISABLED_FIELDS super(DisabledLayer, self).__init__(str_subnet,", "Interface :return: boolean :rtype: bool \"\"\" return self.subnet.check_ip_in_subnet(interface.ip) class ConvLayer(BaseCNNLayer): \"\"\" ConvLayer class", "for field_name in self.fields: num_of_bits = self.fields[field_name] max_value = max_decimal_value_of_binary(num_of_bits) rand_value = np.random.randint(0,", "None: str_subnet = POOLING_SUBNET if fields is None: fields = POOLING_FIELDS super(PoolingLayer, self).__init__(str_subnet,", "self.fields: num_of_bits = self.fields[field_name] max_value = max_decimal_value_of_binary(num_of_bits) rand_value = np.random.randint(0, max_value+1) field_values[field_name] =", "is None: fields = CONV_FIELDS super(ConvLayer, self).__init__(str_subnet, fields) class PoolingLayer(BaseCNNLayer): \"\"\" PoolingLayer class", "\"\"\" BaseCNNLayer class \"\"\" def __init__(self, str_subnet, fields): \"\"\" constructor :param str_subnet: subnet", "PoolingLayer(pooling_subnet, pooling_fields), 'full': FullyConnectedLayer(fullyconnected_subnet, fullyconnected_fields), 'disabled': DisabledLayer(disabled_subnet, disabled_fields) } def initialise_cnn_layers_with_xavier_weights(): \"\"\" initialise", ":rtype: dict \"\"\" field_values = self.decoder.decode_2_field_values(interface) 
return field_values def generate_random_interface(self): \"\"\" generate an", "'filter_size': 3, #8 'num_of_feature_maps': 7, #128 'stride_size': 2, #4 #total bits: 12 }", "dict \"\"\" if str_subnet is None: str_subnet = CONV_SUBNET if fields is None:", ":type str_subnet: string :param fields: a dict of (field_name, num_of_bits) pair :type fields:", "# pooling layer subnet pooling_subnet = '16.0/5' # fully-connected layer fields fullyconnected_fields =", "subnet FULLYCONNECTED_SUBNET = '4.0.0.0.0/11' # disabled layer fields DISABLED_FIELDS = { 'disabled': 10,", "layer fields disabled_fields = { 'disabled': 11, } # disabled layer subnet disabled_subnet", "ipec.ip.core import IPStructure from ipec.ip.core import Interface from ipec.ip.encoder import Encoder from ipec.ip.decoder", "= { 'filter_size': 3, #8 'num_of_feature_maps': 7, #128 'stride_size': 2, #4 #total bits:", "if str_subnet is None: str_subnet = DISABLED_SUBNET if fields is None: fields =", "of (field_name, num_of_bits) pair :type fields: dict \"\"\" if str_subnet is None: str_subnet", "3 bytes IP :return: \"\"\" # convolutional layer fields conv_fields = { 'filter_size':", "7, #128 'stride_size': 2, #4 'mean': 4, #(0~15-7)/8 'std_dev': 4 # 0~16/16 #total", "an IP interface :type interface: Interface :return: a dict of (field_name, field_value) pairs", "\"\"\" if str_subnet is None: str_subnet = DISABLED_SUBNET if fields is None: fields", "= { 'num_of_neurons': 11, 'mean': 9, 'std_dev': 9 } # fully-connected layer subnet", "{ 'disabled': 11, } # disabled layer subnet disabled_subnet = '32.0/5' return {", "# fully-connected layer subnet fullyconnected_subnet = '24.0.0/5' # disabled layer fields disabled_fields =", "if str_subnet is None: str_subnet = POOLING_SUBNET if fields is None: fields =", "class \"\"\" def __init__(self, str_subnet=None, fields=None): \"\"\" constructor :param str_subnet: subnet string, e.g.", "4, 'std_dev': 4 # total bits: 19 } # fully-connected layer subnet fullyconnected_subnet", 
"2, #4 'mean': 4, #(0~15-7)/8 'std_dev': 4 # 0~16/16 #total bits: 20 }", "DisabledLayer(disabled_subnet, disabled_fields) } def initialise_cnn_layers_with_xavier_weights(): \"\"\" initialise cnn layers with xavier weight initialisation", "interface :type interface: Interface :return: boolean :rtype: bool \"\"\" return self.subnet.check_ip_in_subnet(interface.ip) class ConvLayer(BaseCNNLayer):", "14 # total bits: 19 } # pooling layer subnet pooling_subnet = '16.0.0/5'", "'mean': 4, 'std_dev': 4 # total bits: 19 } # fully-connected layer subnet", ":type fields: dict \"\"\" if str_subnet is None: str_subnet = DISABLED_SUBNET if fields", "'num_of_feature_maps': 7, #128 'stride_size': 2, #4 #total bits: 12 } # convolutional layer", "fields = CONV_FIELDS super(ConvLayer, self).__init__(str_subnet, fields) class PoolingLayer(BaseCNNLayer): \"\"\" PoolingLayer class \"\"\" def", "str_subnet = POOLING_SUBNET if fields is None: fields = POOLING_FIELDS super(PoolingLayer, self).__init__(str_subnet, fields)", "DISABLED_FIELDS = { 'disabled': 10, } # disabled layer subnet DISABLED_SUBNET = '4.32.0.4.0/30'", "# total bits: 11 } # pooling layer subnet pooling_subnet = '16.0/5' #", "# fully-connected layer fields FULLYCONNECTED_FIELDS = { 'num_of_neurons': 11, 'mean': 9, 'std_dev': 9", "{ 'num_of_neurons': 11, 'mean': 4, 'std_dev': 4 # total bits: 19 } #", "} # pooling layer subnet pooling_subnet = '16.0/5' # fully-connected layer fields fullyconnected_fields", "import Decoder from ipec.ip.core import max_decimal_value_of_binary # convolutional layer fields CONV_FIELDS = {", "{ 'kernel_size': 2, 'stride_size': 2, 'type': 1, 'placeholder': 14 # total bits: 19", "'std_dev': 4 # 0~16/16 #total bits: 20 } # convolutional layer subnet conv_subnet", "num_of_bits = self.fields[field_name] max_value = max_decimal_value_of_binary(num_of_bits) rand_value = np.random.randint(0, max_value+1) field_values[field_name] = rand_value", "} class BaseCNNLayer: \"\"\" BaseCNNLayer class \"\"\" def 
__init__(self, str_subnet, fields): \"\"\" constructor", "ipec.ip.core import Interface from ipec.ip.encoder import Encoder from ipec.ip.decoder import Decoder from ipec.ip.core", "initialise_cnn_layers_with_xavier_weights(): \"\"\" initialise cnn layers with xavier weight initialisation :return: \"\"\" # convolutional", "'num_of_neurons': 11, 'mean': 9, 'std_dev': 9 } # fully-connected layer subnet FULLYCONNECTED_SUBNET =", "11, } # disabled layer subnet disabled_subnet = '32.0/5' return { 'conv': ConvLayer(conv_subnet,conv_fields),", "filed values to an IP interface :param field_values: field values :type field_values: a", "if fields is None: fields = FULLYCONNECTED_FIELDS super(FullyConnectedLayer, self).__init__(str_subnet, fields) class DisabledLayer(BaseCNNLayer): \"\"\"", "with random settings :rtype: Interface :return: an IP interface \"\"\" field_values = {}", "\"\"\" decode an IP interface to field values :param interface: an IP interface", "Decoder from ipec.ip.core import max_decimal_value_of_binary # convolutional layer fields CONV_FIELDS = { 'filter_size':", "4, 'mean': 9, 'std_dev': 9 } # convolutional layer subnet CONV_SUBNET = '0.0.0.0.0/6'", "\"\"\" initialise cnn layers with 3 bytes IP :return: \"\"\" # convolutional layer", "IP interface with random settings :rtype: Interface :return: an IP interface \"\"\" field_values", "fields is None: fields = CONV_FIELDS super(ConvLayer, self).__init__(str_subnet, fields) class PoolingLayer(BaseCNNLayer): \"\"\" PoolingLayer", "#8 'num_of_feature_maps': 7, #128 'stride_size': 2, #4 #total bits: 12 } # convolutional", "self.encoder = Encoder(self.ip_structure, self.subnet) self.decoder = Decoder() def encode_2_interface(self, field_values): \"\"\" encode filed", "fields: dict \"\"\" if str_subnet is None: str_subnet = FULLYCONNECTED_SUBNET if fields is", "Decoder() def encode_2_interface(self, field_values): \"\"\" encode filed values to an IP interface :param", "Interface :return: an IP interface \"\"\" 
field_values = {} for field_name in self.fields:", "\"\"\" check whether the interface belongs to this type :param interface: an IP", "# fully-connected layer subnet fullyconnected_subnet = '24.0/5' # disabled layer fields disabled_fields =", "layer subnet pooling_subnet = '16.0.0/5' # fully-connected layer fields fullyconnected_fields = { 'num_of_neurons':", "11 } # pooling layer subnet pooling_subnet = '16.0/5' # fully-connected layer fields", "# fully-connected layer fields fullyconnected_fields = { 'num_of_neurons': 11, # total bits: 11", "11 } # fully-connected layer subnet fullyconnected_subnet = '24.0/5' # disabled layer fields", "string :param fields: a dict of (field_name, num_of_bits) pair :type fields: dict \"\"\"", "= Encoder(self.ip_structure, self.subnet) self.decoder = Decoder() def encode_2_interface(self, field_values): \"\"\" encode filed values", "'mean': 9, 'std_dev': 9 } # convolutional layer subnet CONV_SUBNET = '0.0.0.0.0/6' #", "boolean :rtype: bool \"\"\" return self.subnet.check_ip_in_subnet(interface.ip) class ConvLayer(BaseCNNLayer): \"\"\" ConvLayer class \"\"\" def", "self.ip_structure = IPStructure(fields) self.encoder = Encoder(self.ip_structure, self.subnet) self.decoder = Decoder() def encode_2_interface(self, field_values):", "fullyconnected_fields = { 'num_of_neurons': 11, # total bits: 11 } # fully-connected layer", "dict \"\"\" field_values = self.decoder.decode_2_field_values(interface) return field_values def generate_random_interface(self): \"\"\" generate an IP", "is None: fields = POOLING_FIELDS super(PoolingLayer, self).__init__(str_subnet, fields) class FullyConnectedLayer(BaseCNNLayer): \"\"\" FullyConnectedLayer class", "= { 'disabled': 19, } # disabled layer subnet disabled_subnet = '32.0.0/5' return", "disabled_fields) } class BaseCNNLayer: \"\"\" BaseCNNLayer class \"\"\" def __init__(self, str_subnet, fields): \"\"\"", "'32.0.0/5' return { 'conv': ConvLayer(conv_subnet,conv_fields), 'pooling': 
PoolingLayer(pooling_subnet, pooling_fields), 'full': FullyConnectedLayer(fullyconnected_subnet, fullyconnected_fields), 'disabled': DisabledLayer(disabled_subnet,", ":param str_subnet: subnet string, e.g. 127.0.0.1/24 :type str_subnet: string :param fields: a dict", "9, 'std_dev': 9 } # fully-connected layer subnet FULLYCONNECTED_SUBNET = '4.0.0.0.0/11' # disabled", ":return: \"\"\" # convolutional layer fields conv_fields = { 'filter_size': 3, #8 'num_of_feature_maps':", "weight initialisation :return: \"\"\" # convolutional layer fields conv_fields = { 'filter_size': 3,", "field_name in self.fields: num_of_bits = self.fields[field_name] max_value = max_decimal_value_of_binary(num_of_bits) rand_value = np.random.randint(0, max_value+1)", "fields self.subnet = parse_subnet_str(str_subnet) self.ip_structure = IPStructure(fields) self.encoder = Encoder(self.ip_structure, self.subnet) self.decoder =", "Encoder(self.ip_structure, self.subnet) self.decoder = Decoder() def encode_2_interface(self, field_values): \"\"\" encode filed values to", "import Encoder from ipec.ip.decoder import Decoder from ipec.ip.core import max_decimal_value_of_binary # convolutional layer", "{ 'disabled': 10, } # disabled layer subnet DISABLED_SUBNET = '4.32.0.4.0/30' def initialise_cnn_layers_3_bytes():", "'0.0.0.0.0/6' # pooling layer fields POOLING_FIELDS = { 'kernel_size': 5, 'stride_size': 4, 'type':", "interface): \"\"\" check whether the interface belongs to this type :param interface: an", "'conv': ConvLayer(conv_subnet,conv_fields), 'pooling': PoolingLayer(pooling_subnet, pooling_fields), 'full': FullyConnectedLayer(fullyconnected_subnet, fullyconnected_fields), 'disabled': DisabledLayer(disabled_subnet, disabled_fields) } class", "DisabledLayer(disabled_subnet, disabled_fields) } class BaseCNNLayer: \"\"\" BaseCNNLayer class \"\"\" def __init__(self, str_subnet, fields):", "\"\"\" field_values = self.decoder.decode_2_field_values(interface) return field_values def 
generate_random_interface(self): \"\"\" generate an IP interface", "str_subnet is None: str_subnet = POOLING_SUBNET if fields is None: fields = POOLING_FIELDS", "= parse_subnet_str(str_subnet) self.ip_structure = IPStructure(fields) self.encoder = Encoder(self.ip_structure, self.subnet) self.decoder = Decoder() def", ":return: an IP interface \"\"\" field_values = {} for field_name in self.fields: num_of_bits", "fields = FULLYCONNECTED_FIELDS super(FullyConnectedLayer, self).__init__(str_subnet, fields) class DisabledLayer(BaseCNNLayer): \"\"\" DisabledLayer class \"\"\" def", "conv_fields = { 'filter_size': 3, #8 'num_of_feature_maps': 7, #128 'stride_size': 2, #4 #total", "subnet conv_subnet = '0.0.0/4' # pooling layer fields pooling_fields = { 'kernel_size': 2,", "interface belongs to this type :param interface: an IP interface :type interface: Interface", "max_value+1) field_values[field_name] = rand_value return self.encode_2_interface(field_values) def check_interface_in_type(self, interface): \"\"\" check whether the", "subnet CONV_SUBNET = '0.0.0.0.0/6' # pooling layer fields POOLING_FIELDS = { 'kernel_size': 5,", "disabled_fields) } def initialise_cnn_layers_with_xavier_weights(): \"\"\" initialise cnn layers with xavier weight initialisation :return:", "fields is None: fields = POOLING_FIELDS super(PoolingLayer, self).__init__(str_subnet, fields) class FullyConnectedLayer(BaseCNNLayer): \"\"\" FullyConnectedLayer", "'24.0/5' # disabled layer fields disabled_fields = { 'disabled': 11, } # disabled", "FULLYCONNECTED_FIELDS = { 'num_of_neurons': 11, 'mean': 9, 'std_dev': 9 } # fully-connected layer", "= { 'kernel_size': 2, 'stride_size': 2, 'type': 1, 'placeholder': 14 # total bits:", "layer fields CONV_FIELDS = { 'filter_size': 5, 'num_of_feature_maps': 7, 'stride_size': 4, 'mean': 9,", "= { 'num_of_neurons': 11, 'mean': 4, 'std_dev': 4 # total bits: 19 }", "{ 'kernel_size': 5, 'stride_size': 4, 'type': 1 } # pooling layer subnet POOLING_SUBNET", "layers 
with xavier weight initialisation :return: \"\"\" # convolutional layer fields conv_fields =", "is None: fields = FULLYCONNECTED_FIELDS super(FullyConnectedLayer, self).__init__(str_subnet, fields) class DisabledLayer(BaseCNNLayer): \"\"\" DisabledLayer class", "IPStructure from ipec.ip.core import Interface from ipec.ip.encoder import Encoder from ipec.ip.decoder import Decoder", "pooling_subnet = '16.0/5' # fully-connected layer fields fullyconnected_fields = { 'num_of_neurons': 11, #", "subnet fullyconnected_subnet = '24.0/5' # disabled layer fields disabled_fields = { 'disabled': 11,", "interface = self.encoder.encode_2_interface(field_values) return interface def decode_2_field_values(self, interface): \"\"\" decode an IP interface", "of (field_name, field_value) pairs :rtype: dict \"\"\" field_values = self.decoder.decode_2_field_values(interface) return field_values def", "fields pooling_fields = { 'kernel_size': 2, 'stride_size': 2, 'type': 1, 'placeholder': 6 #", "layer subnet DISABLED_SUBNET = '4.32.0.4.0/30' def initialise_cnn_layers_3_bytes(): \"\"\" initialise cnn layers with 3", "fields disabled_fields = { 'disabled': 11, } # disabled layer subnet disabled_subnet =", "'std_dev': 4 # total bits: 19 } # fully-connected layer subnet fullyconnected_subnet =", "12 } # convolutional layer subnet conv_subnet = '0.0/4' # pooling layer fields", "dict \"\"\" if str_subnet is None: str_subnet = POOLING_SUBNET if fields is None:", "3, #8 'num_of_feature_maps': 7, #128 'stride_size': 2, #4 #total bits: 12 } #", "FULLYCONNECTED_SUBNET = '4.0.0.0.0/11' # disabled layer fields DISABLED_FIELDS = { 'disabled': 10, }", "'pooling': PoolingLayer(pooling_subnet, pooling_fields), 'full': FullyConnectedLayer(fullyconnected_subnet, fullyconnected_fields), 'disabled': DisabledLayer(disabled_subnet, disabled_fields) } class BaseCNNLayer: \"\"\"", "rand_value = np.random.randint(0, max_value+1) field_values[field_name] = rand_value return self.encode_2_interface(field_values) 
def check_interface_in_type(self, interface): \"\"\"", "# fully-connected layer subnet FULLYCONNECTED_SUBNET = '4.0.0.0.0/11' # disabled layer fields DISABLED_FIELDS =", "decode an IP interface to field values :param interface: an IP interface :type", "None: fields = FULLYCONNECTED_FIELDS super(FullyConnectedLayer, self).__init__(str_subnet, fields) class DisabledLayer(BaseCNNLayer): \"\"\" DisabledLayer class \"\"\"", "2, 'stride_size': 2, 'type': 1, 'placeholder': 6 # total bits: 11 } #", "layer fields POOLING_FIELDS = { 'kernel_size': 5, 'stride_size': 4, 'type': 1 } #", "pooling layer fields pooling_fields = { 'kernel_size': 2, 'stride_size': 2, 'type': 1, 'placeholder':", "conv_subnet = '0.0.0/4' # pooling layer fields pooling_fields = { 'kernel_size': 2, 'stride_size':", "(field_name, field_value) pairs :rtype: dict \"\"\" field_values = self.decoder.decode_2_field_values(interface) return field_values def generate_random_interface(self):", "interface: Interface :return: a dict of (field_name, field_value) pairs :rtype: dict \"\"\" field_values", "{ 'conv': ConvLayer(conv_subnet,conv_fields), 'pooling': PoolingLayer(pooling_subnet, pooling_fields), 'full': FullyConnectedLayer(fullyconnected_subnet, fullyconnected_fields), 'disabled': DisabledLayer(disabled_subnet, disabled_fields) }", "bits: 19 } # pooling layer subnet pooling_subnet = '16.0.0/5' # fully-connected layer", "this type :param interface: an IP interface :type interface: Interface :return: boolean :rtype:", "#total bits: 20 } # convolutional layer subnet conv_subnet = '0.0.0/4' # pooling", "initialise cnn layers with xavier weight initialisation :return: \"\"\" # convolutional layer fields", "from ipec.ip.core import max_decimal_value_of_binary # convolutional layer fields CONV_FIELDS = { 'filter_size': 5,", "= '4.32.0.4.0/30' def initialise_cnn_layers_3_bytes(): \"\"\" initialise cnn layers with 3 bytes IP :return:", "subnet disabled_subnet = '32.0/5' return { 'conv': 
ConvLayer(conv_subnet,conv_fields), 'pooling': PoolingLayer(pooling_subnet, pooling_fields), 'full': FullyConnectedLayer(fullyconnected_subnet,", "class DisabledLayer(BaseCNNLayer): \"\"\" DisabledLayer class \"\"\" def __init__(self, str_subnet=None, fields=None): \"\"\" constructor :param", "IP :return: \"\"\" # convolutional layer fields conv_fields = { 'filter_size': 3, #8", "a dict of (field_name, num_of_bits) pair :type fields: dict \"\"\" self.str_subnet = str_subnet", "'4.0.0.0.0/11' # disabled layer fields DISABLED_FIELDS = { 'disabled': 10, } # disabled", "# convolutional layer fields CONV_FIELDS = { 'filter_size': 5, 'num_of_feature_maps': 7, 'stride_size': 4,", "disabled_fields = { 'disabled': 11, } # disabled layer subnet disabled_subnet = '32.0/5'", "'4.32.0.4.0/30' def initialise_cnn_layers_3_bytes(): \"\"\" initialise cnn layers with 3 bytes IP :return: \"\"\"", ":type field_values: a dict of (field_name, field_value) pairs :return: the layer interface :rtype:", "import IPStructure from ipec.ip.core import Interface from ipec.ip.encoder import Encoder from ipec.ip.decoder import", "numpy as np from ipec.ip.core import parse_subnet_str from ipec.ip.core import IPStructure from ipec.ip.core", "= '32.0.0/5' return { 'conv': ConvLayer(conv_subnet,conv_fields), 'pooling': PoolingLayer(pooling_subnet, pooling_fields), 'full': FullyConnectedLayer(fullyconnected_subnet, fullyconnected_fields), 'disabled':", "= { 'kernel_size': 2, 'stride_size': 2, 'type': 1, 'placeholder': 6 # total bits:", "__init__(self, str_subnet, fields): \"\"\" constructor :param str_subnet: subnet string, e.g. 
127.0.0.1/24 :type str_subnet:", "dict of (field_name, num_of_bits) pair :type fields: dict \"\"\" self.str_subnet = str_subnet self.fields", "total bits: 19 } # fully-connected layer subnet fullyconnected_subnet = '24.0.0/5' # disabled", "fully-connected layer subnet fullyconnected_subnet = '24.0/5' # disabled layer fields disabled_fields = {", "'24.0.0/5' # disabled layer fields disabled_fields = { 'disabled': 19, } # disabled", "disabled layer fields disabled_fields = { 'disabled': 11, } # disabled layer subnet", "# disabled layer subnet disabled_subnet = '32.0/5' return { 'conv': ConvLayer(conv_subnet,conv_fields), 'pooling': PoolingLayer(pooling_subnet,", "self.subnet = parse_subnet_str(str_subnet) self.ip_structure = IPStructure(fields) self.encoder = Encoder(self.ip_structure, self.subnet) self.decoder = Decoder()", "disabled layer fields DISABLED_FIELDS = { 'disabled': 10, } # disabled layer subnet", "generate_random_interface(self): \"\"\" generate an IP interface with random settings :rtype: Interface :return: an", "self.fields = fields self.subnet = parse_subnet_str(str_subnet) self.ip_structure = IPStructure(fields) self.encoder = Encoder(self.ip_structure, self.subnet)", "\"\"\" field_values = {} for field_name in self.fields: num_of_bits = self.fields[field_name] max_value =", "# disabled layer fields disabled_fields = { 'disabled': 19, } # disabled layer", "if str_subnet is None: str_subnet = CONV_SUBNET if fields is None: fields =", "'stride_size': 2, 'type': 1, 'placeholder': 6 # total bits: 11 } # pooling", "pairs :return: the layer interface :rtype: Interface \"\"\" interface = self.encoder.encode_2_interface(field_values) return interface", "# total bits: 19 } # pooling layer subnet pooling_subnet = '16.0.0/5' #", "'conv': ConvLayer(conv_subnet,conv_fields), 'pooling': PoolingLayer(pooling_subnet, pooling_fields), 'full': FullyConnectedLayer(fullyconnected_subnet, fullyconnected_fields), 'disabled': DisabledLayer(disabled_subnet, 
disabled_fields) } def", "self.subnet.check_ip_in_subnet(interface.ip) class ConvLayer(BaseCNNLayer): \"\"\" ConvLayer class \"\"\" def __init__(self, str_subnet=None, fields=None): \"\"\" constructor", "pooling_fields = { 'kernel_size': 2, 'stride_size': 2, 'type': 1, 'placeholder': 6 # total", "subnet POOLING_SUBNET = '4.32.0.0.0/30' # fully-connected layer fields FULLYCONNECTED_FIELDS = { 'num_of_neurons': 11,", "layers with 3 bytes IP :return: \"\"\" # convolutional layer fields conv_fields =", "fields): \"\"\" constructor :param str_subnet: subnet string, e.g. 127.0.0.1/24 :type str_subnet: string :param", "np from ipec.ip.core import parse_subnet_str from ipec.ip.core import IPStructure from ipec.ip.core import Interface", "values to an IP interface :param field_values: field values :type field_values: a dict", "BaseCNNLayer class \"\"\" def __init__(self, str_subnet, fields): \"\"\" constructor :param str_subnet: subnet string,", ":param interface: an IP interface :type interface: Interface :return: a dict of (field_name,", "field_values = {} for field_name in self.fields: num_of_bits = self.fields[field_name] max_value = max_decimal_value_of_binary(num_of_bits)", "} # fully-connected layer subnet FULLYCONNECTED_SUBNET = '4.0.0.0.0/11' # disabled layer fields DISABLED_FIELDS", "fields conv_fields = { 'filter_size': 3, #8 'num_of_feature_maps': 7, #128 'stride_size': 2, #4", "'pooling': PoolingLayer(pooling_subnet, pooling_fields), 'full': FullyConnectedLayer(fullyconnected_subnet, fullyconnected_fields), 'disabled': DisabledLayer(disabled_subnet, disabled_fields) } def initialise_cnn_layers_with_xavier_weights(): \"\"\"", "str_subnet: subnet string, e.g. 
127.0.0.1/24 :type str_subnet: string :param fields: a dict of", "def initialise_cnn_layers_with_xavier_weights(): \"\"\" initialise cnn layers with xavier weight initialisation :return: \"\"\" #", "self.decoder = Decoder() def encode_2_interface(self, field_values): \"\"\" encode filed values to an IP", "str_subnet is None: str_subnet = CONV_SUBNET if fields is None: fields = CONV_FIELDS", "fields) class PoolingLayer(BaseCNNLayer): \"\"\" PoolingLayer class \"\"\" def __init__(self, str_subnet=None, fields=None): \"\"\" constructor", "\"\"\" self.str_subnet = str_subnet self.fields = fields self.subnet = parse_subnet_str(str_subnet) self.ip_structure = IPStructure(fields)", "fields DISABLED_FIELDS = { 'disabled': 10, } # disabled layer subnet DISABLED_SUBNET =", "from ipec.ip.core import Interface from ipec.ip.encoder import Encoder from ipec.ip.decoder import Decoder from", "CONV_SUBNET if fields is None: fields = CONV_FIELDS super(ConvLayer, self).__init__(str_subnet, fields) class PoolingLayer(BaseCNNLayer):", "disabled layer subnet disabled_subnet = '32.0.0/5' return { 'conv': ConvLayer(conv_subnet,conv_fields), 'pooling': PoolingLayer(pooling_subnet, pooling_fields),", "'full': FullyConnectedLayer(fullyconnected_subnet, fullyconnected_fields), 'disabled': DisabledLayer(disabled_subnet, disabled_fields) } def initialise_cnn_layers_with_xavier_weights(): \"\"\" initialise cnn layers", "ConvLayer(conv_subnet,conv_fields), 'pooling': PoolingLayer(pooling_subnet, pooling_fields), 'full': FullyConnectedLayer(fullyconnected_subnet, fullyconnected_fields), 'disabled': DisabledLayer(disabled_subnet, disabled_fields) } class BaseCNNLayer:", "layer fields pooling_fields = { 'kernel_size': 2, 'stride_size': 2, 'type': 1, 'placeholder': 6", "'disabled': 19, } # disabled layer subnet disabled_subnet = '32.0.0/5' return { 'conv':", "in self.fields: num_of_bits = self.fields[field_name] max_value = max_decimal_value_of_binary(num_of_bits) rand_value = np.random.randint(0, 
max_value+1) field_values[field_name]", ":type fields: dict \"\"\" self.str_subnet = str_subnet self.fields = fields self.subnet = parse_subnet_str(str_subnet)", "of (field_name, field_value) pairs :return: the layer interface :rtype: Interface \"\"\" interface =", "from ipec.ip.core import IPStructure from ipec.ip.core import Interface from ipec.ip.encoder import Encoder from", "check_interface_in_type(self, interface): \"\"\" check whether the interface belongs to this type :param interface:", "'stride_size': 2, #4 'mean': 4, #(0~15-7)/8 'std_dev': 4 # 0~16/16 #total bits: 20", "#(0~15-7)/8 'std_dev': 4 # 0~16/16 #total bits: 20 } # convolutional layer subnet", "max_decimal_value_of_binary # convolutional layer fields CONV_FIELDS = { 'filter_size': 5, 'num_of_feature_maps': 7, 'stride_size':", "str_subnet, fields): \"\"\" constructor :param str_subnet: subnet string, e.g. 127.0.0.1/24 :type str_subnet: string", "layer subnet disabled_subnet = '32.0/5' return { 'conv': ConvLayer(conv_subnet,conv_fields), 'pooling': PoolingLayer(pooling_subnet, pooling_fields), 'full':", "self.encode_2_interface(field_values) def check_interface_in_type(self, interface): \"\"\" check whether the interface belongs to this type", "= np.random.randint(0, max_value+1) field_values[field_name] = rand_value return self.encode_2_interface(field_values) def check_interface_in_type(self, interface): \"\"\" check", "num_of_bits) pair :type fields: dict \"\"\" if str_subnet is None: str_subnet = DISABLED_SUBNET", "an IP interface \"\"\" field_values = {} for field_name in self.fields: num_of_bits =", "whether the interface belongs to this type :param interface: an IP interface :type", "CONV_FIELDS super(ConvLayer, self).__init__(str_subnet, fields) class PoolingLayer(BaseCNNLayer): \"\"\" PoolingLayer class \"\"\" def __init__(self, str_subnet=None,", "{ 'filter_size': 5, 'num_of_feature_maps': 7, 'stride_size': 4, 'mean': 9, 'std_dev': 9 } #", "pooling_fields), 'full': 
FullyConnectedLayer(fullyconnected_subnet, fullyconnected_fields), 'disabled': DisabledLayer(disabled_subnet, disabled_fields) } class BaseCNNLayer: \"\"\" BaseCNNLayer class", "fields: dict \"\"\" self.str_subnet = str_subnet self.fields = fields self.subnet = parse_subnet_str(str_subnet) self.ip_structure", "from ipec.ip.encoder import Encoder from ipec.ip.decoder import Decoder from ipec.ip.core import max_decimal_value_of_binary #", "= CONV_SUBNET if fields is None: fields = CONV_FIELDS super(ConvLayer, self).__init__(str_subnet, fields) class", "} # pooling layer subnet pooling_subnet = '16.0.0/5' # fully-connected layer fields fullyconnected_fields", "a dict of (field_name, num_of_bits) pair :type fields: dict \"\"\" if str_subnet is", "7, #128 'stride_size': 2, #4 #total bits: 12 } # convolutional layer subnet", "'std_dev': 9 } # convolutional layer subnet CONV_SUBNET = '0.0.0.0.0/6' # pooling layer", "subnet pooling_subnet = '16.0/5' # fully-connected layer fields fullyconnected_fields = { 'num_of_neurons': 11,", "class FullyConnectedLayer(BaseCNNLayer): \"\"\" FullyConnectedLayer class \"\"\" def __init__(self, str_subnet=None, fields=None): \"\"\" constructor :param", "layer subnet pooling_subnet = '16.0/5' # fully-connected layer fields fullyconnected_fields = { 'num_of_neurons':", "str_subnet = DISABLED_SUBNET if fields is None: fields = DISABLED_FIELDS super(DisabledLayer, self).__init__(str_subnet, fields)", "of (field_name, num_of_bits) pair :type fields: dict \"\"\" self.str_subnet = str_subnet self.fields =", "} # convolutional layer subnet CONV_SUBNET = '0.0.0.0.0/6' # pooling layer fields POOLING_FIELDS", "pooling layer subnet pooling_subnet = '16.0/5' # fully-connected layer fields fullyconnected_fields = {", "layer fields conv_fields = { 'filter_size': 3, #8 'num_of_feature_maps': 7, #128 'stride_size': 2,", "e.g. 
127.0.0.1/24 :type str_subnet: string :param fields: a dict of (field_name, num_of_bits) pair", "self.encoder.encode_2_interface(field_values) return interface def decode_2_field_values(self, interface): \"\"\" decode an IP interface to field", "PoolingLayer(pooling_subnet, pooling_fields), 'full': FullyConnectedLayer(fullyconnected_subnet, fullyconnected_fields), 'disabled': DisabledLayer(disabled_subnet, disabled_fields) } class BaseCNNLayer: \"\"\" BaseCNNLayer", "9 } # convolutional layer subnet CONV_SUBNET = '0.0.0.0.0/6' # pooling layer fields", "layer fields fullyconnected_fields = { 'num_of_neurons': 11, # total bits: 11 } #", "Encoder from ipec.ip.decoder import Decoder from ipec.ip.core import max_decimal_value_of_binary # convolutional layer fields", "initialisation :return: \"\"\" # convolutional layer fields conv_fields = { 'filter_size': 3, #8", "DISABLED_SUBNET = '4.32.0.4.0/30' def initialise_cnn_layers_3_bytes(): \"\"\" initialise cnn layers with 3 bytes IP", "an IP interface to field values :param interface: an IP interface :type interface:", "layer fields disabled_fields = { 'disabled': 19, } # disabled layer subnet disabled_subnet", "2, 'type': 1, 'placeholder': 6 # total bits: 11 } # pooling layer", "dict \"\"\" if str_subnet is None: str_subnet = FULLYCONNECTED_SUBNET if fields is None:", "total bits: 19 } # pooling layer subnet pooling_subnet = '16.0.0/5' # fully-connected", "num_of_bits) pair :type fields: dict \"\"\" if str_subnet is None: str_subnet = FULLYCONNECTED_SUBNET", "bits: 11 } # fully-connected layer subnet fullyconnected_subnet = '24.0/5' # disabled layer", "disabled_subnet = '32.0.0/5' return { 'conv': ConvLayer(conv_subnet,conv_fields), 'pooling': PoolingLayer(pooling_subnet, pooling_fields), 'full': FullyConnectedLayer(fullyconnected_subnet, fullyconnected_fields),", "layer fields pooling_fields = { 'kernel_size': 2, 'stride_size': 2, 'type': 1, 'placeholder': 14", "} # disabled layer subnet disabled_subnet = '32.0/5' return 
{ 'conv': ConvLayer(conv_subnet,conv_fields), 'pooling':", "'disabled': 10, } # disabled layer subnet DISABLED_SUBNET = '4.32.0.4.0/30' def initialise_cnn_layers_3_bytes(): \"\"\"", "# convolutional layer subnet CONV_SUBNET = '0.0.0.0.0/6' # pooling layer fields POOLING_FIELDS =", "} # pooling layer subnet POOLING_SUBNET = '4.32.0.0.0/30' # fully-connected layer fields FULLYCONNECTED_FIELDS", "#4 #total bits: 12 } # convolutional layer subnet conv_subnet = '0.0/4' #", "def check_interface_in_type(self, interface): \"\"\" check whether the interface belongs to this type :param", "constructor :param str_subnet: subnet string, e.g. 127.0.0.1/24 :type str_subnet: string :param fields: a", ":return: a dict of (field_name, field_value) pairs :rtype: dict \"\"\" field_values = self.decoder.decode_2_field_values(interface)", "check whether the interface belongs to this type :param interface: an IP interface", "return self.subnet.check_ip_in_subnet(interface.ip) class ConvLayer(BaseCNNLayer): \"\"\" ConvLayer class \"\"\" def __init__(self, str_subnet=None, fields=None): \"\"\"", "layer subnet CONV_SUBNET = '0.0.0.0.0/6' # pooling layer fields POOLING_FIELDS = { 'kernel_size':", "19 } # pooling layer subnet pooling_subnet = '16.0.0/5' # fully-connected layer fields", "disabled_fields = { 'disabled': 19, } # disabled layer subnet disabled_subnet = '32.0.0/5'", "layer subnet disabled_subnet = '32.0.0/5' return { 'conv': ConvLayer(conv_subnet,conv_fields), 'pooling': PoolingLayer(pooling_subnet, pooling_fields), 'full':", "= str_subnet self.fields = fields self.subnet = parse_subnet_str(str_subnet) self.ip_structure = IPStructure(fields) self.encoder =", "IP interface :param field_values: field values :type field_values: a dict of (field_name, field_value)", "19, } # disabled layer subnet disabled_subnet = '32.0.0/5' return { 'conv': ConvLayer(conv_subnet,conv_fields),", "is None: str_subnet = POOLING_SUBNET if fields is None: fields = POOLING_FIELDS super(PoolingLayer,", 
"'kernel_size': 2, 'stride_size': 2, 'type': 1, 'placeholder': 6 # total bits: 11 }", "'stride_size': 4, 'mean': 9, 'std_dev': 9 } # convolutional layer subnet CONV_SUBNET =", "fullyconnected_subnet = '24.0/5' # disabled layer fields disabled_fields = { 'disabled': 11, }", "#4 'mean': 4, #(0~15-7)/8 'std_dev': 4 # 0~16/16 #total bits: 20 } #", "fields=None): \"\"\" constructor :param str_subnet: subnet string, e.g. 127.0.0.1/24 :type str_subnet: string :param", ":type interface: Interface :return: a dict of (field_name, field_value) pairs :rtype: dict \"\"\"", "# disabled layer fields disabled_fields = { 'disabled': 11, } # disabled layer", "fully-connected layer subnet fullyconnected_subnet = '24.0.0/5' # disabled layer fields disabled_fields = {", "'stride_size': 4, 'type': 1 } # pooling layer subnet POOLING_SUBNET = '4.32.0.0.0/30' #", "str_subnet=None, fields=None): \"\"\" constructor :param str_subnet: subnet string, e.g. 127.0.0.1/24 :type str_subnet: string", "super(FullyConnectedLayer, self).__init__(str_subnet, fields) class DisabledLayer(BaseCNNLayer): \"\"\" DisabledLayer class \"\"\" def __init__(self, str_subnet=None, fields=None):", ":return: the layer interface :rtype: Interface \"\"\" interface = self.encoder.encode_2_interface(field_values) return interface def", "to field values :param interface: an IP interface :type interface: Interface :return: a", "to an IP interface :param field_values: field values :type field_values: a dict of", "# pooling layer subnet pooling_subnet = '16.0.0/5' # fully-connected layer fields fullyconnected_fields =", "= self.decoder.decode_2_field_values(interface) return field_values def generate_random_interface(self): \"\"\" generate an IP interface with random", "'full': FullyConnectedLayer(fullyconnected_subnet, fullyconnected_fields), 'disabled': DisabledLayer(disabled_subnet, disabled_fields) } class BaseCNNLayer: \"\"\" BaseCNNLayer class \"\"\"", "'stride_size': 2, 'type': 1, 'placeholder': 14 # total bits: 19 } 
# pooling", "dict of (field_name, num_of_bits) pair :type fields: dict \"\"\" if str_subnet is None:", "conv_fields = { 'filter_size': 3, #8 'num_of_feature_maps': 7, #128 'stride_size': 2, #4 'mean':", "pairs :rtype: dict \"\"\" field_values = self.decoder.decode_2_field_values(interface) return field_values def generate_random_interface(self): \"\"\" generate", "import max_decimal_value_of_binary # convolutional layer fields CONV_FIELDS = { 'filter_size': 5, 'num_of_feature_maps': 7,", "to this type :param interface: an IP interface :type interface: Interface :return: boolean", "subnet conv_subnet = '0.0/4' # pooling layer fields pooling_fields = { 'kernel_size': 2,", "2, 'type': 1, 'placeholder': 14 # total bits: 19 } # pooling layer", "= self.fields[field_name] max_value = max_decimal_value_of_binary(num_of_bits) rand_value = np.random.randint(0, max_value+1) field_values[field_name] = rand_value return", "PoolingLayer class \"\"\" def __init__(self, str_subnet=None, fields=None): \"\"\" constructor :param str_subnet: subnet string,", "None: str_subnet = FULLYCONNECTED_SUBNET if fields is None: fields = FULLYCONNECTED_FIELDS super(FullyConnectedLayer, self).__init__(str_subnet,", "class \"\"\" def __init__(self, str_subnet, fields): \"\"\" constructor :param str_subnet: subnet string, e.g.", "interface \"\"\" field_values = {} for field_name in self.fields: num_of_bits = self.fields[field_name] max_value", "values :type field_values: a dict of (field_name, field_value) pairs :return: the layer interface", "4, 'type': 1 } # pooling layer subnet POOLING_SUBNET = '4.32.0.0.0/30' # fully-connected", "IP interface :type interface: Interface :return: a dict of (field_name, field_value) pairs :rtype:", "'disabled': DisabledLayer(disabled_subnet, disabled_fields) } class BaseCNNLayer: \"\"\" BaseCNNLayer class \"\"\" def __init__(self, str_subnet,", "# convolutional layer subnet conv_subnet = '0.0.0/4' # pooling layer fields pooling_fields =", "pooling_subnet = 
'16.0.0/5' # fully-connected layer fields fullyconnected_fields = { 'num_of_neurons': 11, 'mean':", "fields: dict \"\"\" if str_subnet is None: str_subnet = CONV_SUBNET if fields is", "values :param interface: an IP interface :type interface: Interface :return: a dict of", "belongs to this type :param interface: an IP interface :type interface: Interface :return:", "'num_of_feature_maps': 7, #128 'stride_size': 2, #4 'mean': 4, #(0~15-7)/8 'std_dev': 4 # 0~16/16", "POOLING_SUBNET if fields is None: fields = POOLING_FIELDS super(PoolingLayer, self).__init__(str_subnet, fields) class FullyConnectedLayer(BaseCNNLayer):", "parse_subnet_str(str_subnet) self.ip_structure = IPStructure(fields) self.encoder = Encoder(self.ip_structure, self.subnet) self.decoder = Decoder() def encode_2_interface(self,", "#128 'stride_size': 2, #4 #total bits: 12 } # convolutional layer subnet conv_subnet", "pooling layer subnet POOLING_SUBNET = '4.32.0.0.0/30' # fully-connected layer fields FULLYCONNECTED_FIELDS = {", "import parse_subnet_str from ipec.ip.core import IPStructure from ipec.ip.core import Interface from ipec.ip.encoder import", "\"\"\" generate an IP interface with random settings :rtype: Interface :return: an IP", "1, 'placeholder': 6 # total bits: 11 } # pooling layer subnet pooling_subnet", "layer subnet fullyconnected_subnet = '24.0.0/5' # disabled layer fields disabled_fields = { 'disabled':", "layer fields fullyconnected_fields = { 'num_of_neurons': 11, 'mean': 4, 'std_dev': 4 # total", "\"\"\" ConvLayer class \"\"\" def __init__(self, str_subnet=None, fields=None): \"\"\" constructor :param str_subnet: subnet", "field values :type field_values: a dict of (field_name, field_value) pairs :return: the layer", "} # fully-connected layer subnet fullyconnected_subnet = '24.0/5' # disabled layer fields disabled_fields", "'num_of_neurons': 11, # total bits: 11 } # fully-connected layer subnet fullyconnected_subnet =", "= '4.32.0.0.0/30' # fully-connected layer fields 
FULLYCONNECTED_FIELDS = { 'num_of_neurons': 11, 'mean': 9,", "convolutional layer fields CONV_FIELDS = { 'filter_size': 5, 'num_of_feature_maps': 7, 'stride_size': 4, 'mean':", "field_value) pairs :rtype: dict \"\"\" field_values = self.decoder.decode_2_field_values(interface) return field_values def generate_random_interface(self): \"\"\"", "= '4.0.0.0.0/11' # disabled layer fields DISABLED_FIELDS = { 'disabled': 10, } #", "ipec.ip.core import parse_subnet_str from ipec.ip.core import IPStructure from ipec.ip.core import Interface from ipec.ip.encoder", "initialise_cnn_layers_3_bytes(): \"\"\" initialise cnn layers with 3 bytes IP :return: \"\"\" # convolutional", "# total bits: 11 } # fully-connected layer subnet fullyconnected_subnet = '24.0/5' #", "1, 'placeholder': 14 # total bits: 19 } # pooling layer subnet pooling_subnet", "\"\"\" if str_subnet is None: str_subnet = CONV_SUBNET if fields is None: fields", "layer subnet conv_subnet = '0.0.0/4' # pooling layer fields pooling_fields = { 'kernel_size':", ":rtype: Interface :return: an IP interface \"\"\" field_values = {} for field_name in", "super(PoolingLayer, self).__init__(str_subnet, fields) class FullyConnectedLayer(BaseCNNLayer): \"\"\" FullyConnectedLayer class \"\"\" def __init__(self, str_subnet=None, fields=None):", "= CONV_FIELDS super(ConvLayer, self).__init__(str_subnet, fields) class PoolingLayer(BaseCNNLayer): \"\"\" PoolingLayer class \"\"\" def __init__(self,", "def encode_2_interface(self, field_values): \"\"\" encode filed values to an IP interface :param field_values:", "BaseCNNLayer: \"\"\" BaseCNNLayer class \"\"\" def __init__(self, str_subnet, fields): \"\"\" constructor :param str_subnet:", "def __init__(self, str_subnet, fields): \"\"\" constructor :param str_subnet: subnet string, e.g. 
127.0.0.1/24 :type", "{ 'filter_size': 3, #8 'num_of_feature_maps': 7, #128 'stride_size': 2, #4 #total bits: 12", "(field_name, num_of_bits) pair :type fields: dict \"\"\" if str_subnet is None: str_subnet =", "dict of (field_name, field_value) pairs :return: the layer interface :rtype: Interface \"\"\" interface", "None: fields = CONV_FIELDS super(ConvLayer, self).__init__(str_subnet, fields) class PoolingLayer(BaseCNNLayer): \"\"\" PoolingLayer class \"\"\"", "= self.encoder.encode_2_interface(field_values) return interface def decode_2_field_values(self, interface): \"\"\" decode an IP interface to", "= '16.0.0/5' # fully-connected layer fields fullyconnected_fields = { 'num_of_neurons': 11, 'mean': 4,", "fields POOLING_FIELDS = { 'kernel_size': 5, 'stride_size': 4, 'type': 1 } # pooling", "subnet disabled_subnet = '32.0.0/5' return { 'conv': ConvLayer(conv_subnet,conv_fields), 'pooling': PoolingLayer(pooling_subnet, pooling_fields), 'full': FullyConnectedLayer(fullyconnected_subnet,", "= rand_value return self.encode_2_interface(field_values) def check_interface_in_type(self, interface): \"\"\" check whether the interface belongs", "fullyconnected_fields), 'disabled': DisabledLayer(disabled_subnet, disabled_fields) } def initialise_cnn_layers_with_xavier_weights(): \"\"\" initialise cnn layers with xavier", "field_values: field values :type field_values: a dict of (field_name, field_value) pairs :return: the", "Interface :return: a dict of (field_name, field_value) pairs :rtype: dict \"\"\" field_values =", "11, 'mean': 9, 'std_dev': 9 } # fully-connected layer subnet FULLYCONNECTED_SUBNET = '4.0.0.0.0/11'", "= IPStructure(fields) self.encoder = Encoder(self.ip_structure, self.subnet) self.decoder = Decoder() def encode_2_interface(self, field_values): \"\"\"", "super(ConvLayer, self).__init__(str_subnet, fields) class PoolingLayer(BaseCNNLayer): \"\"\" PoolingLayer class \"\"\" def __init__(self, str_subnet=None, fields=None):", "} # disabled layer subnet 
DISABLED_SUBNET = '4.32.0.4.0/30' def initialise_cnn_layers_3_bytes(): \"\"\" initialise cnn", ":rtype: Interface \"\"\" interface = self.encoder.encode_2_interface(field_values) return interface def decode_2_field_values(self, interface): \"\"\" decode", "pair :type fields: dict \"\"\" if str_subnet is None: str_subnet = POOLING_SUBNET if", "19 } # fully-connected layer subnet fullyconnected_subnet = '24.0.0/5' # disabled layer fields", "= POOLING_FIELDS super(PoolingLayer, self).__init__(str_subnet, fields) class FullyConnectedLayer(BaseCNNLayer): \"\"\" FullyConnectedLayer class \"\"\" def __init__(self,", "IP interface \"\"\" field_values = {} for field_name in self.fields: num_of_bits = self.fields[field_name]", "encode_2_interface(self, field_values): \"\"\" encode filed values to an IP interface :param field_values: field", "'placeholder': 6 # total bits: 11 } # pooling layer subnet pooling_subnet =", "(field_name, num_of_bits) pair :type fields: dict \"\"\" self.str_subnet = str_subnet self.fields = fields", "= '32.0/5' return { 'conv': ConvLayer(conv_subnet,conv_fields), 'pooling': PoolingLayer(pooling_subnet, pooling_fields), 'full': FullyConnectedLayer(fullyconnected_subnet, fullyconnected_fields), 'disabled':", "# pooling layer fields POOLING_FIELDS = { 'kernel_size': 5, 'stride_size': 4, 'type': 1", "# disabled layer subnet DISABLED_SUBNET = '4.32.0.4.0/30' def initialise_cnn_layers_3_bytes(): \"\"\" initialise cnn layers", "# convolutional layer subnet conv_subnet = '0.0/4' # pooling layer fields pooling_fields =", ":type fields: dict \"\"\" if str_subnet is None: str_subnet = FULLYCONNECTED_SUBNET if fields", "10, } # disabled layer subnet DISABLED_SUBNET = '4.32.0.4.0/30' def initialise_cnn_layers_3_bytes(): \"\"\" initialise", "} # convolutional layer subnet conv_subnet = '0.0/4' # pooling layer fields pooling_fields", "__init__(self, str_subnet=None, fields=None): \"\"\" constructor :param str_subnet: subnet string, e.g. 
127.0.0.1/24 :type str_subnet:", "= {} for field_name in self.fields: num_of_bits = self.fields[field_name] max_value = max_decimal_value_of_binary(num_of_bits) rand_value", "fully-connected layer fields FULLYCONNECTED_FIELDS = { 'num_of_neurons': 11, 'mean': 9, 'std_dev': 9 }", "field_values[field_name] = rand_value return self.encode_2_interface(field_values) def check_interface_in_type(self, interface): \"\"\" check whether the interface", "IPStructure(fields) self.encoder = Encoder(self.ip_structure, self.subnet) self.decoder = Decoder() def encode_2_interface(self, field_values): \"\"\" encode", "subnet pooling_subnet = '16.0.0/5' # fully-connected layer fields fullyconnected_fields = { 'num_of_neurons': 11,", "interface: an IP interface :type interface: Interface :return: a dict of (field_name, field_value)", "self).__init__(str_subnet, fields) class DisabledLayer(BaseCNNLayer): \"\"\" DisabledLayer class \"\"\" def __init__(self, str_subnet=None, fields=None): \"\"\"", ":type fields: dict \"\"\" if str_subnet is None: str_subnet = CONV_SUBNET if fields", "IP interface to field values :param interface: an IP interface :type interface: Interface", "fields = POOLING_FIELDS super(PoolingLayer, self).__init__(str_subnet, fields) class FullyConnectedLayer(BaseCNNLayer): \"\"\" FullyConnectedLayer class \"\"\" def", "layer fields DISABLED_FIELDS = { 'disabled': 10, } # disabled layer subnet DISABLED_SUBNET", "CONV_FIELDS = { 'filter_size': 5, 'num_of_feature_maps': 7, 'stride_size': 4, 'mean': 9, 'std_dev': 9", "fields) class FullyConnectedLayer(BaseCNNLayer): \"\"\" FullyConnectedLayer class \"\"\" def __init__(self, str_subnet=None, fields=None): \"\"\" constructor", "conv_subnet = '0.0/4' # pooling layer fields pooling_fields = { 'kernel_size': 2, 'stride_size':", "FullyConnectedLayer(fullyconnected_subnet, fullyconnected_fields), 'disabled': DisabledLayer(disabled_subnet, disabled_fields) } class BaseCNNLayer: \"\"\" BaseCNNLayer class \"\"\" def", 
"pooling_fields = { 'kernel_size': 2, 'stride_size': 2, 'type': 1, 'placeholder': 14 # total", "\"\"\" return self.subnet.check_ip_in_subnet(interface.ip) class ConvLayer(BaseCNNLayer): \"\"\" ConvLayer class \"\"\" def __init__(self, str_subnet=None, fields=None):", "# fully-connected layer fields fullyconnected_fields = { 'num_of_neurons': 11, 'mean': 4, 'std_dev': 4", "field_values def generate_random_interface(self): \"\"\" generate an IP interface with random settings :rtype: Interface", "fields FULLYCONNECTED_FIELDS = { 'num_of_neurons': 11, 'mean': 9, 'std_dev': 9 } # fully-connected", "convolutional layer subnet CONV_SUBNET = '0.0.0.0.0/6' # pooling layer fields POOLING_FIELDS = {", "#8 'num_of_feature_maps': 7, #128 'stride_size': 2, #4 'mean': 4, #(0~15-7)/8 'std_dev': 4 #", "fields fullyconnected_fields = { 'num_of_neurons': 11, 'mean': 4, 'std_dev': 4 # total bits:", "ipec.ip.core import max_decimal_value_of_binary # convolutional layer fields CONV_FIELDS = { 'filter_size': 5, 'num_of_feature_maps':", ":param fields: a dict of (field_name, num_of_bits) pair :type fields: dict \"\"\" self.str_subnet", "dict \"\"\" self.str_subnet = str_subnet self.fields = fields self.subnet = parse_subnet_str(str_subnet) self.ip_structure =", "fully-connected layer fields fullyconnected_fields = { 'num_of_neurons': 11, # total bits: 11 }", "= { 'filter_size': 3, #8 'num_of_feature_maps': 7, #128 'stride_size': 2, #4 'mean': 4,", "disabled layer subnet disabled_subnet = '32.0/5' return { 'conv': ConvLayer(conv_subnet,conv_fields), 'pooling': PoolingLayer(pooling_subnet, pooling_fields),", "settings :rtype: Interface :return: an IP interface \"\"\" field_values = {} for field_name", "1 } # pooling layer subnet POOLING_SUBNET = '4.32.0.0.0/30' # fully-connected layer fields", "disabled layer subnet DISABLED_SUBNET = '4.32.0.4.0/30' def initialise_cnn_layers_3_bytes(): \"\"\" initialise cnn layers with", "fullyconnected_fields), 'disabled': DisabledLayer(disabled_subnet, 
disabled_fields) } class BaseCNNLayer: \"\"\" BaseCNNLayer class \"\"\" def __init__(self,", "self.decoder.decode_2_field_values(interface) return field_values def generate_random_interface(self): \"\"\" generate an IP interface with random settings", "ipec.ip.encoder import Encoder from ipec.ip.decoder import Decoder from ipec.ip.core import max_decimal_value_of_binary # convolutional", "# total bits: 19 } # fully-connected layer subnet fullyconnected_subnet = '24.0.0/5' #", "interface :rtype: Interface \"\"\" interface = self.encoder.encode_2_interface(field_values) return interface def decode_2_field_values(self, interface): \"\"\"", "generate an IP interface with random settings :rtype: Interface :return: an IP interface", "= fields self.subnet = parse_subnet_str(str_subnet) self.ip_structure = IPStructure(fields) self.encoder = Encoder(self.ip_structure, self.subnet) self.decoder", ":type interface: Interface :return: boolean :rtype: bool \"\"\" return self.subnet.check_ip_in_subnet(interface.ip) class ConvLayer(BaseCNNLayer): \"\"\"", "interface with random settings :rtype: Interface :return: an IP interface \"\"\" field_values =", "the interface belongs to this type :param interface: an IP interface :type interface:", "def __init__(self, str_subnet=None, fields=None): \"\"\" constructor :param str_subnet: subnet string, e.g. 
127.0.0.1/24 :type", "import numpy as np from ipec.ip.core import parse_subnet_str from ipec.ip.core import IPStructure from", "convolutional layer fields conv_fields = { 'filter_size': 3, #8 'num_of_feature_maps': 7, #128 'stride_size':", "bits: 20 } # convolutional layer subnet conv_subnet = '0.0.0/4' # pooling layer", "'disabled': DisabledLayer(disabled_subnet, disabled_fields) } def initialise_cnn_layers_with_xavier_weights(): \"\"\" initialise cnn layers with xavier weight", "20 } # convolutional layer subnet conv_subnet = '0.0.0/4' # pooling layer fields", "= { 'kernel_size': 5, 'stride_size': 4, 'type': 1 } # pooling layer subnet", "= '0.0.0/4' # pooling layer fields pooling_fields = { 'kernel_size': 2, 'stride_size': 2,", "is None: str_subnet = FULLYCONNECTED_SUBNET if fields is None: fields = FULLYCONNECTED_FIELDS super(FullyConnectedLayer,", "layer subnet POOLING_SUBNET = '4.32.0.0.0/30' # fully-connected layer fields FULLYCONNECTED_FIELDS = { 'num_of_neurons':", ":rtype: bool \"\"\" return self.subnet.check_ip_in_subnet(interface.ip) class ConvLayer(BaseCNNLayer): \"\"\" ConvLayer class \"\"\" def __init__(self,", "'mean': 9, 'std_dev': 9 } # fully-connected layer subnet FULLYCONNECTED_SUBNET = '4.0.0.0.0/11' #", "pair :type fields: dict \"\"\" if str_subnet is None: str_subnet = DISABLED_SUBNET if", "bits: 11 } # pooling layer subnet pooling_subnet = '16.0/5' # fully-connected layer", "FullyConnectedLayer class \"\"\" def __init__(self, str_subnet=None, fields=None): \"\"\" constructor :param str_subnet: subnet string,", "self.subnet) self.decoder = Decoder() def encode_2_interface(self, field_values): \"\"\" encode filed values to an", "'type': 1, 'placeholder': 14 # total bits: 19 } # pooling layer subnet", "4 # 0~16/16 #total bits: 20 } # convolutional layer subnet conv_subnet =", "interface: Interface :return: boolean :rtype: bool \"\"\" return self.subnet.check_ip_in_subnet(interface.ip) class ConvLayer(BaseCNNLayer): \"\"\" ConvLayer", "if 
str_subnet is None: str_subnet = FULLYCONNECTED_SUBNET if fields is None: fields =", "fields: dict \"\"\" if str_subnet is None: str_subnet = POOLING_SUBNET if fields is", "= FULLYCONNECTED_FIELDS super(FullyConnectedLayer, self).__init__(str_subnet, fields) class DisabledLayer(BaseCNNLayer): \"\"\" DisabledLayer class \"\"\" def __init__(self,", "class ConvLayer(BaseCNNLayer): \"\"\" ConvLayer class \"\"\" def __init__(self, str_subnet=None, fields=None): \"\"\" constructor :param", "fields) class DisabledLayer(BaseCNNLayer): \"\"\" DisabledLayer class \"\"\" def __init__(self, str_subnet=None, fields=None): \"\"\" constructor", "'4.32.0.0.0/30' # fully-connected layer fields FULLYCONNECTED_FIELDS = { 'num_of_neurons': 11, 'mean': 9, 'std_dev':", "bits: 19 } # fully-connected layer subnet fullyconnected_subnet = '24.0.0/5' # disabled layer", "\"\"\" encode filed values to an IP interface :param field_values: field values :type", "layer subnet FULLYCONNECTED_SUBNET = '4.0.0.0.0/11' # disabled layer fields DISABLED_FIELDS = { 'disabled':", "DisabledLayer class \"\"\" def __init__(self, str_subnet=None, fields=None): \"\"\" constructor :param str_subnet: subnet string,", "# disabled layer subnet disabled_subnet = '32.0.0/5' return { 'conv': ConvLayer(conv_subnet,conv_fields), 'pooling': PoolingLayer(pooling_subnet,", "2, #4 #total bits: 12 } # convolutional layer subnet conv_subnet = '0.0/4'", "2, 'stride_size': 2, 'type': 1, 'placeholder': 14 # total bits: 19 } #", "FULLYCONNECTED_FIELDS super(FullyConnectedLayer, self).__init__(str_subnet, fields) class DisabledLayer(BaseCNNLayer): \"\"\" DisabledLayer class \"\"\" def __init__(self, str_subnet=None,", "total bits: 11 } # pooling layer subnet pooling_subnet = '16.0/5' # fully-connected", ":param field_values: field values :type field_values: a dict of (field_name, field_value) pairs :return:", "# pooling layer subnet POOLING_SUBNET = '4.32.0.0.0/30' # fully-connected layer fields FULLYCONNECTED_FIELDS =", "5, 
'stride_size': 4, 'type': 1 } # pooling layer subnet POOLING_SUBNET = '4.32.0.0.0/30'", "{ 'kernel_size': 2, 'stride_size': 2, 'type': 1, 'placeholder': 6 # total bits: 11", "convolutional layer subnet conv_subnet = '0.0.0/4' # pooling layer fields pooling_fields = {", "Interface \"\"\" interface = self.encoder.encode_2_interface(field_values) return interface def decode_2_field_values(self, interface): \"\"\" decode an", "fields fullyconnected_fields = { 'num_of_neurons': 11, # total bits: 11 } # fully-connected", "def generate_random_interface(self): \"\"\" generate an IP interface with random settings :rtype: Interface :return:", "cnn layers with 3 bytes IP :return: \"\"\" # convolutional layer fields conv_fields", "ConvLayer(conv_subnet,conv_fields), 'pooling': PoolingLayer(pooling_subnet, pooling_fields), 'full': FullyConnectedLayer(fullyconnected_subnet, fullyconnected_fields), 'disabled': DisabledLayer(disabled_subnet, disabled_fields) } def initialise_cnn_layers_with_xavier_weights():", "an IP interface with random settings :rtype: Interface :return: an IP interface \"\"\"", "max_decimal_value_of_binary(num_of_bits) rand_value = np.random.randint(0, max_value+1) field_values[field_name] = rand_value return self.encode_2_interface(field_values) def check_interface_in_type(self, interface):", "xavier weight initialisation :return: \"\"\" # convolutional layer fields conv_fields = { 'filter_size':", "a dict of (field_name, field_value) pairs :rtype: dict \"\"\" field_values = self.decoder.decode_2_field_values(interface) return", "'kernel_size': 2, 'stride_size': 2, 'type': 1, 'placeholder': 14 # total bits: 19 }", "str_subnet self.fields = fields self.subnet = parse_subnet_str(str_subnet) self.ip_structure = IPStructure(fields) self.encoder = Encoder(self.ip_structure,", "fully-connected layer fields fullyconnected_fields = { 'num_of_neurons': 11, 'mean': 4, 'std_dev': 4 #", "\"\"\" FullyConnectedLayer class \"\"\" def __init__(self, str_subnet=None, 
fields=None): \"\"\" constructor :param str_subnet: subnet", "4, #(0~15-7)/8 'std_dev': 4 # 0~16/16 #total bits: 20 } # convolutional layer", "0~16/16 #total bits: 20 } # convolutional layer subnet conv_subnet = '0.0.0/4' #", "string, e.g. 127.0.0.1/24 :type str_subnet: string :param fields: a dict of (field_name, num_of_bits)", "import Interface from ipec.ip.encoder import Encoder from ipec.ip.decoder import Decoder from ipec.ip.core import", "str_subnet = CONV_SUBNET if fields is None: fields = CONV_FIELDS super(ConvLayer, self).__init__(str_subnet, fields)", "ConvLayer class \"\"\" def __init__(self, str_subnet=None, fields=None): \"\"\" constructor :param str_subnet: subnet string,", "= { 'disabled': 10, } # disabled layer subnet DISABLED_SUBNET = '4.32.0.4.0/30' def", "self).__init__(str_subnet, fields) class FullyConnectedLayer(BaseCNNLayer): \"\"\" FullyConnectedLayer class \"\"\" def __init__(self, str_subnet=None, fields=None): \"\"\"", "an IP interface :param field_values: field values :type field_values: a dict of (field_name,", "bits: 12 } # convolutional layer subnet conv_subnet = '0.0/4' # pooling layer", "'num_of_neurons': 11, 'mean': 4, 'std_dev': 4 # total bits: 19 } # fully-connected", "} def initialise_cnn_layers_with_xavier_weights(): \"\"\" initialise cnn layers with xavier weight initialisation :return: \"\"\"", "\"\"\" def __init__(self, str_subnet=None, fields=None): \"\"\" constructor :param str_subnet: subnet string, e.g. 
127.0.0.1/24", "fields: a dict of (field_name, num_of_bits) pair :type fields: dict \"\"\" if str_subnet", "'std_dev': 9 } # fully-connected layer subnet FULLYCONNECTED_SUBNET = '4.0.0.0.0/11' # disabled layer", "'0.0.0/4' # pooling layer fields pooling_fields = { 'kernel_size': 2, 'stride_size': 2, 'type':", "parse_subnet_str from ipec.ip.core import IPStructure from ipec.ip.core import Interface from ipec.ip.encoder import Encoder", "return interface def decode_2_field_values(self, interface): \"\"\" decode an IP interface to field values", "FullyConnectedLayer(fullyconnected_subnet, fullyconnected_fields), 'disabled': DisabledLayer(disabled_subnet, disabled_fields) } def initialise_cnn_layers_with_xavier_weights(): \"\"\" initialise cnn layers with", "disabled_subnet = '32.0/5' return { 'conv': ConvLayer(conv_subnet,conv_fields), 'pooling': PoolingLayer(pooling_subnet, pooling_fields), 'full': FullyConnectedLayer(fullyconnected_subnet, fullyconnected_fields),", "\"\"\" if str_subnet is None: str_subnet = POOLING_SUBNET if fields is None: fields", "DisabledLayer(BaseCNNLayer): \"\"\" DisabledLayer class \"\"\" def __init__(self, str_subnet=None, fields=None): \"\"\" constructor :param str_subnet:", "= { 'num_of_neurons': 11, # total bits: 11 } # fully-connected layer subnet", "'0.0/4' # pooling layer fields pooling_fields = { 'kernel_size': 2, 'stride_size': 2, 'type':", "= max_decimal_value_of_binary(num_of_bits) rand_value = np.random.randint(0, max_value+1) field_values[field_name] = rand_value return self.encode_2_interface(field_values) def check_interface_in_type(self,", "'filter_size': 5, 'num_of_feature_maps': 7, 'stride_size': 4, 'mean': 9, 'std_dev': 9 } # convolutional", "= Decoder() def encode_2_interface(self, field_values): \"\"\" encode filed values to an IP interface", ":type fields: dict \"\"\" if str_subnet is None: str_subnet = POOLING_SUBNET if fields", "PoolingLayer(BaseCNNLayer): \"\"\" PoolingLayer class \"\"\" def __init__(self, 
str_subnet=None, fields=None): \"\"\" constructor :param str_subnet:", "6 # total bits: 11 } # pooling layer subnet pooling_subnet = '16.0/5'", "pooling layer subnet pooling_subnet = '16.0.0/5' # fully-connected layer fields fullyconnected_fields = {", "\"\"\" interface = self.encoder.encode_2_interface(field_values) return interface def decode_2_field_values(self, interface): \"\"\" decode an IP", "field_values = self.decoder.decode_2_field_values(interface) return field_values def generate_random_interface(self): \"\"\" generate an IP interface with", "np.random.randint(0, max_value+1) field_values[field_name] = rand_value return self.encode_2_interface(field_values) def check_interface_in_type(self, interface): \"\"\" check whether", "None: str_subnet = CONV_SUBNET if fields is None: fields = CONV_FIELDS super(ConvLayer, self).__init__(str_subnet,", "is None: str_subnet = CONV_SUBNET if fields is None: fields = CONV_FIELDS super(ConvLayer,", "subnet DISABLED_SUBNET = '4.32.0.4.0/30' def initialise_cnn_layers_3_bytes(): \"\"\" initialise cnn layers with 3 bytes", "'stride_size': 2, #4 #total bits: 12 } # convolutional layer subnet conv_subnet =", "layer interface :rtype: Interface \"\"\" interface = self.encoder.encode_2_interface(field_values) return interface def decode_2_field_values(self, interface):", "is None: str_subnet = DISABLED_SUBNET if fields is None: fields = DISABLED_FIELDS super(DisabledLayer,", "a dict of (field_name, field_value) pairs :return: the layer interface :rtype: Interface \"\"\"", "Interface from ipec.ip.encoder import Encoder from ipec.ip.decoder import Decoder from ipec.ip.core import max_decimal_value_of_binary", "pair :type fields: dict \"\"\" if str_subnet is None: str_subnet = FULLYCONNECTED_SUBNET if", "{ 'num_of_neurons': 11, # total bits: 11 } # fully-connected layer subnet fullyconnected_subnet", "self.str_subnet = str_subnet self.fields = fields self.subnet = parse_subnet_str(str_subnet) self.ip_structure = 
IPStructure(fields) self.encoder", "fields: dict \"\"\" if str_subnet is None: str_subnet = DISABLED_SUBNET if fields is", "with 3 bytes IP :return: \"\"\" # convolutional layer fields conv_fields = {", "POOLING_FIELDS = { 'kernel_size': 5, 'stride_size': 4, 'type': 1 } # pooling layer", "pooling layer fields POOLING_FIELDS = { 'kernel_size': 5, 'stride_size': 4, 'type': 1 }", "pooling_fields), 'full': FullyConnectedLayer(fullyconnected_subnet, fullyconnected_fields), 'disabled': DisabledLayer(disabled_subnet, disabled_fields) } def initialise_cnn_layers_with_xavier_weights(): \"\"\" initialise cnn", "IP interface :type interface: Interface :return: boolean :rtype: bool \"\"\" return self.subnet.check_ip_in_subnet(interface.ip) class", "\"\"\" initialise cnn layers with xavier weight initialisation :return: \"\"\" # convolutional layer" ]
[ "f\"\\t cdr: {hdr.cdr}\\n\" f\"\\t time of session: {hdr.time_of_session}\\n\" ) else: _log.warning(\"no header present\")", "_log.info(\"server stopped by ctrl+c\") except Exception: rc = 1 _log.exception(\"an error occured\") finally:", "{hdr.cdr}\\n\" f\"\\t time of session: {hdr.time_of_session}\\n\" ) else: _log.warning(\"no header present\") loc =", "return rc def main_exec(): logging.basicConfig(level=logging.DEBUG, stream=sys.stdout) parser = argparse.ArgumentParser(\"devutil SBD receiver\", add_help=True) parser.add_argument(\"--iface\",", "= 1 _log.exception(\"an error occured\") finally: server.shutdown() return rc def main_exec(): logging.basicConfig(level=logging.DEBUG, stream=sys.stdout)", "def handle(self): msg: MOMessage = self.request hdr = msg.header if hdr: _log.info( \"hdr:", "args = parser.parse_args(sys.argv[1:]) # Если нам предлагают писать данные в stdout, то оно", "blog_stream=blog_stream, ) rc = 0 try: _log.info(\"starting server\") server.serve_forever() except KeyboardInterrupt: _log.info(\"server stopped", "momsn: {hdr.momsn}\\n\" f\"\\t session status: {hdr.session_status}\\n\" f\"\\t imei: {hdr.imei}\\n\" f\"\\t cdr: {hdr.cdr}\\n\" f\"\\t", "f\"\\t time of session: {hdr.time_of_session}\\n\" ) else: _log.warning(\"no header present\") loc = msg.loc_info", "typing.BinaryIO = None): server = MOServiceServer( server_address=(iface, port,), request_handler_cls=ReqHandler, bind_and_activate=True, send_ack=True, blog_stream=blog_stream, )", "buffer) stream = None if args.blogfile is not None: _log.info(\"using blog stream as", "blog stream as %s\", args.blogfile) stream = getattr(args.blogfile, \"buffer\", args.blogfile) arg_iface = args.iface", "add_help=True) parser.add_argument(\"--iface\", action=\"store\", dest=\"iface\", nargs=\"?\", type=str, default=\"0.0.0.0\") parser.add_argument(\"--port\", action=\"store\", dest=\"port\", nargs=\"?\", type=int, required=True)", "= 0 try: _log.info(\"starting server\") server.serve_forever() 
except KeyboardInterrupt: _log.info(\"server stopped by ctrl+c\") except", "parser = argparse.ArgumentParser(\"devutil SBD receiver\", add_help=True) parser.add_argument(\"--iface\", action=\"store\", dest=\"iface\", nargs=\"?\", type=str, default=\"0.0.0.0\") parser.add_argument(\"--port\",", "main(iface: str, port: int, blog_stream: typing.BinaryIO = None): server = MOServiceServer( server_address=(iface, port,),", "MOMessage _log = logging.getLogger(__name__) class ReqHandler(BaseRequestHandler): def handle(self): msg: MOMessage = self.request hdr", "{hdr.momsn}\\n\" f\"\\t session status: {hdr.session_status}\\n\" f\"\\t imei: {hdr.imei}\\n\" f\"\\t cdr: {hdr.cdr}\\n\" f\"\\t time", "str, port: int, blog_stream: typing.BinaryIO = None): server = MOServiceServer( server_address=(iface, port,), request_handler_cls=ReqHandler,", "бинарное а текстовое # добудем из него бинарный поток (аттрибут buffer) stream =", "send_ack=True, blog_stream=blog_stream, ) rc = 0 try: _log.info(\"starting server\") server.serve_forever() except KeyboardInterrupt: _log.info(\"server", "добудем из него бинарный поток (аттрибут buffer) stream = None if args.blogfile is", "from socketserver import BaseRequestHandler from ..network.mo_server import MOServiceServer from ..messages.mobile_originated import MOMessage _log", "dest=\"port\", nargs=\"?\", type=int, required=True) parser.add_argument(\"--blog-file\", action=\"store\", dest=\"blogfile\", nargs=\"?\", type=argparse.FileType('wb'), default=None) args = parser.parse_args(sys.argv[1:])", "него бинарный поток (аттрибут buffer) stream = None if args.blogfile is not None:", "args.blogfile) stream = getattr(args.blogfile, \"buffer\", args.blogfile) arg_iface = args.iface arg_port = args.port return", "header present\") loc = msg.loc_info if loc: _log.info( \"loc info:\\n\" f\"\\t lon: {loc.lon}\\n\"", "error occured\") finally: server.shutdown() return rc def main_exec(): logging.basicConfig(level=logging.DEBUG, stream=sys.stdout) parser 
= argparse.ArgumentParser(\"devutil", "except KeyboardInterrupt: _log.info(\"server stopped by ctrl+c\") except Exception: rc = 1 _log.exception(\"an error", "parser.parse_args(sys.argv[1:]) # Если нам предлагают писать данные в stdout, то оно не бинарное", "def main_exec(): logging.basicConfig(level=logging.DEBUG, stream=sys.stdout) parser = argparse.ArgumentParser(\"devutil SBD receiver\", add_help=True) parser.add_argument(\"--iface\", action=\"store\", dest=\"iface\",", "то оно не бинарное а текстовое # добудем из него бинарный поток (аттрибут", "if hdr: _log.info( \"hdr: \\n\" f\"\\t mtmsn: {hdr.mtmsn}\\n\" f\"\\t momsn: {hdr.momsn}\\n\" f\"\\t session", "lat: {loc.lat}\\n\" f\"\\t CEP r: {loc.CEP_radius}\\n\" ) else: _log.warning(\"no location info present\") pay", "by ctrl+c\") except Exception: rc = 1 _log.exception(\"an error occured\") finally: server.shutdown() return", "1 _log.exception(\"an error occured\") finally: server.shutdown() return rc def main_exec(): logging.basicConfig(level=logging.DEBUG, stream=sys.stdout) parser", "..messages.mobile_originated import MOMessage _log = logging.getLogger(__name__) class ReqHandler(BaseRequestHandler): def handle(self): msg: MOMessage =", "stdout, то оно не бинарное а текстовое # добудем из него бинарный поток", "{hdr.session_status}\\n\" f\"\\t imei: {hdr.imei}\\n\" f\"\\t cdr: {hdr.cdr}\\n\" f\"\\t time of session: {hdr.time_of_session}\\n\" )", "\"loc info:\\n\" f\"\\t lon: {loc.lon}\\n\" f\"\\t lat: {loc.lat}\\n\" f\"\\t CEP r: {loc.CEP_radius}\\n\" )", "parser.add_argument(\"--port\", action=\"store\", dest=\"port\", nargs=\"?\", type=int, required=True) parser.add_argument(\"--blog-file\", action=\"store\", dest=\"blogfile\", nargs=\"?\", type=argparse.FileType('wb'), default=None) args", "= None): server = MOServiceServer( server_address=(iface, port,), request_handler_cls=ReqHandler, bind_and_activate=True, send_ack=True, blog_stream=blog_stream, ) rc", "blog_stream: typing.BinaryIO = None): server = 
MOServiceServer( server_address=(iface, port,), request_handler_cls=ReqHandler, bind_and_activate=True, send_ack=True, blog_stream=blog_stream,", "default=\"0.0.0.0\") parser.add_argument(\"--port\", action=\"store\", dest=\"port\", nargs=\"?\", type=int, required=True) parser.add_argument(\"--blog-file\", action=\"store\", dest=\"blogfile\", nargs=\"?\", type=argparse.FileType('wb'), default=None)", "session status: {hdr.session_status}\\n\" f\"\\t imei: {hdr.imei}\\n\" f\"\\t cdr: {hdr.cdr}\\n\" f\"\\t time of session:", "rc def main_exec(): logging.basicConfig(level=logging.DEBUG, stream=sys.stdout) parser = argparse.ArgumentParser(\"devutil SBD receiver\", add_help=True) parser.add_argument(\"--iface\", action=\"store\",", "from ..network.mo_server import MOServiceServer from ..messages.mobile_originated import MOMessage _log = logging.getLogger(__name__) class ReqHandler(BaseRequestHandler):", "нам предлагают писать данные в stdout, то оно не бинарное а текстовое #", "rc = 0 try: _log.info(\"starting server\") server.serve_forever() except KeyboardInterrupt: _log.info(\"server stopped by ctrl+c\")", "nargs=\"?\", type=int, required=True) parser.add_argument(\"--blog-file\", action=\"store\", dest=\"blogfile\", nargs=\"?\", type=argparse.FileType('wb'), default=None) args = parser.parse_args(sys.argv[1:]) #", "{loc.CEP_radius}\\n\" ) else: _log.warning(\"no location info present\") pay = msg.payload if pay: _log.info(f\"payload:", "= logging.getLogger(__name__) class ReqHandler(BaseRequestHandler): def handle(self): msg: MOMessage = self.request hdr = msg.header", "_log.info( \"loc info:\\n\" f\"\\t lon: {loc.lon}\\n\" f\"\\t lat: {loc.lat}\\n\" f\"\\t CEP r: {loc.CEP_radius}\\n\"", "<reponame>shostakovichs-spacecraft-factory/cansat-2018-2019 import typing import logging import sys import argparse from socketserver import BaseRequestHandler", "bind_and_activate=True, send_ack=True, blog_stream=blog_stream, ) rc = 0 try: _log.info(\"starting server\") 
server.serve_forever() except KeyboardInterrupt:", "args.blogfile) arg_iface = args.iface arg_port = args.port return main(arg_iface, arg_port, stream) if __name__", "rc = 1 _log.exception(\"an error occured\") finally: server.shutdown() return rc def main_exec(): logging.basicConfig(level=logging.DEBUG,", "pay = msg.payload if pay: _log.info(f\"payload: {pay.raw_payload}\") else: _log.warning(\"no payload present\") # noinspection", "time of session: {hdr.time_of_session}\\n\" ) else: _log.warning(\"no header present\") loc = msg.loc_info if", "status: {hdr.session_status}\\n\" f\"\\t imei: {hdr.imei}\\n\" f\"\\t cdr: {hdr.cdr}\\n\" f\"\\t time of session: {hdr.time_of_session}\\n\"", "else: _log.warning(\"no location info present\") pay = msg.payload if pay: _log.info(f\"payload: {pay.raw_payload}\") else:", "not None: _log.info(\"using blog stream as %s\", args.blogfile) stream = getattr(args.blogfile, \"buffer\", args.blogfile)", "arg_iface = args.iface arg_port = args.port return main(arg_iface, arg_port, stream) if __name__ ==", "info present\") pay = msg.payload if pay: _log.info(f\"payload: {pay.raw_payload}\") else: _log.warning(\"no payload present\")", "nargs=\"?\", type=str, default=\"0.0.0.0\") parser.add_argument(\"--port\", action=\"store\", dest=\"port\", nargs=\"?\", type=int, required=True) parser.add_argument(\"--blog-file\", action=\"store\", dest=\"blogfile\", nargs=\"?\",", "r: {loc.CEP_radius}\\n\" ) else: _log.warning(\"no location info present\") pay = msg.payload if pay:", "_log.info(f\"payload: {pay.raw_payload}\") else: _log.warning(\"no payload present\") # noinspection PyBroadException def main(iface: str, port:", "не бинарное а текстовое # добудем из него бинарный поток (аттрибут buffer) stream", "hdr = msg.header if hdr: _log.info( \"hdr: \\n\" f\"\\t mtmsn: {hdr.mtmsn}\\n\" f\"\\t momsn:", "receiver\", add_help=True) parser.add_argument(\"--iface\", action=\"store\", dest=\"iface\", nargs=\"?\", type=str, default=\"0.0.0.0\") 
parser.add_argument(\"--port\", action=\"store\", dest=\"port\", nargs=\"?\", type=int,", "= msg.payload if pay: _log.info(f\"payload: {pay.raw_payload}\") else: _log.warning(\"no payload present\") # noinspection PyBroadException", "_log.exception(\"an error occured\") finally: server.shutdown() return rc def main_exec(): logging.basicConfig(level=logging.DEBUG, stream=sys.stdout) parser =", "request_handler_cls=ReqHandler, bind_and_activate=True, send_ack=True, blog_stream=blog_stream, ) rc = 0 try: _log.info(\"starting server\") server.serve_forever() except", "(аттрибут buffer) stream = None if args.blogfile is not None: _log.info(\"using blog stream", "f\"\\t momsn: {hdr.momsn}\\n\" f\"\\t session status: {hdr.session_status}\\n\" f\"\\t imei: {hdr.imei}\\n\" f\"\\t cdr: {hdr.cdr}\\n\"", "action=\"store\", dest=\"blogfile\", nargs=\"?\", type=argparse.FileType('wb'), default=None) args = parser.parse_args(sys.argv[1:]) # Если нам предлагают писать", "mtmsn: {hdr.mtmsn}\\n\" f\"\\t momsn: {hdr.momsn}\\n\" f\"\\t session status: {hdr.session_status}\\n\" f\"\\t imei: {hdr.imei}\\n\" f\"\\t", "present\") # noinspection PyBroadException def main(iface: str, port: int, blog_stream: typing.BinaryIO = None):", "{hdr.mtmsn}\\n\" f\"\\t momsn: {hdr.momsn}\\n\" f\"\\t session status: {hdr.session_status}\\n\" f\"\\t imei: {hdr.imei}\\n\" f\"\\t cdr:", "import typing import logging import sys import argparse from socketserver import BaseRequestHandler from", "is not None: _log.info(\"using blog stream as %s\", args.blogfile) stream = getattr(args.blogfile, \"buffer\",", "payload present\") # noinspection PyBroadException def main(iface: str, port: int, blog_stream: typing.BinaryIO =", "None: _log.info(\"using blog stream as %s\", args.blogfile) stream = getattr(args.blogfile, \"buffer\", args.blogfile) arg_iface", "_log.warning(\"no header present\") loc = msg.loc_info if loc: _log.info( \"loc info:\\n\" f\"\\t lon:", "server = MOServiceServer( server_address=(iface, 
port,), request_handler_cls=ReqHandler, bind_and_activate=True, send_ack=True, blog_stream=blog_stream, ) rc = 0", "= self.request hdr = msg.header if hdr: _log.info( \"hdr: \\n\" f\"\\t mtmsn: {hdr.mtmsn}\\n\"", "session: {hdr.time_of_session}\\n\" ) else: _log.warning(\"no header present\") loc = msg.loc_info if loc: _log.info(", "self.request hdr = msg.header if hdr: _log.info( \"hdr: \\n\" f\"\\t mtmsn: {hdr.mtmsn}\\n\" f\"\\t", "getattr(args.blogfile, \"buffer\", args.blogfile) arg_iface = args.iface arg_port = args.port return main(arg_iface, arg_port, stream)", "{pay.raw_payload}\") else: _log.warning(\"no payload present\") # noinspection PyBroadException def main(iface: str, port: int,", "оно не бинарное а текстовое # добудем из него бинарный поток (аттрибут buffer)", "поток (аттрибут buffer) stream = None if args.blogfile is not None: _log.info(\"using blog", "import argparse from socketserver import BaseRequestHandler from ..network.mo_server import MOServiceServer from ..messages.mobile_originated import", "info:\\n\" f\"\\t lon: {loc.lon}\\n\" f\"\\t lat: {loc.lat}\\n\" f\"\\t CEP r: {loc.CEP_radius}\\n\" ) else:", "PyBroadException def main(iface: str, port: int, blog_stream: typing.BinaryIO = None): server = MOServiceServer(", "{hdr.imei}\\n\" f\"\\t cdr: {hdr.cdr}\\n\" f\"\\t time of session: {hdr.time_of_session}\\n\" ) else: _log.warning(\"no header", "stream = getattr(args.blogfile, \"buffer\", args.blogfile) arg_iface = args.iface arg_port = args.port return main(arg_iface,", "# noinspection PyBroadException def main(iface: str, port: int, blog_stream: typing.BinaryIO = None): server", "socketserver import BaseRequestHandler from ..network.mo_server import MOServiceServer from ..messages.mobile_originated import MOMessage _log =", "loc = msg.loc_info if loc: _log.info( \"loc info:\\n\" f\"\\t lon: {loc.lon}\\n\" f\"\\t lat:", "main_exec(): logging.basicConfig(level=logging.DEBUG, stream=sys.stdout) parser = argparse.ArgumentParser(\"devutil SBD 
receiver\", add_help=True) parser.add_argument(\"--iface\", action=\"store\", dest=\"iface\", nargs=\"?\",", "present\") pay = msg.payload if pay: _log.info(f\"payload: {pay.raw_payload}\") else: _log.warning(\"no payload present\") #", "nargs=\"?\", type=argparse.FileType('wb'), default=None) args = parser.parse_args(sys.argv[1:]) # Если нам предлагают писать данные в", "loc: _log.info( \"loc info:\\n\" f\"\\t lon: {loc.lon}\\n\" f\"\\t lat: {loc.lat}\\n\" f\"\\t CEP r:", "MOServiceServer from ..messages.mobile_originated import MOMessage _log = logging.getLogger(__name__) class ReqHandler(BaseRequestHandler): def handle(self): msg:", "hdr: _log.info( \"hdr: \\n\" f\"\\t mtmsn: {hdr.mtmsn}\\n\" f\"\\t momsn: {hdr.momsn}\\n\" f\"\\t session status:", "present\") loc = msg.loc_info if loc: _log.info( \"loc info:\\n\" f\"\\t lon: {loc.lon}\\n\" f\"\\t", "from ..messages.mobile_originated import MOMessage _log = logging.getLogger(__name__) class ReqHandler(BaseRequestHandler): def handle(self): msg: MOMessage", "KeyboardInterrupt: _log.info(\"server stopped by ctrl+c\") except Exception: rc = 1 _log.exception(\"an error occured\")", "None if args.blogfile is not None: _log.info(\"using blog stream as %s\", args.blogfile) stream", "_log.info(\"using blog stream as %s\", args.blogfile) stream = getattr(args.blogfile, \"buffer\", args.blogfile) arg_iface =", "= None if args.blogfile is not None: _log.info(\"using blog stream as %s\", args.blogfile)", "MOServiceServer( server_address=(iface, port,), request_handler_cls=ReqHandler, bind_and_activate=True, send_ack=True, blog_stream=blog_stream, ) rc = 0 try: _log.info(\"starting", "BaseRequestHandler from ..network.mo_server import MOServiceServer from ..messages.mobile_originated import MOMessage _log = logging.getLogger(__name__) class", "try: _log.info(\"starting server\") server.serve_forever() except KeyboardInterrupt: _log.info(\"server stopped by ctrl+c\") except Exception: rc", "handle(self): msg: MOMessage = 
class ReqHandler(BaseRequestHandler):
    """Request handler that logs every section of a decoded MO message.

    The server passes the already-decoded message object in ``self.request``.
    Each information element (header, location info, payload) is logged when
    present; a warning is emitted for each missing one.
    """

    def handle(self):
        message: MOMessage = self.request
        self._report_header(message.header)
        self._report_location(message.loc_info)
        self._report_payload(message.payload)

    @staticmethod
    def _report_header(header):
        # Dump every header field, or warn if no header IE was received.
        if not header:
            _log.warning("no header present")
            return
        _log.info(
            "hdr: \n"
            f"\t mtmsn: {header.mtmsn}\n"
            f"\t momsn: {header.momsn}\n"
            f"\t session status: {header.session_status}\n"
            f"\t imei: {header.imei}\n"
            f"\t cdr: {header.cdr}\n"
            f"\t time of session: {header.time_of_session}\n"
        )

    @staticmethod
    def _report_location(location):
        # Dump the location information IE, or warn if it is absent.
        if not location:
            _log.warning("no location info present")
            return
        _log.info(
            "loc info:\n"
            f"\t lon: {location.lon}\n"
            f"\t lat: {location.lat}\n"
            f"\t CEP r: {location.CEP_radius}\n"
        )

    @staticmethod
    def _report_payload(payload):
        # Dump the raw payload, or warn if the message carried none.
        if not payload:
            _log.warning("no payload present")
            return
        _log.info(f"payload: {payload.raw_payload}")
# noinspection PyBroadException
def main(iface: str, port: int,
         blog_stream: typing.Optional[typing.BinaryIO] = None) -> int:
    """Run the MO service server until it is stopped.

    :param iface: interface/address to bind the listening socket to
    :param port: TCP port to listen on
    :param blog_stream: optional binary stream the server mirrors received
        data into (``None`` disables binary logging)
    :return: process exit code -- 0 on a clean stop (including ctrl+c),
        1 when the serve loop died with an unexpected exception
    """
    server = MOServiceServer(
        server_address=(iface, port),
        request_handler_cls=ReqHandler,
        bind_and_activate=True,
        send_ack=True,
        blog_stream=blog_stream,
    )
    rc = 0
    try:
        _log.info("starting server")
        server.serve_forever()
    except KeyboardInterrupt:
        _log.info("server stopped by ctrl+c")
    except Exception:
        # Top-level boundary: record the failure and report it via exit code.
        rc = 1
        _log.exception("an error occurred")
    finally:
        server.shutdown()
        # shutdown() only stops the serve loop; the listening socket must be
        # released explicitly or it leaks.
        server.server_close()
    return rc
def main_exec():
    """CLI entry point: configure logging, parse arguments, run the server.

    :return: process exit code produced by :func:`main`
    """
    logging.basicConfig(level=logging.DEBUG, stream=sys.stdout)
    parser = argparse.ArgumentParser("devutil SBD receiver", add_help=True)
    parser.add_argument("--iface", action="store", dest="iface", nargs="?",
                        type=str, default="0.0.0.0")
    parser.add_argument("--port", action="store", dest="port", nargs="?",
                        type=int, required=True)
    parser.add_argument("--blog-file", action="store", dest="blogfile", nargs="?",
                        type=argparse.FileType('wb'), default=None)
    args = parser.parse_args(sys.argv[1:])
    # If we are asked to write the binary log to stdout, argparse hands us a
    # text-mode object -- pull out its underlying binary stream (the ``buffer``
    # attribute); a real file opened in 'wb' mode is used as-is.
    stream = None
    if args.blogfile is not None:
        _log.info("using blog stream as %s", args.blogfile)
        stream = getattr(args.blogfile, "buffer", args.blogfile)
    return main(args.iface, args.port, stream)


if __name__ == "__main__":
    sys.exit(main_exec())
[ "order either when the plan is modified or when the plan is saved.", "includes are defined, the relevant siteSettings.py section might look like this: PIPELINE_JS =", "by the mapviews.js # to support custom command rendering XGDS_PLANNER_COMMAND_RENDERER_SCRIPTS = () #", "settings object will not know about the default value! ### # DJANGO-PIPELINE ADDENDUM:", "), 'output_filename': 'js/simulator.js', } _thisDir = os.path.dirname(__file__) # Set to true to make", "Really it should not be jammed in that file. PIPELINE['JAVASCRIPT']['custom_map'] = {'source_filenames': ('xgds_planner2/js/uploadJson.js',", "remove anything you won't be using. XGDS_PLANNER_PLAN_EXPORTERS = ( ('xpjson', '.json', 'xgds_planner2.planExporter.XpjsonPlanExporter'), ('bearing_distance',", "# XGDS_PLANNER_SCHEMAS: A list of XPJSON schemas available in the # planner. Notes:", "or save. MODIFY = 'Modify' SAVE = 'Save' DELETE = 'Delete' JAVASCRIPT =", "exporterClass). This is the entire list of everything that # xgds_planner provides; remove", "('csv', '.csv', 'xgds_planner2.csvPlanImporter.CSVPlanImporter'), ('json', '.json', 'xgds_planner2.planImporter.XPJsonPlanImporter'), ) # kml root from xgds_map_server XGDS_PLANNER_LAYER_FEED_URL", "Django settings module. Let's say one such parameter is FOO. The default value", "= {} PIPELINE_JS.update(plannerSettings.XGDS_PLANNER_PIPELINE_JS) PIPELINE_CSS = {} PIPELINE_CSS.update(plannerSettings.XGDS_PLANNER_PIPELINE_CSS) # ### \"\"\" import os from", "JAVASCRIPT)] # they will be executed in order either when the plan is", "like this: FOO = 'a better value' Other modules can access the value", "#TODO update, qunit is installed with bower 'xgds_planner2_testing': { 'source_filenames': ( 'external/js/qunit-1.12.0.js', 'xgds_planner2/js/tests.js',", "be using. 
XGDS_PLANNER_PLAN_EXPORTERS = ( ('xpjson', '.json', 'xgds_planner2.planExporter.XpjsonPlanExporter'), ('bearing_distance', '.bdj', 'xgds_planner2.planExporter.BearingDistanceJsonPlanExporter'), ('bearing_distance', '.cbdj',", "to the XPJSON PlanSchema and # PlanLibrary source files. One of the steps", "That settings object will not know about the default value! ### # DJANGO-PIPELINE", "= False XGDS_PLANNER_TEST_SKIP_CREATE_PLAN = False XGDS_PLANNER_HANDLEBARS_DIRS = [os.path.join('xgds_planner2', 'templates', 'handlebars'), os.path.join('xgds_map_server', 'templates', 'handlebars',", "WITHOUT WARRANTIES OR # CONDITIONS OF ANY KIND, either express or implied. See", "site frame. # It is initialized by calling views.getSiteFrames(). XGDS_PLANNER_SITE_FRAMES = [] XGDS_MAP_SERVER_JS_MAP", "or when the plan is saved. # If it is a Python method,", "= False XGDS_PLANNER_TEST_SKIP_DOC = False XGDS_PLANNER_TEST_SKIP_PLAN_REST = False XGDS_PLANNER_TEST_SKIP_PLAN_EXPORT = False XGDS_PLANNER_TEST_SKIP_CREATE_PLAN_PAGE =", "site. They point to the XPJSON PlanSchema and # PlanLibrary source files. One", "you need it. # It must add a json dictionary called extras XGDS_PLANNER_EDITOR_CONTEXT_METHOD", "it. # It must add a json dictionary called extras XGDS_PLANNER_EDITOR_CONTEXT_METHOD = 'xgds_planner2.views.addToEditorContext'", "'type', 'id']} XGDS_DATA_MASKED_FIELDS = getOrCreateDict('XGDS_DATA_MASKED_FIELDS') XGDS_DATA_MASKED_FIELDS['xgds_planner2'] = {'Plan': ['uuid', 'dateModified', 'jsonPlan', 'deleted', 'readOnly',", "an \"AS IS\" BASIS, WITHOUT WARRANTIES OR # CONDITIONS OF ANY KIND, either", "# * @simulatorUrl is relative to STATIC_URL. It should point to a JavaScript", "geocamUtil.SettingsUtil import getOrCreateArray \"\"\" This app may define some new parameters that can", "this to the site id from your WITHIN plan library. 
XGDS_PLANNER_DEFAULT_SITE = ('IRG',", "[(MODIFY,'my.planner.modify.callback', PYTHON), # (SAVE,'my.planner.save.callback', JAVASCRIPT)] # they will be executed in order either", "in that file. PIPELINE['JAVASCRIPT']['custom_map'] = {'source_filenames': ('xgds_planner2/js/uploadJson.js', 'xgds_map_server/js/map_viewer/olShowMapCoords.js', 'xgds_map_server/js/map_viewer/olInitialLayers.js' ), 'output_filename': 'js/custom_map.js' }", "'output_filename': 'css/planner_tests.css', }, } PIPELINE = getOrCreateDict('PIPELINE') PIPELINE['CSS'] = XGDS_PLANNER_PIPELINE_CSS PIPELINE['JAVASCRIPT'] = getOrCreateDict('PIPELINE.JAVASCRIPT')", "settings module, like this: FOO = 'a better value' Other modules can access", "simulator model for the schema. The model is loaded # as part of", "'output_filename': 'js/custom_map.js' } # Override this compilation of javascript files for your planner", "WITHIN plan library. XGDS_PLANNER_DEFAULT_SITE = ('IRG', 'Ames') # Method to add stuff to", "copy of the License at # http://www.apache.org/licenses/LICENSE-2.0. # # Unless required by applicable", "of FOO like this: from django.conf import settings print settings.FOO Don't try to", "invoked from schedulePlans call in views.py XGDS_PLANNER_SCHEDULE_EXTRAS_METHOD = None # OVERRIDE this in", "'s' XGDS_PLANNER_COMMAND_MONIKER = \"Command\" XGDS_PLANNER_COMMAND_MONIKER_PLURAL = XGDS_PLANNER_COMMAND_MONIKER + 's' #TODO to have a", "command in a custom way. # see xgds_kn for example XGDS_PLANNER_COMMAND_RENDERERS = {}", "this: from django.conf import settings print settings.FOO Don't try to get the value", "django.conf.settings. That settings object will not know about the default value! 
### #", "XGDS_PLANNER_PLAN_MODEL = \"xgds_planner2.Plan\" XGDS_PLANNER_PLAN_MONIKER = \"Plan\" XGDS_PLANNER_PLAN_EXECUTION_MODEL = \"xgds_planner2.PlanExecution\" XGDS_PLANNER_STATION_MONIKER = \"Station\" XGDS_PLANNER_STATION_MONIKER_PLURAL", "{} PIPELINE_CSS.update(plannerSettings.XGDS_PLANNER_PIPELINE_CSS) # ### \"\"\" import os from geocamUtil.SettingsUtil import getOrCreateDict XGDS_PLANNER_OFFLINE =", "= XGDS_PLANNER_COMMAND_MONIKER + 's' #TODO to have a default site frame in the", "@simulatorUrl is relative to STATIC_URL. It should point to a JavaScript # file", "comma is critical because this makes it a tuple ), 'output_filename': 'js/simulator.js', }", "the plan is saved. # If it is a Python method, it will", "library XGDS_PLANNER_PLOTS = {} # Uncomment the below to see plannerSamplePlot.js, and include", "'xgds_map_server/js/util/forms.js', 'xgds_planner2/js/plannerApp.js', 'xgds_planner2/js/plannerModels.js', 'xgds_planner2/js/olPlannerStyles.js', 'xgds_planner2/js/plannerLinksViews.js', 'xgds_planner2/js/plannerToolsViews.js', 'xgds_planner2/js/plannerScheduleViews.js', 'xgds_planner2/js/plannerViews.js', 'xgds_planner2/js/map_viewer/olMapViews.js', 'xgds_planner2/js/olStationViews.js', 'xgds_planner2/js/olSegmentViews.js', 'xgds_planner2/js/olPlanViews.js', 'xgds_planner2/js/simulatorDriver.js'", "('xgds_planner2/js/planner/genericVehicleSimulator.js', # This trailing comma is critical because this makes it a tuple", "{} # Uncomment the below to see plannerSamplePlot.js, and include it in planner_app_base", "like this: PIPELINE_JS = {} PIPELINE_JS.update(plannerSettings.XGDS_PLANNER_PIPELINE_JS) PIPELINE_CSS = {} PIPELINE_CSS.update(plannerSettings.XGDS_PLANNER_PIPELINE_CSS) # ### \"\"\"", "modification or save. 
MODIFY = 'Modify' SAVE = 'Save' DELETE = 'Delete' JAVASCRIPT", "must add a json dictionary called extras XGDS_PLANNER_EDITOR_CONTEXT_METHOD = 'xgds_planner2.views.addToEditorContext' # Method to", "'deleted', 'readOnly', 'numStations', 'numSegments', 'numCommands', 'stats' ] } # If you have callbacks", "= {'source_filenames': ('xgds_planner2/js/planner/genericVehicleSimulator.js', # This trailing comma is critical because this makes it", "FOO = 'a better value' Other modules can access the value of FOO", "admin for the site doesn't like the default value, they can override it", "by # compileXpjson.py and the simplified/canonical versions are written # to the build/static/xgds_planner2", "XGDS_PLANNER_PLAN_EXECUTION_MODEL = \"xgds_planner2.PlanExecution\" XGDS_PLANNER_STATION_MONIKER = \"Station\" XGDS_PLANNER_STATION_MONIKER_PLURAL = XGDS_PLANNER_STATION_MONIKER + 's' XGDS_PLANNER_SEGMENT_MONIKER =", "file. PIPELINE['JAVASCRIPT']['custom_map'] = {'source_filenames': ('xgds_planner2/js/uploadJson.js', 'xgds_map_server/js/map_viewer/olShowMapCoords.js', 'xgds_map_server/js/map_viewer/olInitialLayers.js' ), 'output_filename': 'js/custom_map.js' } # Override", "initialized by calling views.getSiteFrames(). XGDS_PLANNER_SITE_FRAMES = [] XGDS_MAP_SERVER_JS_MAP = getOrCreateDict('XGDS_MAP_SERVER_JS_MAP') XGDS_MAP_SERVER_JS_MAP['Plan'] = {'ol':", "in display # IMPORTANT YOU MUST INCLUDE THIS IN SITE SETTINGS # TEMPLATE_CONTEXT_PROCESSORS", "Administration. # All rights reserved. 
# # The xGDS platform is licensed under", "\"librarySource\": \"apps/xgds_planner2/testing/examplePlanLibrary.json\", # \"simulatorUrl\": \"xgds_planner2/testing/exampleSimulator.js\", # \"simulator\": \"xgds_planner2.ExampleSimulator\", # }, \"GenericVehicle\": { \"schemaSource\":", "XGDS_PLANNER_SEGMENT_MONIKER + 's' XGDS_PLANNER_COMMAND_MONIKER = \"Command\" XGDS_PLANNER_COMMAND_MONIKER_PLURAL = XGDS_PLANNER_COMMAND_MONIKER + 's' #TODO to", "'hiddenColumns': ['stations', 'type', 'id']} XGDS_DATA_MASKED_FIELDS = getOrCreateDict('XGDS_DATA_MASKED_FIELDS') XGDS_DATA_MASKED_FIELDS['xgds_planner2'] = {'Plan': ['uuid', 'dateModified', 'jsonPlan',", "a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0. # # Unless required by", "( 'jquery-ui-dist/jquery-ui.min.css', # for some reason compressing this in the css does not", "mapviews.js # to render a command in a custom way. # see xgds_kn", "jammed in that file. PIPELINE['JAVASCRIPT']['custom_map'] = {'source_filenames': ('xgds_planner2/js/uploadJson.js', 'xgds_map_server/js/map_viewer/olShowMapCoords.js', 'xgds_map_server/js/map_viewer/olInitialLayers.js' ), 'output_filename': 'js/custom_map.js'", "this module to work, the site-level config file (siteSettings.py), must merge the XGDS_PLANNER_PIPELINE_JS", "ADDENDUM: For this module to work, the site-level config file (siteSettings.py), must merge", "reserved. # # The xGDS platform is licensed under the Apache License, Version", "'js/custom_map.js' } # Override this compilation of javascript files for your planner and", "'numStations', 'numSegments', 'numCommands', 'stats' ] } # If you have callbacks to be", "planner. Notes: # # * @schemaSource and @librarySource are paths relative to the", "part of the client-side planner JS. # # * @simulator is the JavaScript", "views.getSiteFrames(). 
XGDS_PLANNER_SITE_FRAMES = [] XGDS_MAP_SERVER_JS_MAP = getOrCreateDict('XGDS_MAP_SERVER_JS_MAP') XGDS_MAP_SERVER_JS_MAP['Plan'] = {'ol': 'xgds_planner2/js/olPlanMap.js', 'model': XGDS_PLANNER_PLAN_MODEL,", "} # Override this compilation of javascript files for your planner and simulator", "Method to add stuff to planExecution if you are not doing the basic", "tab is loaded. XGDS_PLANNER_LINKS_LOADED_CALLBACK = 'null' # This is used to hold a", "the License for the # specific language governing permissions and limitations under the", "# for some reason compressing this in the css does not work so", "callbacks to be connected to the planner, register them as follows # XGDS_PLANNER_CALLBACK", "EXEC = 'Exec' XGDS_PLANNER_CALLBACK = [] # If you will be plotting values", "kml root from xgds_map_server XGDS_PLANNER_LAYER_FEED_URL = \"/xgds_map_server/treejson/\" XGDS_PLANNER_LINE_WIDTH_PIXELS = 3 XGDS_PLANNER_PLAN_MODEL = \"xgds_planner2.Plan\"", "modified or when the plan is saved. # If it is a Python", "), 'output_filename': 'js/custom_map.js' } # Override this compilation of javascript files for your", "is a javascript method, it will happen on the front end after modification", "under the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR", "PIPELINE = getOrCreateDict('PIPELINE') PIPELINE['CSS'] = XGDS_PLANNER_PIPELINE_CSS PIPELINE['JAVASCRIPT'] = getOrCreateDict('PIPELINE.JAVASCRIPT') # if we are", "support custom command rendering XGDS_PLANNER_COMMAND_RENDERER_SCRIPTS = () # XGDS_PLANNER_COMMAND_RENDERERS - A dict of", "or implied. See the License for the # specific language governing permissions and", "below to see plannerSamplePlot.js, and include it in planner_app_base # XGDS_PLANNER_PLOTS['Sample'] = 'sample_plot'", "not use this file except in compliance with the License. 
# You may", "\"Segment\" XGDS_PLANNER_SEGMENT_MONIKER_PLURAL = XGDS_PLANNER_SEGMENT_MONIKER + 's' XGDS_PLANNER_COMMAND_MONIKER = \"Command\" XGDS_PLANNER_COMMAND_MONIKER_PLURAL = XGDS_PLANNER_COMMAND_MONIKER +", "and include it in planner_app_base # XGDS_PLANNER_PLOTS['Sample'] = 'sample_plot' # Turn on to", "the build/static/xgds_planner2 directory. The client-side JS # reads the simplified versions from there.", "XGDS_PLANNER_LINKS_LOADED_CALLBACK = 'null' # This is used to hold a map of site", "# Test skipping variables. Set to true if code somewhere else overrides #", "back end after modification or save. MODIFY = 'Modify' SAVE = 'Save' DELETE", "\"simulatorUrl\": \"xgds_planner2/js/planner/genericVehicleSimulator.js\", \"simulator\": \"genericVehicle.Simulator\", # the namespace within the simulator js } }", "# base directory for the site. They point to the XPJSON PlanSchema and", "try to get the value of FOO from django.conf.settings. That settings object will", "(true) then include the scheduling & flight management features in display # IMPORTANT", "Let's say one such parameter is FOO. The default value for FOO is", "XGDS_PLANNER_SCHEMAS = { # \"test\": { # \"schemaSource\": \"apps/xgds_planner2/testing/examplePlanSchema.json\", # \"librarySource\": \"apps/xgds_planner2/testing/examplePlanLibrary.json\", #", "XGDS_PLANNER_TEST_SKIP_INDEX = False XGDS_PLANNER_TEST_SKIP_EDIT = False XGDS_PLANNER_TEST_SKIP_DOC = False XGDS_PLANNER_TEST_SKIP_PLAN_REST = False XGDS_PLANNER_TEST_SKIP_PLAN_EXPORT", "client-side planner JS. # # * @simulator is the JavaScript name of the", "in the flot plot chart, register functions here. 
# You must also then", "'xgds_planner2/js/olStationViews.js', 'xgds_planner2/js/olSegmentViews.js', 'xgds_planner2/js/olPlanViews.js', 'xgds_planner2/js/simulatorDriver.js' ), 'output_filename': 'js/compiled_planner_app.js' }, # must create 'simulator' entry", "site doesn't like the default value, they can override it in the site-level", "Python method, it will happen on the back end after modification or save.", "bearing distance be in crs units XGDS_PLANNER_CRS_UNITS_DEFAULT = False # list of (formatCode,", "'css/planner_tests.css', }, } PIPELINE = getOrCreateDict('PIPELINE') PIPELINE['CSS'] = XGDS_PLANNER_PIPELINE_CSS PIPELINE['JAVASCRIPT'] = getOrCreateDict('PIPELINE.JAVASCRIPT') #", "must create 'simulator' entry in top-level siteSettings.py #TODO update, qunit is installed with", "map for right now. Really it should not be jammed in that file.", "override and register your own method if you need it. # It must", "dictionary called extras XGDS_PLANNER_EDITOR_CONTEXT_METHOD = 'xgds_planner2.views.addToEditorContext' # Method to add stuff to planExecution", "# If you have callbacks to be connected to the planner, register them", "to see plannerSamplePlot.js, and include it in planner_app_base # XGDS_PLANNER_PLOTS['Sample'] = 'sample_plot' #", "( 'external/js/qunit-1.12.0.js', 'xgds_planner2/js/tests.js', ), 'output_filename': 'js/planner_tests.js' } } XGDS_PLANNER_PIPELINE_CSS = { 'planner_app': {", "getOrCreateDict('XGDS_DATA_MASKED_FIELDS') XGDS_DATA_MASKED_FIELDS['xgds_planner2'] = {'Plan': ['uuid', 'dateModified', 'jsonPlan', 'deleted', 'readOnly', 'numStations', 'numSegments', 'numCommands', 'stats'", "flot plot chart, register functions here. # You must also then include the", "of site frames, so we can convert lat/long to the closest site frame.", "to add stuff to planExecution if you are not doing the basic planExecution.", "the settings, now they are set in the PlanSchema database table. 
# XGDS_PLANNER_SCHEMAS", "an 'exec' method, it will happen on the back end after modification or", "Set to true to make the bearing distance be in crs units XGDS_PLANNER_CRS_UNITS_DEFAULT", "XGDS_PLANNER_LAYER_FEED_URL = \"/xgds_map_server/treejson/\" XGDS_PLANNER_LINE_WIDTH_PIXELS = 3 XGDS_PLANNER_PLAN_MODEL = \"xgds_planner2.Plan\" XGDS_PLANNER_PLAN_MONIKER = \"Plan\" XGDS_PLANNER_PLAN_EXECUTION_MODEL", "source files. One of the steps within 'manage.py prep' # is 'prepapps'. During", "['uuid', 'dateModified', 'jsonPlan', 'deleted', 'readOnly', 'numStations', 'numSegments', 'numCommands', 'stats' ] } # If", "\"\"\" This app may define some new parameters that can be modified in", "False # Don't load google earth if this is true XGDS_PLANNER_MAP_ROTATION_HANDLES = True", "governing permissions and limitations under the License. #__END_LICENSE__ from geocamUtil.SettingsUtil import getOrCreateArray \"\"\"", "get the value of FOO from django.conf.settings. That settings object will not know", "plan create, note that since it's in site settings you can't have a", "parameters that can be modified in the Django settings module. Let's say one", "this compilation of javascript files for your planner and simulator PIPELINE['JAVASCRIPT']['simulator'] = {'source_filenames':", "in this file, like this: FOO = 'my default value' If the admin", "in the Django settings module. Let's say one such parameter is FOO. The", "example XGDS_PLANNER_COMMAND_RENDERERS = {} # If this is defined (true) then include the", "is FOO. The default value for FOO is defined in this file, like", "to call after the links tab is loaded. 
XGDS_PLANNER_LINKS_LOADED_CALLBACK = 'null' # This", "For this module to work, the site-level config file (siteSettings.py), must merge the", "'.crsjson', 'xgds_planner2.planExporter.CrsJsonPlanExporter'), ('kml', '.kml', 'xgds_planner2.kmlPlanExporter.KmlPlanExporter'), ('stats', '-stats.json', 'xgds_planner2.statsPlanExporter.StatsPlanExporter'), # ('pml', '.pml', 'xgds_planner2.pmlPlanExporter.PmlPlanExporter'), )", "the mapviews.js # to render a command in a custom way. # see", "table. # XGDS_PLANNER_SCHEMAS = [ # ] # XGDS_PLANNER_SCHEMAS: A list of XPJSON", "PlanSchema and # PlanLibrary source files. One of the steps within 'manage.py prep'", "XGDS_PLANNER_COMMAND_RENDERER_SCRIPTS javascript files to be included by the mapviews.js # to support custom", "# extra JavaScript callback to call after the links tab is loaded. XGDS_PLANNER_LINKS_LOADED_CALLBACK", "function implemented. # Dictionary should be: legible name: namespace of library XGDS_PLANNER_PLOTS =", "no other django-pipeline includes are defined, the relevant siteSettings.py section might look like", "#__BEGIN_LICENSE__ # Copyright (c) 2015, United States Government, as represented by the #", "be plotting values in the flot plot chart, register functions here. # You", "'xgds_planner2/js/plannerLinksViews.js', 'xgds_planner2/js/plannerToolsViews.js', 'xgds_planner2/js/plannerScheduleViews.js', 'xgds_planner2/js/plannerViews.js', 'xgds_planner2/js/map_viewer/olMapViews.js', 'xgds_planner2/js/olStationViews.js', 'xgds_planner2/js/olSegmentViews.js', 'xgds_planner2/js/olPlanViews.js', 'xgds_planner2/js/simulatorDriver.js' ), 'output_filename': 'js/compiled_planner_app.js' },", "a json dictionary called extras XGDS_PLANNER_EDITOR_CONTEXT_METHOD = 'xgds_planner2.views.addToEditorContext' # Method to add stuff", "National Aeronautics and Space Administration. # All rights reserved. 
# # The xGDS", "@simulator is the JavaScript name of the simulator module defined by # the", "are using the planner we want to add uploadJson into the custom map", "'xgds_planner2/js/tests.js', ), 'output_filename': 'js/planner_tests.js' } } XGDS_PLANNER_PIPELINE_CSS = { 'planner_app': { 'source_filenames': (", "# as part of the client-side planner JS. # # * @simulator is", "connected to the planner, register them as follows # XGDS_PLANNER_CALLBACK = [(MODIFY,'my.planner.modify.callback', PYTHON),", "distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR # CONDITIONS OF ANY", "# xgds_planner provides; remove anything you won't be using. XGDS_PLANNER_PLAN_EXPORTERS = ( ('xpjson',", "'xgds_map_server/js/map_viewer/olShowMapCoords.js', 'xgds_map_server/js/map_viewer/olInitialLayers.js' ), 'output_filename': 'js/custom_map.js' } # Override this compilation of javascript files", "about the default value! ### # DJANGO-PIPELINE ADDENDUM: For this module to work,", "does not work so it's separate in the planner_app # 'backbone-forms/distribution/templates/old.css', 'xgds_planner2/css/planner.css', #'xgds_planner2/css/forms_adjust.css',", "you may not use this file except in compliance with the License. #", "(c) 2015, United States Government, as represented by the # Administrator of the", "the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR #", "'sample_plot' # Turn on to enable plan validation support and UI XGDS_PLANNER_VALIDATION =", "the scheduling & flight management features in display # IMPORTANT YOU MUST INCLUDE", "XGDS_PLANNER_SEGMENT_MONIKER = \"Segment\" XGDS_PLANNER_SEGMENT_MONIKER_PLURAL = XGDS_PLANNER_SEGMENT_MONIKER + 's' XGDS_PLANNER_COMMAND_MONIKER = \"Command\" XGDS_PLANNER_COMMAND_MONIKER_PLURAL =", "of XPJSON schemas available in the # planner. Notes: # # * @schemaSource", "the simulator module defined by # the file at @simulatorUrl. 
# XGDS_PLANNER_SCHEMAS =", "writing, software distributed # under the License is distributed on an \"AS IS\"", "for right now. Really it should not be jammed in that file. PIPELINE['JAVASCRIPT']['custom_map']", "= getOrCreateDict('PIPELINE') PIPELINE['CSS'] = XGDS_PLANNER_PIPELINE_CSS PIPELINE['JAVASCRIPT'] = getOrCreateDict('PIPELINE.JAVASCRIPT') # if we are using", "), 'output_filename': 'css/planner_tests.css', }, } PIPELINE = getOrCreateDict('PIPELINE') PIPELINE['CSS'] = XGDS_PLANNER_PIPELINE_CSS PIPELINE['JAVASCRIPT'] =", "see xgds_kn for example XGDS_PLANNER_COMMAND_RENDERERS = {} # If this is defined (true)", "end after modification or save. MODIFY = 'Modify' SAVE = 'Save' DELETE =", "# to the build/static/xgds_planner2 directory. The client-side JS # reads the simplified versions", "django.conf import settings print settings.FOO Don't try to get the value of FOO", "form, set this to the site id from your WITHIN plan library. XGDS_PLANNER_DEFAULT_SITE", "the site id from your WITHIN plan library. XGDS_PLANNER_DEFAULT_SITE = ('IRG', 'Ames') #", "from django.conf import settings print settings.FOO Don't try to get the value of", "* @schemaSource and @librarySource are paths relative to the PROJ_ROOT # base directory", "= False XGDS_PLANNER_TEST_SKIP_PLAN_REST = False XGDS_PLANNER_TEST_SKIP_PLAN_EXPORT = False XGDS_PLANNER_TEST_SKIP_CREATE_PLAN_PAGE = False XGDS_PLANNER_TEST_SKIP_CREATE_PLAN =", "getOrCreateDict('PIPELINE') PIPELINE['CSS'] = XGDS_PLANNER_PIPELINE_CSS PIPELINE['JAVASCRIPT'] = getOrCreateDict('PIPELINE.JAVASCRIPT') # if we are using the", "= \"xgds_planner2.PlanExecution\" XGDS_PLANNER_STATION_MONIKER = \"Station\" XGDS_PLANNER_STATION_MONIKER_PLURAL = XGDS_PLANNER_STATION_MONIKER + 's' XGDS_PLANNER_SEGMENT_MONIKER = \"Segment\"", "new parameters that can be modified in the Django settings module. Let's say", "in the PlanSchema database table. # XGDS_PLANNER_SCHEMAS = [ # ] # XGDS_PLANNER_SCHEMAS:", "PlanLibrary source files. 
One of the steps within 'manage.py prep' # is 'prepapps'.", "set this to the site id from your WITHIN plan library. XGDS_PLANNER_DEFAULT_SITE =", "of FOO from django.conf.settings. That settings object will not know about the default", "= 'Python' EXEC = 'Exec' XGDS_PLANNER_CALLBACK = [] # If you will be", "= [ # ] # XGDS_PLANNER_SCHEMAS: A list of XPJSON schemas available in", "list of (formatCode, extension, importerClass) XGDS_PLANNER_PLAN_IMPORTERS = ( ('kml', '.kml', 'xgds_planner2.kmlPlanImporter.KmlLineStringPlanImporter'), ('csv', '.csv',", "# 'external/js/jquery/jquery.migrate.min.js', XGDS_PLANNER_PIPELINE_JS = { 'planner_app': { 'source_filenames': ('jquery/dist/jquery.min.js', 'jquery-migrate-official/src/migrate.js', 'jquery-ui-dist/jquery-ui.min.js', 'handlebars/dist/handlebars.min.js', 'backbone/backbone.js',", "\"AS IS\" BASIS, WITHOUT WARRANTIES OR # CONDITIONS OF ANY KIND, either express", "library. XGDS_PLANNER_DEFAULT_SITE = ('IRG', 'Ames') # Method to add stuff to context for", "set in the PlanSchema database table. # XGDS_PLANNER_SCHEMAS = [ # ] #", "Test skipping variables. Set to true if code somewhere else overrides # some", "('stats', '-stats.json', 'xgds_planner2.statsPlanExporter.StatsPlanExporter'), # ('pml', '.pml', 'xgds_planner2.pmlPlanExporter.PmlPlanExporter'), ) # list of (formatCode, extension,", "# IMPORTANT YOU MUST INCLUDE THIS IN SITE SETTINGS # TEMPLATE_CONTEXT_PROCESSORS = (global_settings.TEMPLATE_CONTEXT_PROCESSORS", "is the JavaScript name of the simulator module defined by # the file", "that function implemented. 
# Dictionary should be: legible name: namespace of library XGDS_PLANNER_PLOTS", "to be set in the settings, now they are set in the PlanSchema", "Uncomment the below to see plannerSamplePlot.js, and include it in planner_app_base # XGDS_PLANNER_PLOTS['Sample']", "is installed with bower 'xgds_planner2_testing': { 'source_filenames': ( 'external/js/qunit-1.12.0.js', 'xgds_planner2/js/tests.js', ), 'output_filename': 'js/planner_tests.js'", "@simulatorUrl. # XGDS_PLANNER_SCHEMAS = { # \"test\": { # \"schemaSource\": \"apps/xgds_planner2/testing/examplePlanSchema.json\", # \"librarySource\":", "it's separate in the planner_app # 'backbone-forms/distribution/templates/old.css', 'xgds_planner2/css/planner.css', #'xgds_planner2/css/forms_adjust.css', ), 'output_filename': 'css/planner_app.css', 'template_name':", "it should not be jammed in that file. PIPELINE['JAVASCRIPT']['custom_map'] = {'source_filenames': ('xgds_planner2/js/uploadJson.js', 'xgds_map_server/js/map_viewer/olShowMapCoords.js',", "# # * @schemaSource and @librarySource are paths relative to the PROJ_ROOT #", "('pml', '.pml', 'xgds_planner2.pmlPlanExporter.PmlPlanExporter'), ) # list of (formatCode, extension, importerClass) XGDS_PLANNER_PLAN_IMPORTERS = (", "save. MODIFY = 'Modify' SAVE = 'Save' DELETE = 'Delete' JAVASCRIPT = 'JavaScript'", "modification or save. # If it is a javascript method, it will happen", "the bearing distance be in crs units XGDS_PLANNER_CRS_UNITS_DEFAULT = False # list of", "it will happen on the back end after modification or save. # If", "= 'sample_plot' # Turn on to enable plan validation support and UI XGDS_PLANNER_VALIDATION", "command rendering XGDS_PLANNER_COMMAND_RENDERER_SCRIPTS = () # XGDS_PLANNER_COMMAND_RENDERERS - A dict of Command type", "is loaded. XGDS_PLANNER_LINKS_LOADED_CALLBACK = 'null' # This is used to hold a map", "closest site frame. # It is initialized by calling views.getSiteFrames(). 
XGDS_PLANNER_SITE_FRAMES = []", "Version 2.0 # (the \"License\"); you may not use this file except in", "getOrCreateDict XGDS_PLANNER_OFFLINE = False # Don't load google earth if this is true", "XGDS_PLANNER_STATION_MONIKER_PLURAL = XGDS_PLANNER_STATION_MONIKER + 's' XGDS_PLANNER_SEGMENT_MONIKER = \"Segment\" XGDS_PLANNER_SEGMENT_MONIKER_PLURAL = XGDS_PLANNER_SEGMENT_MONIKER + 's'", "PIPELINE_CSS = {} PIPELINE_CSS.update(plannerSettings.XGDS_PLANNER_PIPELINE_CSS) # ### \"\"\" import os from geocamUtil.SettingsUtil import getOrCreateDict", "of the client-side planner JS. # # * @simulator is the JavaScript name", "# # * @simulator is the JavaScript name of the simulator module defined", "'string-format/lib/string-format.js', 'usng/usng.js', 'proj4/dist/proj4.js', 'xgds_map_server/js/util/handlebars-helpers.js', 'xgds_map_server/js/util/geo.js', 'xgds_map_server/js/util/forms.js', 'xgds_planner2/js/plannerApp.js', 'xgds_planner2/js/plannerModels.js', 'xgds_planner2/js/olPlannerStyles.js', 'xgds_planner2/js/plannerLinksViews.js', 'xgds_planner2/js/plannerToolsViews.js', 'xgds_planner2/js/plannerScheduleViews.js', 'xgds_planner2/js/plannerViews.js',", "to planExecution if you are not doing the basic planExecution. # This gets", "the simulator model for the schema. The model is loaded # as part", "be executed in order either when the plan is modified or when the", "in the site-level settings module, like this: FOO = 'a better value' Other", "XGDS_PLANNER_TEST_SKIP_DOC = False XGDS_PLANNER_TEST_SKIP_PLAN_REST = False XGDS_PLANNER_TEST_SKIP_PLAN_EXPORT = False XGDS_PLANNER_TEST_SKIP_CREATE_PLAN_PAGE = False XGDS_PLANNER_TEST_SKIP_CREATE_PLAN", "License at # http://www.apache.org/licenses/LICENSE-2.0. 
# # Unless required by applicable law or agreed", "# must create 'simulator' entry in top-level siteSettings.py #TODO update, qunit is installed", "'jquery-ui-dist/jquery-ui.min.js', 'handlebars/dist/handlebars.min.js', 'backbone/backbone.js', 'backbone.wreqr/lib/backbone.wreqr.min.js', 'backbone.babysitter/lib/backbone.babysitter.min.js', 'backbone-relational/backbone-relational.js', 'backbone-forms/distribution/backbone-forms.min.js', 'backbone.marionette/lib/backbone.marionette.min.js', 'string-format/lib/string-format.js', 'usng/usng.js', 'proj4/dist/proj4.js', 'xgds_map_server/js/util/handlebars-helpers.js', 'xgds_map_server/js/util/geo.js',", "callback to call after the links tab is loaded. XGDS_PLANNER_LINKS_LOADED_CALLBACK = 'null' #", "'backbone-forms/distribution/backbone-forms.min.js', 'backbone.marionette/lib/backbone.marionette.min.js', 'string-format/lib/string-format.js', 'usng/usng.js', 'proj4/dist/proj4.js', 'xgds_map_server/js/util/handlebars-helpers.js', 'xgds_map_server/js/util/geo.js', 'xgds_map_server/js/util/forms.js', 'xgds_planner2/js/plannerApp.js', 'xgds_planner2/js/plannerModels.js', 'xgds_planner2/js/olPlannerStyles.js', 'xgds_planner2/js/plannerLinksViews.js', 'xgds_planner2/js/plannerToolsViews.js',", "module. Let's say one such parameter is FOO. 
The default value for FOO", "= { 'planner_app': { 'source_filenames': ('jquery/dist/jquery.min.js', 'jquery-migrate-official/src/migrate.js', 'jquery-ui-dist/jquery-ui.min.js', 'handlebars/dist/handlebars.min.js', 'backbone/backbone.js', 'backbone.wreqr/lib/backbone.wreqr.min.js', 'backbone.babysitter/lib/backbone.babysitter.min.js', 'backbone-relational/backbone-relational.js',", "is defined (true) then include the scheduling & flight management features in display", "{ 'source_filenames': ( 'qunit/qunit/qunit.css', ), 'output_filename': 'css/planner_tests.css', }, } PIPELINE = getOrCreateDict('PIPELINE') PIPELINE['CSS']", "files for your planner and simulator PIPELINE['JAVASCRIPT']['simulator'] = {'source_filenames': ('xgds_planner2/js/planner/genericVehicleSimulator.js', # This trailing", "be used in the mapviews.js # to render a command in a custom", "them as follows # XGDS_PLANNER_CALLBACK = [(MODIFY,'my.planner.modify.callback', PYTHON), # (SAVE,'my.planner.save.callback', JAVASCRIPT)] # they", "access the value of FOO like this: from django.conf import settings print settings.FOO", "save. # If it is a javascript method, it will happen on the", "'simulator' entry in top-level siteSettings.py #TODO update, qunit is installed with bower 'xgds_planner2_testing':", "prep' # is 'prepapps'. During that step, those files are processed by #", "or agreed to in writing, software distributed # under the License is distributed", "that # xgds_planner provides; remove anything you won't be using. 
XGDS_PLANNER_PLAN_EXPORTERS = (", "compilation of javascript files for your planner and simulator PIPELINE['JAVASCRIPT']['simulator'] = {'source_filenames': ('xgds_planner2/js/planner/genericVehicleSimulator.js',", "value, they can override it in the site-level settings module, like this: FOO", "# OVERRIDE this in your sitesettings to have a custom plan create, note", "= [] # If you will be plotting values in the flot plot", "are paths relative to the PROJ_ROOT # base directory for the site. They", "plan library. XGDS_PLANNER_DEFAULT_SITE = ('IRG', 'Ames') # Method to add stuff to context", "('IRG', 'Ames') # Method to add stuff to context for plan editor, override", "XGDS_PLANNER_SCHEDULE_EXTRAS_METHOD = None # OVERRIDE this in your sitesettings to have a custom", "tuple ), 'output_filename': 'js/simulator.js', } _thisDir = os.path.dirname(__file__) # Set to true to", "or save. # If it is a javascript method, it will happen on", "and register your own method if you need it. # It must add", "will happen on the back end after modification or save. MODIFY = 'Modify'", "simulator PIPELINE['JAVASCRIPT']['simulator'] = {'source_filenames': ('xgds_planner2/js/planner/genericVehicleSimulator.js', # This trailing comma is critical because this", "of Command type to javascript file to be used in the mapviews.js #", "'xgds_planner2/js/olSegmentViews.js', 'xgds_planner2/js/olPlanViews.js', 'xgds_planner2/js/simulatorDriver.js' ), 'output_filename': 'js/compiled_planner_app.js' }, # must create 'simulator' entry in", "a tuple ), 'output_filename': 'js/simulator.js', } _thisDir = os.path.dirname(__file__) # Set to true", "THIS IN SITE SETTINGS # TEMPLATE_CONTEXT_PROCESSORS = (global_settings.TEMPLATE_CONTEXT_PROCESSORS + ( # ... #", "XGDS_PLANNER_EDITOR_CONTEXT_METHOD = 'xgds_planner2.views.addToEditorContext' # Method to add stuff to planExecution if you are", "site id from your WITHIN plan library. 
XGDS_PLANNER_DEFAULT_SITE = ('IRG', 'Ames') # Method", "JavaScript # file that defines the simulator model for the schema. The model", "'Ames') # Method to add stuff to context for plan editor, override and", "'source_filenames': ( 'external/js/qunit-1.12.0.js', 'xgds_planner2/js/tests.js', ), 'output_filename': 'js/planner_tests.js' } } XGDS_PLANNER_PIPELINE_CSS = { 'planner_app':", "simulator js } } # XGDS_PLANNER_COMMAND_RENDERER_SCRIPTS javascript files to be included by the", "'id']} XGDS_DATA_MASKED_FIELDS = getOrCreateDict('XGDS_DATA_MASKED_FIELDS') XGDS_DATA_MASKED_FIELDS['xgds_planner2'] = {'Plan': ['uuid', 'dateModified', 'jsonPlan', 'deleted', 'readOnly', 'numStations',", "can't have a reverse lookup. XGDS_PLANNER_CREATE_URL = \"/xgds_planner2/plan/create\" # Schema used to be", "# Override this compilation of javascript files for your planner and simulator PIPELINE['JAVASCRIPT']['simulator']", "on the back end after modification or save. MODIFY = 'Modify' SAVE =", "then include the scheduling & flight management features in display # IMPORTANT YOU", "context for plan editor, override and register your own method if you need", "# If it is a javascript method, it will happen on the front", "('crsjson', '.crsjson', 'xgds_planner2.planExporter.CrsJsonPlanExporter'), ('kml', '.kml', 'xgds_planner2.kmlPlanExporter.KmlPlanExporter'), ('stats', '-stats.json', 'xgds_planner2.statsPlanExporter.StatsPlanExporter'), # ('pml', '.pml', 'xgds_planner2.pmlPlanExporter.PmlPlanExporter'),", "add a json dictionary called extras XGDS_PLANNER_EDITOR_CONTEXT_METHOD = 'xgds_planner2.views.addToEditorContext' # Method to add", "'Delete' JAVASCRIPT = 'JavaScript' PYTHON = 'Python' EXEC = 'Exec' XGDS_PLANNER_CALLBACK = []", "BASIS, WITHOUT WARRANTIES OR # CONDITIONS OF ANY KIND, either express or implied.", "are written # to the build/static/xgds_planner2 directory. 
The client-side JS # reads the", "{} PIPELINE_JS.update(plannerSettings.XGDS_PLANNER_PIPELINE_JS) PIPELINE_CSS = {} PIPELINE_CSS.update(plannerSettings.XGDS_PLANNER_PIPELINE_CSS) # ### \"\"\" import os from geocamUtil.SettingsUtil", "your sitesettings to have a custom plan create, note that since it's in", "# Uncomment the below to see plannerSamplePlot.js, and include it in planner_app_base #", "the default value! ### # DJANGO-PIPELINE ADDENDUM: For this module to work, the", "= False XGDS_PLANNER_HANDLEBARS_DIRS = [os.path.join('xgds_planner2', 'templates', 'handlebars'), os.path.join('xgds_map_server', 'templates', 'handlebars', 'search')] XGDS_PLANNER_PLAN_BEARING_HANDLEBAR_PATH =", "siteSettings.py section might look like this: PIPELINE_JS = {} PIPELINE_JS.update(plannerSettings.XGDS_PLANNER_PIPELINE_JS) PIPELINE_CSS = {}", "file at @simulatorUrl. # XGDS_PLANNER_SCHEMAS = { # \"test\": { # \"schemaSource\": \"apps/xgds_planner2/testing/examplePlanSchema.json\",", "frames, so we can convert lat/long to the closest site frame. # It", "own method if you need it. # It must add a json dictionary", "that file. PIPELINE['JAVASCRIPT']['custom_map'] = {'source_filenames': ('xgds_planner2/js/uploadJson.js', 'xgds_map_server/js/map_viewer/olShowMapCoords.js', 'xgds_map_server/js/map_viewer/olInitialLayers.js' ), 'output_filename': 'js/custom_map.js' } #", "'JavaScript' PYTHON = 'Python' EXEC = 'Exec' XGDS_PLANNER_CALLBACK = [] # If you", "'backbone-forms/distribution/templates/old.css', 'xgds_planner2/css/planner.css', #'xgds_planner2/css/forms_adjust.css', ), 'output_filename': 'css/planner_app.css', 'template_name': 'xgds_planner2/pipelineCSS.css', }, 'xgds_planner2_testing': { 'source_filenames': (", "[ # ] # XGDS_PLANNER_SCHEMAS: A list of XPJSON schemas available in the", "FOO from django.conf.settings. 
That settings object will not know about the default value!", "be set in the settings, now they are set in the PlanSchema database", "obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0. # # Unless required", "False XGDS_PLANNER_TEST_SKIP_CREATE_PLAN_PAGE = False XGDS_PLANNER_TEST_SKIP_CREATE_PLAN = False XGDS_PLANNER_HANDLEBARS_DIRS = [os.path.join('xgds_planner2', 'templates', 'handlebars'), os.path.join('xgds_map_server',", "for example XGDS_PLANNER_COMMAND_RENDERERS = {} # If this is defined (true) then include", "to be used in the mapviews.js # to render a command in a", "= [os.path.join('xgds_planner2', 'templates', 'handlebars'), os.path.join('xgds_map_server', 'templates', 'handlebars', 'search')] XGDS_PLANNER_PLAN_BEARING_HANDLEBAR_PATH = 'xgds_planner2/templates/xgds_planner2/bearingDistancePlan.handlebars' # XGDS_PLANNER_LINKS_LOADED_CALLBACK:", "'xgds_planner2/js/plannerModels.js', 'xgds_planner2/js/olPlannerStyles.js', 'xgds_planner2/js/plannerLinksViews.js', 'xgds_planner2/js/plannerToolsViews.js', 'xgds_planner2/js/plannerScheduleViews.js', 'xgds_planner2/js/plannerViews.js', 'xgds_planner2/js/map_viewer/olMapViews.js', 'xgds_planner2/js/olStationViews.js', 'xgds_planner2/js/olSegmentViews.js', 'xgds_planner2/js/olPlanViews.js', 'xgds_planner2/js/simulatorDriver.js' ), 'output_filename':", "= {} PIPELINE_CSS.update(plannerSettings.XGDS_PLANNER_PIPELINE_CSS) # ### \"\"\" import os from geocamUtil.SettingsUtil import getOrCreateDict XGDS_PLANNER_OFFLINE", "so we can convert lat/long to the closest site frame. # It is", "planner. XGDS_PLANNER_TEST_SKIP_INDEX = False XGDS_PLANNER_TEST_SKIP_EDIT = False XGDS_PLANNER_TEST_SKIP_DOC = False XGDS_PLANNER_TEST_SKIP_PLAN_REST = False", "settings you can't have a reverse lookup. XGDS_PLANNER_CREATE_URL = \"/xgds_planner2/plan/create\" # Schema used", "happen on the back end after modification or save. 
MODIFY = 'Modify' SAVE", "'xgds_planner2.planExporter.XpjsonPlanExporter'), ('bearing_distance', '.bdj', 'xgds_planner2.planExporter.BearingDistanceJsonPlanExporter'), ('bearing_distance', '.cbdj', 'xgds_planner2.planExporter.BearingDistanceCRSJsonPlanExporter'), ('crsjson', '.crsjson', 'xgds_planner2.planExporter.CrsJsonPlanExporter'), ('kml', '.kml', 'xgds_planner2.kmlPlanExporter.KmlPlanExporter'),", "separate in the planner_app # 'backbone-forms/distribution/templates/old.css', 'xgds_planner2/css/planner.css', #'xgds_planner2/css/forms_adjust.css', ), 'output_filename': 'css/planner_app.css', 'template_name': 'xgds_planner2/pipelineCSS.css',", "site-level config file (siteSettings.py), must merge the XGDS_PLANNER_PIPELINE_JS and XGDS_PLANNER_PIPELINE_CSS settings into global", "method if you need it. # It must add a json dictionary called", "'s' XGDS_PLANNER_SEGMENT_MONIKER = \"Segment\" XGDS_PLANNER_SEGMENT_MONIKER_PLURAL = XGDS_PLANNER_SEGMENT_MONIKER + 's' XGDS_PLANNER_COMMAND_MONIKER = \"Command\" XGDS_PLANNER_COMMAND_MONIKER_PLURAL", "# Dictionary should be: legible name: namespace of library XGDS_PLANNER_PLOTS = {} #", "getOrCreateDict('XGDS_MAP_SERVER_JS_MAP') XGDS_MAP_SERVER_JS_MAP['Plan'] = {'ol': 'xgds_planner2/js/olPlanMap.js', 'model': XGDS_PLANNER_PLAN_MODEL, 'hiddenColumns': ['stations', 'type', 'id']} XGDS_DATA_MASKED_FIELDS =", "the value of FOO from django.conf.settings. That settings object will not know about", "settings dicts. If no other django-pipeline includes are defined, the relevant siteSettings.py section", "settings print settings.FOO Don't try to get the value of FOO from django.conf.settings.", "some reason compressing this in the css does not work so it's separate", "note that since it's in site settings you can't have a reverse lookup.", "not be jammed in that file. 
PIPELINE['JAVASCRIPT']['custom_map'] = {'source_filenames': ('xgds_planner2/js/uploadJson.js', 'xgds_map_server/js/map_viewer/olShowMapCoords.js', 'xgds_map_server/js/map_viewer/olInitialLayers.js' ),", "to work, the site-level config file (siteSettings.py), must merge the XGDS_PLANNER_PIPELINE_JS and XGDS_PLANNER_PIPELINE_CSS", "to support custom command rendering XGDS_PLANNER_COMMAND_RENDERER_SCRIPTS = () # XGDS_PLANNER_COMMAND_RENDERERS - A dict", "JavaScript callback to call after the links tab is loaded. XGDS_PLANNER_LINKS_LOADED_CALLBACK = 'null'", "to a JavaScript # file that defines the simulator model for the schema.", "the site doesn't like the default value, they can override it in the", "processed by # compileXpjson.py and the simplified/canonical versions are written # to the", "# It is initialized by calling views.getSiteFrames(). XGDS_PLANNER_SITE_FRAMES = [] XGDS_MAP_SERVER_JS_MAP = getOrCreateDict('XGDS_MAP_SERVER_JS_MAP')", "WARRANTIES OR # CONDITIONS OF ANY KIND, either express or implied. See the", "'numCommands', 'stats' ] } # If you have callbacks to be connected to", "(global_settings.TEMPLATE_CONTEXT_PROCESSORS + ( # ... # 'geocamUtil.context_processors.settings' XGDS_PLANNER_SCHEDULE_INCLUDED = None # Test skipping", "} _thisDir = os.path.dirname(__file__) # Set to true to make the bearing distance", "calling views.getSiteFrames(). XGDS_PLANNER_SITE_FRAMES = [] XGDS_MAP_SERVER_JS_MAP = getOrCreateDict('XGDS_MAP_SERVER_JS_MAP') XGDS_MAP_SERVER_JS_MAP['Plan'] = {'ol': 'xgds_planner2/js/olPlanMap.js', 'model':", "# (SAVE,'my.planner.save.callback', JAVASCRIPT)] # they will be executed in order either when the", "permissions and limitations under the License. 
#__END_LICENSE__ from geocamUtil.SettingsUtil import getOrCreateArray \"\"\" This", "'planner_app': { 'source_filenames': ( 'jquery-ui-dist/jquery-ui.min.css', # for some reason compressing this in the", "using the planner we want to add uploadJson into the custom map for", "IN SITE SETTINGS # TEMPLATE_CONTEXT_PROCESSORS = (global_settings.TEMPLATE_CONTEXT_PROCESSORS + ( # ... # 'geocamUtil.context_processors.settings'", "frame. # It is initialized by calling views.getSiteFrames(). XGDS_PLANNER_SITE_FRAMES = [] XGDS_MAP_SERVER_JS_MAP =", "# under the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES", "implied. See the License for the # specific language governing permissions and limitations", "XGDS_PLANNER_PLAN_MONIKER = \"Plan\" XGDS_PLANNER_PLAN_EXECUTION_MODEL = \"xgds_planner2.PlanExecution\" XGDS_PLANNER_STATION_MONIKER = \"Station\" XGDS_PLANNER_STATION_MONIKER_PLURAL = XGDS_PLANNER_STATION_MONIKER +", "os from geocamUtil.SettingsUtil import getOrCreateDict XGDS_PLANNER_OFFLINE = False # Don't load google earth", "XGDS_PLANNER_PIPELINE_JS = { 'planner_app': { 'source_filenames': ('jquery/dist/jquery.min.js', 'jquery-migrate-official/src/migrate.js', 'jquery-ui-dist/jquery-ui.min.js', 'handlebars/dist/handlebars.min.js', 'backbone/backbone.js', 'backbone.wreqr/lib/backbone.wreqr.min.js', 'backbone.babysitter/lib/backbone.babysitter.min.js',", "have a reverse lookup. XGDS_PLANNER_CREATE_URL = \"/xgds_planner2/plan/create\" # Schema used to be set", "[] # If you will be plotting values in the flot plot chart,", "'Exec' XGDS_PLANNER_CALLBACK = [] # If you will be plotting values in the", "the value of FOO like this: from django.conf import settings print settings.FOO Don't", "if you are not doing the basic planExecution. # This gets invoked from", "the XPJSON PlanSchema and # PlanLibrary source files. 
One of the steps within", "'.kml', 'xgds_planner2.kmlPlanImporter.KmlLineStringPlanImporter'), ('csv', '.csv', 'xgds_planner2.csvPlanImporter.CSVPlanImporter'), ('json', '.json', 'xgds_planner2.planImporter.XPJsonPlanImporter'), ) # kml root from", "functions here. # You must also then include the javascript library that has", "either express or implied. See the License for the # specific language governing", "save. # If it is an 'exec' method, it will happen on the", "one such parameter is FOO. The default value for FOO is defined in", "XGDS_PLANNER_PLAN_BEARING_HANDLEBAR_PATH = 'xgds_planner2/templates/xgds_planner2/bearingDistancePlan.handlebars' # XGDS_PLANNER_LINKS_LOADED_CALLBACK: The fully qualified name of an # extra", "The client-side JS # reads the simplified versions from there. # # *", "happen on the back end after modification or save. # If it is", "3 XGDS_PLANNER_PLAN_MODEL = \"xgds_planner2.Plan\" XGDS_PLANNER_PLAN_MONIKER = \"Plan\" XGDS_PLANNER_PLAN_EXECUTION_MODEL = \"xgds_planner2.PlanExecution\" XGDS_PLANNER_STATION_MONIKER = \"Station\"", "the client-side planner JS. # # * @simulator is the JavaScript name of", "it is a Python method, it will happen on the back end after", "A list of XPJSON schemas available in the # planner. Notes: # #", "language governing permissions and limitations under the License. #__END_LICENSE__ from geocamUtil.SettingsUtil import getOrCreateArray", "js } } # XGDS_PLANNER_COMMAND_RENDERER_SCRIPTS javascript files to be included by the mapviews.js", "# they will be executed in order either when the plan is modified", "base directory for the site. They point to the XPJSON PlanSchema and #", "register functions here. # You must also then include the javascript library that", "should not be jammed in that file. 
PIPELINE['JAVASCRIPT']['custom_map'] = {'source_filenames': ('xgds_planner2/js/uploadJson.js', 'xgds_map_server/js/map_viewer/olShowMapCoords.js', 'xgds_map_server/js/map_viewer/olInitialLayers.js'", "\"xgds_planner2/testing/exampleSimulator.js\", # \"simulator\": \"xgds_planner2.ExampleSimulator\", # }, \"GenericVehicle\": { \"schemaSource\": \"apps/xgds_planner2/js/xgds_planner2/planner/genericVehiclePlanSchema.json\", \"librarySource\": \"apps/xgds_planner2/js/xgds_planner2/planner/genericVehiclePlanLibrary.json\", \"simulatorUrl\":", "and Space Administration. # All rights reserved. # # The xGDS platform is", "this: FOO = 'my default value' If the admin for the site doesn't", "to make the bearing distance be in crs units XGDS_PLANNER_CRS_UNITS_DEFAULT = False #", "XGDS_MAP_SERVER_JS_MAP['Plan'] = {'ol': 'xgds_planner2/js/olPlanMap.js', 'model': XGDS_PLANNER_PLAN_MODEL, 'hiddenColumns': ['stations', 'type', 'id']} XGDS_DATA_MASKED_FIELDS = getOrCreateDict('XGDS_DATA_MASKED_FIELDS')", "# ] # XGDS_PLANNER_SCHEMAS: A list of XPJSON schemas available in the #", "# You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0. #", "javascript file to be used in the mapviews.js # to render a command", "planner JS. # # * @simulator is the JavaScript name of the simulator", "to the PROJ_ROOT # base directory for the site. They point to the", "plan editor, override and register your own method if you need it. #", "extras XGDS_PLANNER_EDITOR_CONTEXT_METHOD = 'xgds_planner2.views.addToEditorContext' # Method to add stuff to planExecution if you", "in the mapviews.js # to render a command in a custom way. #", "False # list of (formatCode, extension, exporterClass). This is the entire list of", "License, Version 2.0 # (the \"License\"); you may not use this file except", "( # ... # 'geocamUtil.context_processors.settings' XGDS_PLANNER_SCHEDULE_INCLUDED = None # Test skipping variables. 
Set", "XGDS_PLANNER_PIPELINE_JS and XGDS_PLANNER_PIPELINE_CSS settings into global PIPELINE_{JS|CSS} settings dicts. If no other django-pipeline", "# If this is defined (true) then include the scheduling & flight management", "is saved. # If it is a Python method, it will happen on", "If no other django-pipeline includes are defined, the relevant siteSettings.py section might look", "defined, the relevant siteSettings.py section might look like this: PIPELINE_JS = {} PIPELINE_JS.update(plannerSettings.XGDS_PLANNER_PIPELINE_JS)", "XGDS_PLANNER_PLAN_IMPORTERS = ( ('kml', '.kml', 'xgds_planner2.kmlPlanImporter.KmlLineStringPlanImporter'), ('csv', '.csv', 'xgds_planner2.csvPlanImporter.CSVPlanImporter'), ('json', '.json', 'xgds_planner2.planImporter.XPJsonPlanImporter'), )", "module, like this: FOO = 'a better value' Other modules can access the", "settings module. Let's say one such parameter is FOO. The default value for", "# # * @simulatorUrl is relative to STATIC_URL. It should point to a", "include the scheduling & flight management features in display # IMPORTANT YOU MUST", "'source_filenames': ('jquery/dist/jquery.min.js', 'jquery-migrate-official/src/migrate.js', 'jquery-ui-dist/jquery-ui.min.js', 'handlebars/dist/handlebars.min.js', 'backbone/backbone.js', 'backbone.wreqr/lib/backbone.wreqr.min.js', 'backbone.babysitter/lib/backbone.babysitter.min.js', 'backbone-relational/backbone-relational.js', 'backbone-forms/distribution/backbone-forms.min.js', 'backbone.marionette/lib/backbone.marionette.min.js', 'string-format/lib/string-format.js', 'usng/usng.js',", "All rights reserved. # # The xGDS platform is licensed under the Apache", "FOO like this: from django.conf import settings print settings.FOO Don't try to get", "value of FOO from django.conf.settings. That settings object will not know about the", "It is initialized by calling views.getSiteFrames(). 
XGDS_PLANNER_SITE_FRAMES = [] XGDS_MAP_SERVER_JS_MAP = getOrCreateDict('XGDS_MAP_SERVER_JS_MAP') XGDS_MAP_SERVER_JS_MAP['Plan']", "this in the css does not work so it's separate in the planner_app", "xgds_kn for example XGDS_PLANNER_COMMAND_RENDERERS = {} # If this is defined (true) then", "a JavaScript # file that defines the simulator model for the schema. The", "and @librarySource are paths relative to the PROJ_ROOT # base directory for the", "\"schemaSource\": \"apps/xgds_planner2/testing/examplePlanSchema.json\", # \"librarySource\": \"apps/xgds_planner2/testing/examplePlanLibrary.json\", # \"simulatorUrl\": \"xgds_planner2/testing/exampleSimulator.js\", # \"simulator\": \"xgds_planner2.ExampleSimulator\", # },", "import getOrCreateArray \"\"\" This app may define some new parameters that can be", "for plan editor, override and register your own method if you need it.", "print settings.FOO Don't try to get the value of FOO from django.conf.settings. That", "'xgds_planner2_testing': { 'source_filenames': ( 'qunit/qunit/qunit.css', ), 'output_filename': 'css/planner_tests.css', }, } PIPELINE = getOrCreateDict('PIPELINE')", "value of FOO like this: from django.conf import settings print settings.FOO Don't try", "'xgds_map_server/js/util/geo.js', 'xgds_map_server/js/util/forms.js', 'xgds_planner2/js/plannerApp.js', 'xgds_planner2/js/plannerModels.js', 'xgds_planner2/js/olPlannerStyles.js', 'xgds_planner2/js/plannerLinksViews.js', 'xgds_planner2/js/plannerToolsViews.js', 'xgds_planner2/js/plannerScheduleViews.js', 'xgds_planner2/js/plannerViews.js', 'xgds_planner2/js/map_viewer/olMapViews.js', 'xgds_planner2/js/olStationViews.js', 'xgds_planner2/js/olSegmentViews.js', 'xgds_planner2/js/olPlanViews.js',", "value' If the admin for the site doesn't like the default value, they", "to hold a map of site frames, so we can convert lat/long to", "'xgds_planner2/js/plannerApp.js', 'xgds_planner2/js/plannerModels.js', 'xgds_planner2/js/olPlannerStyles.js', 
'xgds_planner2/js/plannerLinksViews.js', 'xgds_planner2/js/plannerToolsViews.js', 'xgds_planner2/js/plannerScheduleViews.js', 'xgds_planner2/js/plannerViews.js', 'xgds_planner2/js/map_viewer/olMapViews.js', 'xgds_planner2/js/olStationViews.js', 'xgds_planner2/js/olSegmentViews.js', 'xgds_planner2/js/olPlanViews.js', 'xgds_planner2/js/simulatorDriver.js' ),", "True # 'external/js/jquery/jquery.migrate.min.js', XGDS_PLANNER_PIPELINE_JS = { 'planner_app': { 'source_filenames': ('jquery/dist/jquery.min.js', 'jquery-migrate-official/src/migrate.js', 'jquery-ui-dist/jquery-ui.min.js', 'handlebars/dist/handlebars.min.js',", "required by applicable law or agreed to in writing, software distributed # under", "compliance with the License. # You may obtain a copy of the License", "a command in a custom way. # see xgds_kn for example XGDS_PLANNER_COMMAND_RENDERERS =", "executed in order either when the plan is modified or when the plan", "override it in the site-level settings module, like this: FOO = 'a better", "your planner and simulator PIPELINE['JAVASCRIPT']['simulator'] = {'source_filenames': ('xgds_planner2/js/planner/genericVehicleSimulator.js', # This trailing comma is", "= 'my default value' If the admin for the site doesn't like the", "there. # # * @simulatorUrl is relative to STATIC_URL. It should point to", "licensed under the Apache License, Version 2.0 # (the \"License\"); you may not", "value' Other modules can access the value of FOO like this: from django.conf", "XGDS_PLANNER_PIPELINE_CSS = { 'planner_app': { 'source_filenames': ( 'jquery-ui-dist/jquery-ui.min.css', # for some reason compressing", "for the site. They point to the XPJSON PlanSchema and # PlanLibrary source", "planner we want to add uploadJson into the custom map for right now.", "on the back end after modification or save. 
# If it is a", "it a tuple ), 'output_filename': 'js/simulator.js', } _thisDir = os.path.dirname(__file__) # Set to", "XGDS_PLANNER_DEFAULT_SITE = ('IRG', 'Ames') # Method to add stuff to context for plan", "= {} # Uncomment the below to see plannerSamplePlot.js, and include it in", "2.0 # (the \"License\"); you may not use this file except in compliance", "(SAVE,'my.planner.save.callback', JAVASCRIPT)] # they will be executed in order either when the plan", "now. Really it should not be jammed in that file. PIPELINE['JAVASCRIPT']['custom_map'] = {'source_filenames':", "method, it will happen on the front end after modification or save. #", "directory. The client-side JS # reads the simplified versions from there. # #", "can access the value of FOO like this: from django.conf import settings print", "Apache License, Version 2.0 # (the \"License\"); you may not use this file", "name of the simulator module defined by # the file at @simulatorUrl. #", "relative to STATIC_URL. It should point to a JavaScript # file that defines", "this: FOO = 'a better value' Other modules can access the value of", "happen on the front end after modification or save. # If it is", "since it's in site settings you can't have a reverse lookup. XGDS_PLANNER_CREATE_URL =", "the site-level settings module, like this: FOO = 'a better value' Other modules", "'output_filename': 'js/simulator.js', } _thisDir = os.path.dirname(__file__) # Set to true to make the", "('json', '.json', 'xgds_planner2.planImporter.XPJsonPlanImporter'), ) # kml root from xgds_map_server XGDS_PLANNER_LAYER_FEED_URL = \"/xgds_map_server/treejson/\" XGDS_PLANNER_LINE_WIDTH_PIXELS", "Set to true if code somewhere else overrides # some functionality in the", "modification or save. # If it is an 'exec' method, it will happen", "reverse lookup. 
XGDS_PLANNER_CREATE_URL = \"/xgds_planner2/plan/create\" # Schema used to be set in the", "Notes: # # * @schemaSource and @librarySource are paths relative to the PROJ_ROOT", "compressing this in the css does not work so it's separate in the", "Other modules can access the value of FOO like this: from django.conf import", "= {'source_filenames': ('xgds_planner2/js/uploadJson.js', 'xgds_map_server/js/map_viewer/olShowMapCoords.js', 'xgds_map_server/js/map_viewer/olInitialLayers.js' ), 'output_filename': 'js/custom_map.js' } # Override this compilation", "# \"simulator\": \"xgds_planner2.ExampleSimulator\", # }, \"GenericVehicle\": { \"schemaSource\": \"apps/xgds_planner2/js/xgds_planner2/planner/genericVehiclePlanSchema.json\", \"librarySource\": \"apps/xgds_planner2/js/xgds_planner2/planner/genericVehiclePlanLibrary.json\", \"simulatorUrl\": \"xgds_planner2/js/planner/genericVehicleSimulator.js\",", "If it is an 'exec' method, it will happen on the back end", "# Unless required by applicable law or agreed to in writing, software distributed", "some functionality in the planner. XGDS_PLANNER_TEST_SKIP_INDEX = False XGDS_PLANNER_TEST_SKIP_EDIT = False XGDS_PLANNER_TEST_SKIP_DOC =", "you have callbacks to be connected to the planner, register them as follows", "it will happen on the back end after modification or save. MODIFY =", "a custom way. # see xgds_kn for example XGDS_PLANNER_COMMAND_RENDERERS = {} # If", "should point to a JavaScript # file that defines the simulator model for", "that since it's in site settings you can't have a reverse lookup. XGDS_PLANNER_CREATE_URL", "from there. # # * @simulatorUrl is relative to STATIC_URL. It should point", "file except in compliance with the License. 
# You may obtain a copy", "'xgds_map_server/js/util/handlebars-helpers.js', 'xgds_map_server/js/util/geo.js', 'xgds_map_server/js/util/forms.js', 'xgds_planner2/js/plannerApp.js', 'xgds_planner2/js/plannerModels.js', 'xgds_planner2/js/olPlannerStyles.js', 'xgds_planner2/js/plannerLinksViews.js', 'xgds_planner2/js/plannerToolsViews.js', 'xgds_planner2/js/plannerScheduleViews.js', 'xgds_planner2/js/plannerViews.js', 'xgds_planner2/js/map_viewer/olMapViews.js', 'xgds_planner2/js/olStationViews.js', 'xgds_planner2/js/olSegmentViews.js',", "# It must add a json dictionary called extras XGDS_PLANNER_EDITOR_CONTEXT_METHOD = 'xgds_planner2.views.addToEditorContext' #", "= { 'planner_app': { 'source_filenames': ( 'jquery-ui-dist/jquery-ui.min.css', # for some reason compressing this", "), 'output_filename': 'js/compiled_planner_app.js' }, # must create 'simulator' entry in top-level siteSettings.py #TODO", "of the License at # http://www.apache.org/licenses/LICENSE-2.0. # # Unless required by applicable law", "plan is modified or when the plan is saved. # If it is", "\"librarySource\": \"apps/xgds_planner2/js/xgds_planner2/planner/genericVehiclePlanLibrary.json\", \"simulatorUrl\": \"xgds_planner2/js/planner/genericVehicleSimulator.js\", \"simulator\": \"genericVehicle.Simulator\", # the namespace within the simulator js", "@librarySource are paths relative to the PROJ_ROOT # base directory for the site.", "is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR # CONDITIONS OF", "list of (formatCode, extension, exporterClass). This is the entire list of everything that", "method, it will happen on the back end after modification or save. #", "= {} # If this is defined (true) then include the scheduling &", "into global PIPELINE_{JS|CSS} settings dicts. 
If no other django-pipeline includes are defined, the", "'xgds_planner2/js/olPlannerStyles.js', 'xgds_planner2/js/plannerLinksViews.js', 'xgds_planner2/js/plannerToolsViews.js', 'xgds_planner2/js/plannerScheduleViews.js', 'xgds_planner2/js/plannerViews.js', 'xgds_planner2/js/map_viewer/olMapViews.js', 'xgds_planner2/js/olStationViews.js', 'xgds_planner2/js/olSegmentViews.js', 'xgds_planner2/js/olPlanViews.js', 'xgds_planner2/js/simulatorDriver.js' ), 'output_filename': 'js/compiled_planner_app.js'", "the below to see plannerSamplePlot.js, and include it in planner_app_base # XGDS_PLANNER_PLOTS['Sample'] =", "getOrCreateDict('PIPELINE.JAVASCRIPT') # if we are using the planner we want to add uploadJson", "be: legible name: namespace of library XGDS_PLANNER_PLOTS = {} # Uncomment the below", "convert lat/long to the closest site frame. # It is initialized by calling", "to be included by the mapviews.js # to support custom command rendering XGDS_PLANNER_COMMAND_RENDERER_SCRIPTS", "list of XPJSON schemas available in the # planner. Notes: # # *", "= XGDS_PLANNER_STATION_MONIKER + 's' XGDS_PLANNER_SEGMENT_MONIKER = \"Segment\" XGDS_PLANNER_SEGMENT_MONIKER_PLURAL = XGDS_PLANNER_SEGMENT_MONIKER + 's' XGDS_PLANNER_COMMAND_MONIKER", "add stuff to planExecution if you are not doing the basic planExecution. #", "model is loaded # as part of the client-side planner JS. 
# #", "'handlebars', 'search')] XGDS_PLANNER_PLAN_BEARING_HANDLEBAR_PATH = 'xgds_planner2/templates/xgds_planner2/bearingDistancePlan.handlebars' # XGDS_PLANNER_LINKS_LOADED_CALLBACK: The fully qualified name of an", "# ### \"\"\" import os from geocamUtil.SettingsUtil import getOrCreateDict XGDS_PLANNER_OFFLINE = False #", "= { # \"test\": { # \"schemaSource\": \"apps/xgds_planner2/testing/examplePlanSchema.json\", # \"librarySource\": \"apps/xgds_planner2/testing/examplePlanLibrary.json\", # \"simulatorUrl\":", "('bearing_distance', '.cbdj', 'xgds_planner2.planExporter.BearingDistanceCRSJsonPlanExporter'), ('crsjson', '.crsjson', 'xgds_planner2.planExporter.CrsJsonPlanExporter'), ('kml', '.kml', 'xgds_planner2.kmlPlanExporter.KmlPlanExporter'), ('stats', '-stats.json', 'xgds_planner2.statsPlanExporter.StatsPlanExporter'), #", "), 'output_filename': 'css/planner_app.css', 'template_name': 'xgds_planner2/pipelineCSS.css', }, 'xgds_planner2_testing': { 'source_filenames': ( 'qunit/qunit/qunit.css', ), 'output_filename':", "units XGDS_PLANNER_CRS_UNITS_DEFAULT = False # list of (formatCode, extension, exporterClass). This is the", "SAVE = 'Save' DELETE = 'Delete' JAVASCRIPT = 'JavaScript' PYTHON = 'Python' EXEC", "XGDS_PLANNER_MAP_ROTATION_HANDLES = True XGDS_PLANNER_DIRECTIONAL_STATIONS = True # 'external/js/jquery/jquery.migrate.min.js', XGDS_PLANNER_PIPELINE_JS = { 'planner_app': {", "= 'Exec' XGDS_PLANNER_CALLBACK = [] # If you will be plotting values in", "in writing, software distributed # under the License is distributed on an \"AS", "{ # \"test\": { # \"schemaSource\": \"apps/xgds_planner2/testing/examplePlanSchema.json\", # \"librarySource\": \"apps/xgds_planner2/testing/examplePlanLibrary.json\", # \"simulatorUrl\": \"xgds_planner2/testing/exampleSimulator.js\",", "and the simplified/canonical versions are written # to the build/static/xgds_planner2 directory. 
The client-side", "'xgds_planner2/js/simulatorDriver.js' ), 'output_filename': 'js/compiled_planner_app.js' }, # must create 'simulator' entry in top-level siteSettings.py", "the custom map for right now. Really it should not be jammed in", "YOU MUST INCLUDE THIS IN SITE SETTINGS # TEMPLATE_CONTEXT_PROCESSORS = (global_settings.TEMPLATE_CONTEXT_PROCESSORS + (", "the PlanSchema database table. # XGDS_PLANNER_SCHEMAS = [ # ] # XGDS_PLANNER_SCHEMAS: A", "\"apps/xgds_planner2/js/xgds_planner2/planner/genericVehiclePlanLibrary.json\", \"simulatorUrl\": \"xgds_planner2/js/planner/genericVehicleSimulator.js\", \"simulator\": \"genericVehicle.Simulator\", # the namespace within the simulator js }", "if this is true XGDS_PLANNER_MAP_ROTATION_HANDLES = True XGDS_PLANNER_DIRECTIONAL_STATIONS = True # 'external/js/jquery/jquery.migrate.min.js', XGDS_PLANNER_PIPELINE_JS", "say one such parameter is FOO. The default value for FOO is defined", "# XGDS_PLANNER_COMMAND_RENDERERS - A dict of Command type to javascript file to be", "'.csv', 'xgds_planner2.csvPlanImporter.CSVPlanImporter'), ('json', '.json', 'xgds_planner2.planImporter.XPJsonPlanImporter'), ) # kml root from xgds_map_server XGDS_PLANNER_LAYER_FEED_URL =", "we can convert lat/long to the closest site frame. # It is initialized", "PIPELINE['CSS'] = XGDS_PLANNER_PIPELINE_CSS PIPELINE['JAVASCRIPT'] = getOrCreateDict('PIPELINE.JAVASCRIPT') # if we are using the planner", "is loaded # as part of the client-side planner JS. # # *", "include it in planner_app_base # XGDS_PLANNER_PLOTS['Sample'] = 'sample_plot' # Turn on to enable", "need it. # It must add a json dictionary called extras XGDS_PLANNER_EDITOR_CONTEXT_METHOD =", "} XGDS_PLANNER_PIPELINE_CSS = { 'planner_app': { 'source_filenames': ( 'jquery-ui-dist/jquery-ui.min.css', # for some reason", "in the settings, now they are set in the PlanSchema database table. 
#", "MUST INCLUDE THIS IN SITE SETTINGS # TEMPLATE_CONTEXT_PROCESSORS = (global_settings.TEMPLATE_CONTEXT_PROCESSORS + ( #", "import settings print settings.FOO Don't try to get the value of FOO from", "# Schema used to be set in the settings, now they are set", "to have a default site frame in the creation form, set this to", "gets invoked from schedulePlans call in views.py XGDS_PLANNER_SCHEDULE_EXTRAS_METHOD = None # OVERRIDE this", "XGDS_PLANNER_PLOTS['Sample'] = 'sample_plot' # Turn on to enable plan validation support and UI", "\"simulatorUrl\": \"xgds_planner2/testing/exampleSimulator.js\", # \"simulator\": \"xgds_planner2.ExampleSimulator\", # }, \"GenericVehicle\": { \"schemaSource\": \"apps/xgds_planner2/js/xgds_planner2/planner/genericVehiclePlanSchema.json\", \"librarySource\": \"apps/xgds_planner2/js/xgds_planner2/planner/genericVehiclePlanLibrary.json\",", "a map of site frames, so we can convert lat/long to the closest", "# (the \"License\"); you may not use this file except in compliance with", "# kml root from xgds_map_server XGDS_PLANNER_LAYER_FEED_URL = \"/xgds_map_server/treejson/\" XGDS_PLANNER_LINE_WIDTH_PIXELS = 3 XGDS_PLANNER_PLAN_MODEL =", "= \"Station\" XGDS_PLANNER_STATION_MONIKER_PLURAL = XGDS_PLANNER_STATION_MONIKER + 's' XGDS_PLANNER_SEGMENT_MONIKER = \"Segment\" XGDS_PLANNER_SEGMENT_MONIKER_PLURAL = XGDS_PLANNER_SEGMENT_MONIKER", "'backbone.babysitter/lib/backbone.babysitter.min.js', 'backbone-relational/backbone-relational.js', 'backbone-forms/distribution/backbone-forms.min.js', 'backbone.marionette/lib/backbone.marionette.min.js', 'string-format/lib/string-format.js', 'usng/usng.js', 'proj4/dist/proj4.js', 'xgds_map_server/js/util/handlebars-helpers.js', 'xgds_map_server/js/util/geo.js', 'xgds_map_server/js/util/forms.js', 'xgds_planner2/js/plannerApp.js', 'xgds_planner2/js/plannerModels.js', 'xgds_planner2/js/olPlannerStyles.js',", "a reverse lookup. 
XGDS_PLANNER_CREATE_URL = \"/xgds_planner2/plan/create\" # Schema used to be set in", "everything that # xgds_planner provides; remove anything you won't be using. XGDS_PLANNER_PLAN_EXPORTERS =", ") # list of (formatCode, extension, importerClass) XGDS_PLANNER_PLAN_IMPORTERS = ( ('kml', '.kml', 'xgds_planner2.kmlPlanImporter.KmlLineStringPlanImporter'),", "in the css does not work so it's separate in the planner_app #", "not work so it's separate in the planner_app # 'backbone-forms/distribution/templates/old.css', 'xgds_planner2/css/planner.css', #'xgds_planner2/css/forms_adjust.css', ),", "}, } PIPELINE = getOrCreateDict('PIPELINE') PIPELINE['CSS'] = XGDS_PLANNER_PIPELINE_CSS PIPELINE['JAVASCRIPT'] = getOrCreateDict('PIPELINE.JAVASCRIPT') # if", "}, # must create 'simulator' entry in top-level siteSettings.py #TODO update, qunit is", "# \"test\": { # \"schemaSource\": \"apps/xgds_planner2/testing/examplePlanSchema.json\", # \"librarySource\": \"apps/xgds_planner2/testing/examplePlanLibrary.json\", # \"simulatorUrl\": \"xgds_planner2/testing/exampleSimulator.js\", #", "} } # XGDS_PLANNER_COMMAND_RENDERER_SCRIPTS javascript files to be included by the mapviews.js #", "Command type to javascript file to be used in the mapviews.js # to", "entire list of everything that # xgds_planner provides; remove anything you won't be", "defines the simulator model for the schema. The model is loaded # as", "that has that function implemented. # Dictionary should be: legible name: namespace of", "Schema used to be set in the settings, now they are set in", "name: namespace of library XGDS_PLANNER_PLOTS = {} # Uncomment the below to see", "the # Administrator of the National Aeronautics and Space Administration. 
# All rights", "+ 's' #TODO to have a default site frame in the creation form,", "critical because this makes it a tuple ), 'output_filename': 'js/simulator.js', } _thisDir =", "add stuff to context for plan editor, override and register your own method", "from geocamUtil.SettingsUtil import getOrCreateArray \"\"\" This app may define some new parameters that", "XGDS_PLANNER_COMMAND_MONIKER_PLURAL = XGDS_PLANNER_COMMAND_MONIKER + 's' #TODO to have a default site frame in", "= getOrCreateDict('XGDS_MAP_SERVER_JS_MAP') XGDS_MAP_SERVER_JS_MAP['Plan'] = {'ol': 'xgds_planner2/js/olPlanMap.js', 'model': XGDS_PLANNER_PLAN_MODEL, 'hiddenColumns': ['stations', 'type', 'id']} XGDS_DATA_MASKED_FIELDS", "'Save' DELETE = 'Delete' JAVASCRIPT = 'JavaScript' PYTHON = 'Python' EXEC = 'Exec'", "under the Apache License, Version 2.0 # (the \"License\"); you may not use", "fully qualified name of an # extra JavaScript callback to call after the", "'geocamUtil.context_processors.settings' XGDS_PLANNER_SCHEDULE_INCLUDED = None # Test skipping variables. Set to true if code", "bower 'xgds_planner2_testing': { 'source_filenames': ( 'external/js/qunit-1.12.0.js', 'xgds_planner2/js/tests.js', ), 'output_filename': 'js/planner_tests.js' } } XGDS_PLANNER_PIPELINE_CSS", "to render a command in a custom way. # see xgds_kn for example", "('xgds_planner2/js/uploadJson.js', 'xgds_map_server/js/map_viewer/olShowMapCoords.js', 'xgds_map_server/js/map_viewer/olInitialLayers.js' ), 'output_filename': 'js/custom_map.js' } # Override this compilation of javascript", "namespace within the simulator js } } # XGDS_PLANNER_COMMAND_RENDERER_SCRIPTS javascript files to be", "at @simulatorUrl. # XGDS_PLANNER_SCHEMAS = { # \"test\": { # \"schemaSource\": \"apps/xgds_planner2/testing/examplePlanSchema.json\", #", "(formatCode, extension, exporterClass). 
This is the entire list of everything that # xgds_planner", "namespace of library XGDS_PLANNER_PLOTS = {} # Uncomment the below to see plannerSamplePlot.js,", "is true XGDS_PLANNER_MAP_ROTATION_HANDLES = True XGDS_PLANNER_DIRECTIONAL_STATIONS = True # 'external/js/jquery/jquery.migrate.min.js', XGDS_PLANNER_PIPELINE_JS = {", "\"Plan\" XGDS_PLANNER_PLAN_EXECUTION_MODEL = \"xgds_planner2.PlanExecution\" XGDS_PLANNER_STATION_MONIKER = \"Station\" XGDS_PLANNER_STATION_MONIKER_PLURAL = XGDS_PLANNER_STATION_MONIKER + 's' XGDS_PLANNER_SEGMENT_MONIKER", "and simulator PIPELINE['JAVASCRIPT']['simulator'] = {'source_filenames': ('xgds_planner2/js/planner/genericVehicleSimulator.js', # This trailing comma is critical because", "# * @schemaSource and @librarySource are paths relative to the PROJ_ROOT # base", "False XGDS_PLANNER_TEST_SKIP_PLAN_EXPORT = False XGDS_PLANNER_TEST_SKIP_CREATE_PLAN_PAGE = False XGDS_PLANNER_TEST_SKIP_CREATE_PLAN = False XGDS_PLANNER_HANDLEBARS_DIRS = [os.path.join('xgds_planner2',", "If it is a javascript method, it will happen on the front end", "using. XGDS_PLANNER_PLAN_EXPORTERS = ( ('xpjson', '.json', 'xgds_planner2.planExporter.XpjsonPlanExporter'), ('bearing_distance', '.bdj', 'xgds_planner2.planExporter.BearingDistanceJsonPlanExporter'), ('bearing_distance', '.cbdj', 'xgds_planner2.planExporter.BearingDistanceCRSJsonPlanExporter'),", "the steps within 'manage.py prep' # is 'prepapps'. 
During that step, those files", "'xgds_planner2.views.addToEditorContext' # Method to add stuff to planExecution if you are not doing", "'search')] XGDS_PLANNER_PLAN_BEARING_HANDLEBAR_PATH = 'xgds_planner2/templates/xgds_planner2/bearingDistancePlan.handlebars' # XGDS_PLANNER_LINKS_LOADED_CALLBACK: The fully qualified name of an #", "= 3 XGDS_PLANNER_PLAN_MODEL = \"xgds_planner2.Plan\" XGDS_PLANNER_PLAN_MONIKER = \"Plan\" XGDS_PLANNER_PLAN_EXECUTION_MODEL = \"xgds_planner2.PlanExecution\" XGDS_PLANNER_STATION_MONIKER =", "JavaScript name of the simulator module defined by # the file at @simulatorUrl.", "by the # Administrator of the National Aeronautics and Space Administration. # All", "relevant siteSettings.py section might look like this: PIPELINE_JS = {} PIPELINE_JS.update(plannerSettings.XGDS_PLANNER_PIPELINE_JS) PIPELINE_CSS =", "The fully qualified name of an # extra JavaScript callback to call after", "CONDITIONS OF ANY KIND, either express or implied. See the License for the", "# the file at @simulatorUrl. # XGDS_PLANNER_SCHEMAS = { # \"test\": { #", "\"simulator\": \"xgds_planner2.ExampleSimulator\", # }, \"GenericVehicle\": { \"schemaSource\": \"apps/xgds_planner2/js/xgds_planner2/planner/genericVehiclePlanSchema.json\", \"librarySource\": \"apps/xgds_planner2/js/xgds_planner2/planner/genericVehiclePlanLibrary.json\", \"simulatorUrl\": \"xgds_planner2/js/planner/genericVehicleSimulator.js\", \"simulator\":", "modules can access the value of FOO like this: from django.conf import settings", "to true to make the bearing distance be in crs units XGDS_PLANNER_CRS_UNITS_DEFAULT =", "'templates', 'handlebars', 'search')] XGDS_PLANNER_PLAN_BEARING_HANDLEBAR_PATH = 'xgds_planner2/templates/xgds_planner2/bearingDistancePlan.handlebars' # XGDS_PLANNER_LINKS_LOADED_CALLBACK: The fully qualified name of", "'numSegments', 'numCommands', 'stats' ] } # If you have callbacks to be connected", "OR # CONDITIONS OF ANY KIND, either express or implied. 
See the License", "Aeronautics and Space Administration. # All rights reserved. # # The xGDS platform", "'.json', 'xgds_planner2.planExporter.XpjsonPlanExporter'), ('bearing_distance', '.bdj', 'xgds_planner2.planExporter.BearingDistanceJsonPlanExporter'), ('bearing_distance', '.cbdj', 'xgds_planner2.planExporter.BearingDistanceCRSJsonPlanExporter'), ('crsjson', '.crsjson', 'xgds_planner2.planExporter.CrsJsonPlanExporter'), ('kml', '.kml',", "If the admin for the site doesn't like the default value, they can", "css does not work so it's separate in the planner_app # 'backbone-forms/distribution/templates/old.css', 'xgds_planner2/css/planner.css',", "the National Aeronautics and Space Administration. # All rights reserved. # # The", "module defined by # the file at @simulatorUrl. # XGDS_PLANNER_SCHEMAS = { #", "None # Test skipping variables. Set to true if code somewhere else overrides", "such parameter is FOO. The default value for FOO is defined in this", "XGDS_PLANNER_PLAN_MODEL, 'hiddenColumns': ['stations', 'type', 'id']} XGDS_DATA_MASKED_FIELDS = getOrCreateDict('XGDS_DATA_MASKED_FIELDS') XGDS_DATA_MASKED_FIELDS['xgds_planner2'] = {'Plan': ['uuid', 'dateModified',", "... # 'geocamUtil.context_processors.settings' XGDS_PLANNER_SCHEDULE_INCLUDED = None # Test skipping variables. Set to true", "this is true XGDS_PLANNER_MAP_ROTATION_HANDLES = True XGDS_PLANNER_DIRECTIONAL_STATIONS = True # 'external/js/jquery/jquery.migrate.min.js', XGDS_PLANNER_PIPELINE_JS =", "directory for the site. They point to the XPJSON PlanSchema and # PlanLibrary", "create, note that since it's in site settings you can't have a reverse", "section might look like this: PIPELINE_JS = {} PIPELINE_JS.update(plannerSettings.XGDS_PLANNER_PIPELINE_JS) PIPELINE_CSS = {} PIPELINE_CSS.update(plannerSettings.XGDS_PLANNER_PIPELINE_CSS)", "the front end after modification or save. 
# If it is an 'exec'", "PIPELINE_JS = {} PIPELINE_JS.update(plannerSettings.XGDS_PLANNER_PIPELINE_JS) PIPELINE_CSS = {} PIPELINE_CSS.update(plannerSettings.XGDS_PLANNER_PIPELINE_CSS) # ### \"\"\" import os", "} # XGDS_PLANNER_COMMAND_RENDERER_SCRIPTS javascript files to be included by the mapviews.js # to", "INCLUDE THIS IN SITE SETTINGS # TEMPLATE_CONTEXT_PROCESSORS = (global_settings.TEMPLATE_CONTEXT_PROCESSORS + ( # ...", "This trailing comma is critical because this makes it a tuple ), 'output_filename':", "# If you will be plotting values in the flot plot chart, register", "PIPELINE_JS.update(plannerSettings.XGDS_PLANNER_PIPELINE_JS) PIPELINE_CSS = {} PIPELINE_CSS.update(plannerSettings.XGDS_PLANNER_PIPELINE_CSS) # ### \"\"\" import os from geocamUtil.SettingsUtil import", "want to add uploadJson into the custom map for right now. Really it", "'manage.py prep' # is 'prepapps'. During that step, those files are processed by", "follows # XGDS_PLANNER_CALLBACK = [(MODIFY,'my.planner.modify.callback', PYTHON), # (SAVE,'my.planner.save.callback', JAVASCRIPT)] # they will be", "software distributed # under the License is distributed on an \"AS IS\" BASIS,", "you won't be using. XGDS_PLANNER_PLAN_EXPORTERS = ( ('xpjson', '.json', 'xgds_planner2.planExporter.XpjsonPlanExporter'), ('bearing_distance', '.bdj', 'xgds_planner2.planExporter.BearingDistanceJsonPlanExporter'),", "define some new parameters that can be modified in the Django settings module.", "'my default value' If the admin for the site doesn't like the default", "'a better value' Other modules can access the value of FOO like this:", "doing the basic planExecution. 
# This gets invoked from schedulePlans call in views.py", "agreed to in writing, software distributed # under the License is distributed on", "in views.py XGDS_PLANNER_SCHEDULE_EXTRAS_METHOD = None # OVERRIDE this in your sitesettings to have", "# ('pml', '.pml', 'xgds_planner2.pmlPlanExporter.PmlPlanExporter'), ) # list of (formatCode, extension, importerClass) XGDS_PLANNER_PLAN_IMPORTERS =", "links tab is loaded. XGDS_PLANNER_LINKS_LOADED_CALLBACK = 'null' # This is used to hold", "XGDS_DATA_MASKED_FIELDS = getOrCreateDict('XGDS_DATA_MASKED_FIELDS') XGDS_DATA_MASKED_FIELDS['xgds_planner2'] = {'Plan': ['uuid', 'dateModified', 'jsonPlan', 'deleted', 'readOnly', 'numStations', 'numSegments',", "XGDS_PLANNER_SEGMENT_MONIKER_PLURAL = XGDS_PLANNER_SEGMENT_MONIKER + 's' XGDS_PLANNER_COMMAND_MONIKER = \"Command\" XGDS_PLANNER_COMMAND_MONIKER_PLURAL = XGDS_PLANNER_COMMAND_MONIKER + 's'", "entry in top-level siteSettings.py #TODO update, qunit is installed with bower 'xgds_planner2_testing': {", "better value' Other modules can access the value of FOO like this: from", "of the National Aeronautics and Space Administration. # All rights reserved. # #", "mapviews.js # to support custom command rendering XGDS_PLANNER_COMMAND_RENDERER_SCRIPTS = () # XGDS_PLANNER_COMMAND_RENDERERS -", "{'source_filenames': ('xgds_planner2/js/uploadJson.js', 'xgds_map_server/js/map_viewer/olShowMapCoords.js', 'xgds_map_server/js/map_viewer/olInitialLayers.js' ), 'output_filename': 'js/custom_map.js' } # Override this compilation of", "'xgds_planner2.planImporter.XPJsonPlanImporter'), ) # kml root from xgds_map_server XGDS_PLANNER_LAYER_FEED_URL = \"/xgds_map_server/treejson/\" XGDS_PLANNER_LINE_WIDTH_PIXELS = 3", "_thisDir = os.path.dirname(__file__) # Set to true to make the bearing distance be", "SETTINGS # TEMPLATE_CONTEXT_PROCESSORS = (global_settings.TEMPLATE_CONTEXT_PROCESSORS + ( # ... 
# 'geocamUtil.context_processors.settings' XGDS_PLANNER_SCHEDULE_INCLUDED =", "= [] XGDS_MAP_SERVER_JS_MAP = getOrCreateDict('XGDS_MAP_SERVER_JS_MAP') XGDS_MAP_SERVER_JS_MAP['Plan'] = {'ol': 'xgds_planner2/js/olPlanMap.js', 'model': XGDS_PLANNER_PLAN_MODEL, 'hiddenColumns': ['stations',", "False XGDS_PLANNER_HANDLEBARS_DIRS = [os.path.join('xgds_planner2', 'templates', 'handlebars'), os.path.join('xgds_map_server', 'templates', 'handlebars', 'search')] XGDS_PLANNER_PLAN_BEARING_HANDLEBAR_PATH = 'xgds_planner2/templates/xgds_planner2/bearingDistancePlan.handlebars'", "the Django settings module. Let's say one such parameter is FOO. The default", "DELETE = 'Delete' JAVASCRIPT = 'JavaScript' PYTHON = 'Python' EXEC = 'Exec' XGDS_PLANNER_CALLBACK", "PlanSchema database table. # XGDS_PLANNER_SCHEMAS = [ # ] # XGDS_PLANNER_SCHEMAS: A list", "the plan is modified or when the plan is saved. # If it", "root from xgds_map_server XGDS_PLANNER_LAYER_FEED_URL = \"/xgds_map_server/treejson/\" XGDS_PLANNER_LINE_WIDTH_PIXELS = 3 XGDS_PLANNER_PLAN_MODEL = \"xgds_planner2.Plan\" XGDS_PLANNER_PLAN_MONIKER", "# All rights reserved. # # The xGDS platform is licensed under the", "from schedulePlans call in views.py XGDS_PLANNER_SCHEDULE_EXTRAS_METHOD = None # OVERRIDE this in your", "at # http://www.apache.org/licenses/LICENSE-2.0. # # Unless required by applicable law or agreed to", "extension, importerClass) XGDS_PLANNER_PLAN_IMPORTERS = ( ('kml', '.kml', 'xgds_planner2.kmlPlanImporter.KmlLineStringPlanImporter'), ('csv', '.csv', 'xgds_planner2.csvPlanImporter.CSVPlanImporter'), ('json', '.json',", "# Method to add stuff to planExecution if you are not doing the", "Method to add stuff to context for plan editor, override and register your", "### \"\"\" import os from geocamUtil.SettingsUtil import getOrCreateDict XGDS_PLANNER_OFFLINE = False # Don't", "your own method if you need it. 
# It must add a json", "this makes it a tuple ), 'output_filename': 'js/simulator.js', } _thisDir = os.path.dirname(__file__) #", "won't be using. XGDS_PLANNER_PLAN_EXPORTERS = ( ('xpjson', '.json', 'xgds_planner2.planExporter.XpjsonPlanExporter'), ('bearing_distance', '.bdj', 'xgds_planner2.planExporter.BearingDistanceJsonPlanExporter'), ('bearing_distance',", "and XGDS_PLANNER_PIPELINE_CSS settings into global PIPELINE_{JS|CSS} settings dicts. If no other django-pipeline includes", "= 'null' # This is used to hold a map of site frames,", "google earth if this is true XGDS_PLANNER_MAP_ROTATION_HANDLES = True XGDS_PLANNER_DIRECTIONAL_STATIONS = True #", "your WITHIN plan library. XGDS_PLANNER_DEFAULT_SITE = ('IRG', 'Ames') # Method to add stuff", "legible name: namespace of library XGDS_PLANNER_PLOTS = {} # Uncomment the below to", "lookup. XGDS_PLANNER_CREATE_URL = \"/xgds_planner2/plan/create\" # Schema used to be set in the settings,", "that step, those files are processed by # compileXpjson.py and the simplified/canonical versions", "( ('xpjson', '.json', 'xgds_planner2.planExporter.XpjsonPlanExporter'), ('bearing_distance', '.bdj', 'xgds_planner2.planExporter.BearingDistanceJsonPlanExporter'), ('bearing_distance', '.cbdj', 'xgds_planner2.planExporter.BearingDistanceCRSJsonPlanExporter'), ('crsjson', '.crsjson', 'xgds_planner2.planExporter.CrsJsonPlanExporter'),", "}, \"GenericVehicle\": { \"schemaSource\": \"apps/xgds_planner2/js/xgds_planner2/planner/genericVehiclePlanSchema.json\", \"librarySource\": \"apps/xgds_planner2/js/xgds_planner2/planner/genericVehiclePlanLibrary.json\", \"simulatorUrl\": \"xgds_planner2/js/planner/genericVehicleSimulator.js\", \"simulator\": \"genericVehicle.Simulator\", # the", "know about the default value! ### # DJANGO-PIPELINE ADDENDUM: For this module to", "applicable law or agreed to in writing, software distributed # under the License", "the schema. 
The model is loaded # as part of the client-side planner", "() # XGDS_PLANNER_COMMAND_RENDERERS - A dict of Command type to javascript file to", "\"\"\" import os from geocamUtil.SettingsUtil import getOrCreateDict XGDS_PLANNER_OFFLINE = False # Don't load", "# PlanLibrary source files. One of the steps within 'manage.py prep' # is", "# This trailing comma is critical because this makes it a tuple ),", "'usng/usng.js', 'proj4/dist/proj4.js', 'xgds_map_server/js/util/handlebars-helpers.js', 'xgds_map_server/js/util/geo.js', 'xgds_map_server/js/util/forms.js', 'xgds_planner2/js/plannerApp.js', 'xgds_planner2/js/plannerModels.js', 'xgds_planner2/js/olPlannerStyles.js', 'xgds_planner2/js/plannerLinksViews.js', 'xgds_planner2/js/plannerToolsViews.js', 'xgds_planner2/js/plannerScheduleViews.js', 'xgds_planner2/js/plannerViews.js', 'xgds_planner2/js/map_viewer/olMapViews.js',", "from your WITHIN plan library. XGDS_PLANNER_DEFAULT_SITE = ('IRG', 'Ames') # Method to add", "for your planner and simulator PIPELINE['JAVASCRIPT']['simulator'] = {'source_filenames': ('xgds_planner2/js/planner/genericVehicleSimulator.js', # This trailing comma", "can convert lat/long to the closest site frame. # It is initialized by", "called extras XGDS_PLANNER_EDITOR_CONTEXT_METHOD = 'xgds_planner2.views.addToEditorContext' # Method to add stuff to planExecution if", "the XGDS_PLANNER_PIPELINE_JS and XGDS_PLANNER_PIPELINE_CSS settings into global PIPELINE_{JS|CSS} settings dicts. If no other", "XPJSON schemas available in the # planner. Notes: # # * @schemaSource and", "is 'prepapps'. During that step, those files are processed by # compileXpjson.py and", "Government, as represented by the # Administrator of the National Aeronautics and Space", "= \"xgds_planner2.Plan\" XGDS_PLANNER_PLAN_MONIKER = \"Plan\" XGDS_PLANNER_PLAN_EXECUTION_MODEL = \"xgds_planner2.PlanExecution\" XGDS_PLANNER_STATION_MONIKER = \"Station\" XGDS_PLANNER_STATION_MONIKER_PLURAL =", "skipping variables. 
Set to true if code somewhere else overrides # some functionality", "defined (true) then include the scheduling & flight management features in display #", "] # XGDS_PLANNER_SCHEMAS: A list of XPJSON schemas available in the # planner.", "the flot plot chart, register functions here. # You must also then include", "TEMPLATE_CONTEXT_PROCESSORS = (global_settings.TEMPLATE_CONTEXT_PROCESSORS + ( # ... # 'geocamUtil.context_processors.settings' XGDS_PLANNER_SCHEDULE_INCLUDED = None #", "IMPORTANT YOU MUST INCLUDE THIS IN SITE SETTINGS # TEMPLATE_CONTEXT_PROCESSORS = (global_settings.TEMPLATE_CONTEXT_PROCESSORS +", "register them as follows # XGDS_PLANNER_CALLBACK = [(MODIFY,'my.planner.modify.callback', PYTHON), # (SAVE,'my.planner.save.callback', JAVASCRIPT)] #", "# XGDS_PLANNER_PLOTS['Sample'] = 'sample_plot' # Turn on to enable plan validation support and", "value! ### # DJANGO-PIPELINE ADDENDUM: For this module to work, the site-level config", "end after modification or save. # If it is an 'exec' method, it", "'stats' ] } # If you have callbacks to be connected to the", "# 'geocamUtil.context_processors.settings' XGDS_PLANNER_SCHEDULE_INCLUDED = None # Test skipping variables. Set to true if", "= False XGDS_PLANNER_TEST_SKIP_EDIT = False XGDS_PLANNER_TEST_SKIP_DOC = False XGDS_PLANNER_TEST_SKIP_PLAN_REST = False XGDS_PLANNER_TEST_SKIP_PLAN_EXPORT =", "used in the mapviews.js # to render a command in a custom way.", "the basic planExecution. # This gets invoked from schedulePlans call in views.py XGDS_PLANNER_SCHEDULE_EXTRAS_METHOD", "Don't try to get the value of FOO from django.conf.settings. That settings object", "like the default value, they can override it in the site-level settings module,", "then include the javascript library that has that function implemented. # Dictionary should", "written # to the build/static/xgds_planner2 directory. The client-side JS # reads the simplified", "if code somewhere else overrides # some functionality in the planner. 
XGDS_PLANNER_TEST_SKIP_INDEX =", "'backbone.marionette/lib/backbone.marionette.min.js', 'string-format/lib/string-format.js', 'usng/usng.js', 'proj4/dist/proj4.js', 'xgds_map_server/js/util/handlebars-helpers.js', 'xgds_map_server/js/util/geo.js', 'xgds_map_server/js/util/forms.js', 'xgds_planner2/js/plannerApp.js', 'xgds_planner2/js/plannerModels.js', 'xgds_planner2/js/olPlannerStyles.js', 'xgds_planner2/js/plannerLinksViews.js', 'xgds_planner2/js/plannerToolsViews.js', 'xgds_planner2/js/plannerScheduleViews.js',", "of everything that # xgds_planner provides; remove anything you won't be using. XGDS_PLANNER_PLAN_EXPORTERS", "If you have callbacks to be connected to the planner, register them as", "so it's separate in the planner_app # 'backbone-forms/distribution/templates/old.css', 'xgds_planner2/css/planner.css', #'xgds_planner2/css/forms_adjust.css', ), 'output_filename': 'css/planner_app.css',", "XGDS_PLANNER_STATION_MONIKER = \"Station\" XGDS_PLANNER_STATION_MONIKER_PLURAL = XGDS_PLANNER_STATION_MONIKER + 's' XGDS_PLANNER_SEGMENT_MONIKER = \"Segment\" XGDS_PLANNER_SEGMENT_MONIKER_PLURAL =", "# list of (formatCode, extension, importerClass) XGDS_PLANNER_PLAN_IMPORTERS = ( ('kml', '.kml', 'xgds_planner2.kmlPlanImporter.KmlLineStringPlanImporter'), ('csv',", "It should point to a JavaScript # file that defines the simulator model", "- A dict of Command type to javascript file to be used in", "# You must also then include the javascript library that has that function", "os.path.dirname(__file__) # Set to true to make the bearing distance be in crs", "False XGDS_PLANNER_TEST_SKIP_CREATE_PLAN = False XGDS_PLANNER_HANDLEBARS_DIRS = [os.path.join('xgds_planner2', 'templates', 'handlebars'), os.path.join('xgds_map_server', 'templates', 'handlebars', 'search')]", "FOO is defined in this file, like this: FOO = 'my default value'", "PYTHON = 'Python' EXEC = 'Exec' XGDS_PLANNER_CALLBACK = [] # If you will", "# compileXpjson.py and the simplified/canonical versions are 
written # to the build/static/xgds_planner2 directory.", "update, qunit is installed with bower 'xgds_planner2_testing': { 'source_filenames': ( 'external/js/qunit-1.12.0.js', 'xgds_planner2/js/tests.js', ),", "License for the # specific language governing permissions and limitations under the License.", "it is an 'exec' method, it will happen on the back end after", "of the simulator module defined by # the file at @simulatorUrl. # XGDS_PLANNER_SCHEMAS", "PROJ_ROOT # base directory for the site. They point to the XPJSON PlanSchema", "XGDS_PLANNER_TEST_SKIP_EDIT = False XGDS_PLANNER_TEST_SKIP_DOC = False XGDS_PLANNER_TEST_SKIP_PLAN_REST = False XGDS_PLANNER_TEST_SKIP_PLAN_EXPORT = False XGDS_PLANNER_TEST_SKIP_CREATE_PLAN_PAGE", "It must add a json dictionary called extras XGDS_PLANNER_EDITOR_CONTEXT_METHOD = 'xgds_planner2.views.addToEditorContext' # Method", "# The xGDS platform is licensed under the Apache License, Version 2.0 #", "to STATIC_URL. It should point to a JavaScript # file that defines the", "on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR # CONDITIONS OF ANY KIND,", "'readOnly', 'numStations', 'numSegments', 'numCommands', 'stats' ] } # If you have callbacks to", "= 'Delete' JAVASCRIPT = 'JavaScript' PYTHON = 'Python' EXEC = 'Exec' XGDS_PLANNER_CALLBACK =", "# XGDS_PLANNER_SCHEMAS = { # \"test\": { # \"schemaSource\": \"apps/xgds_planner2/testing/examplePlanSchema.json\", # \"librarySource\": \"apps/xgds_planner2/testing/examplePlanLibrary.json\",", "= os.path.dirname(__file__) # Set to true to make the bearing distance be in", "Dictionary should be: legible name: namespace of library XGDS_PLANNER_PLOTS = {} # Uncomment", "to the closest site frame. # It is initialized by calling views.getSiteFrames(). 
XGDS_PLANNER_SITE_FRAMES", "( 'qunit/qunit/qunit.css', ), 'output_filename': 'css/planner_tests.css', }, } PIPELINE = getOrCreateDict('PIPELINE') PIPELINE['CSS'] = XGDS_PLANNER_PIPELINE_CSS", "see plannerSamplePlot.js, and include it in planner_app_base # XGDS_PLANNER_PLOTS['Sample'] = 'sample_plot' # Turn", "site settings you can't have a reverse lookup. XGDS_PLANNER_CREATE_URL = \"/xgds_planner2/plan/create\" # Schema", "the file at @simulatorUrl. # XGDS_PLANNER_SCHEMAS = { # \"test\": { # \"schemaSource\":", "('kml', '.kml', 'xgds_planner2.kmlPlanExporter.KmlPlanExporter'), ('stats', '-stats.json', 'xgds_planner2.statsPlanExporter.StatsPlanExporter'), # ('pml', '.pml', 'xgds_planner2.pmlPlanExporter.PmlPlanExporter'), ) # list", "or save. # If it is an 'exec' method, it will happen on", "javascript files for your planner and simulator PIPELINE['JAVASCRIPT']['simulator'] = {'source_filenames': ('xgds_planner2/js/planner/genericVehicleSimulator.js', # This", "values in the flot plot chart, register functions here. # You must also", "call in views.py XGDS_PLANNER_SCHEDULE_EXTRAS_METHOD = None # OVERRIDE this in your sitesettings to", "] } # If you have callbacks to be connected to the planner,", "False XGDS_PLANNER_TEST_SKIP_PLAN_REST = False XGDS_PLANNER_TEST_SKIP_PLAN_EXPORT = False XGDS_PLANNER_TEST_SKIP_CREATE_PLAN_PAGE = False XGDS_PLANNER_TEST_SKIP_CREATE_PLAN = False", "default value for FOO is defined in this file, like this: FOO =", "the javascript library that has that function implemented. # Dictionary should be: legible", "express or implied. See the License for the # specific language governing permissions", "in planner_app_base # XGDS_PLANNER_PLOTS['Sample'] = 'sample_plot' # Turn on to enable plan validation", "# XGDS_PLANNER_COMMAND_RENDERER_SCRIPTS javascript files to be included by the mapviews.js # to support", "to the build/static/xgds_planner2 directory. 
The client-side JS # reads the simplified versions from", "simulator module defined by # the file at @simulatorUrl. # XGDS_PLANNER_SCHEMAS = {", "xgds_planner provides; remove anything you won't be using. XGDS_PLANNER_PLAN_EXPORTERS = ( ('xpjson', '.json',", "XGDS_PLANNER_COMMAND_RENDERERS = {} # If this is defined (true) then include the scheduling", "within the simulator js } } # XGDS_PLANNER_COMMAND_RENDERER_SCRIPTS javascript files to be included", "planner, register them as follows # XGDS_PLANNER_CALLBACK = [(MODIFY,'my.planner.modify.callback', PYTHON), # (SAVE,'my.planner.save.callback', JAVASCRIPT)]", "'js/compiled_planner_app.js' }, # must create 'simulator' entry in top-level siteSettings.py #TODO update, qunit", "= XGDS_PLANNER_SEGMENT_MONIKER + 's' XGDS_PLANNER_COMMAND_MONIKER = \"Command\" XGDS_PLANNER_COMMAND_MONIKER_PLURAL = XGDS_PLANNER_COMMAND_MONIKER + 's' #TODO", "the default value, they can override it in the site-level settings module, like", "Space Administration. # All rights reserved. # # The xGDS platform is licensed", "http://www.apache.org/licenses/LICENSE-2.0. 
# # Unless required by applicable law or agreed to in writing,", "} } XGDS_PLANNER_PIPELINE_CSS = { 'planner_app': { 'source_filenames': ( 'jquery-ui-dist/jquery-ui.min.css', # for some", "the planner, register them as follows # XGDS_PLANNER_CALLBACK = [(MODIFY,'my.planner.modify.callback', PYTHON), # (SAVE,'my.planner.save.callback',", "the relevant siteSettings.py section might look like this: PIPELINE_JS = {} PIPELINE_JS.update(plannerSettings.XGDS_PLANNER_PIPELINE_JS) PIPELINE_CSS", "License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR # CONDITIONS", "may define some new parameters that can be modified in the Django settings", "('xpjson', '.json', 'xgds_planner2.planExporter.XpjsonPlanExporter'), ('bearing_distance', '.bdj', 'xgds_planner2.planExporter.BearingDistanceJsonPlanExporter'), ('bearing_distance', '.cbdj', 'xgds_planner2.planExporter.BearingDistanceCRSJsonPlanExporter'), ('crsjson', '.crsjson', 'xgds_planner2.planExporter.CrsJsonPlanExporter'), ('kml',", "this: PIPELINE_JS = {} PIPELINE_JS.update(plannerSettings.XGDS_PLANNER_PIPELINE_JS) PIPELINE_CSS = {} PIPELINE_CSS.update(plannerSettings.XGDS_PLANNER_PIPELINE_CSS) # ### \"\"\" import", "true to make the bearing distance be in crs units XGDS_PLANNER_CRS_UNITS_DEFAULT = False", "FOO. The default value for FOO is defined in this file, like this:", "defined in this file, like this: FOO = 'my default value' If the", "= True # 'external/js/jquery/jquery.migrate.min.js', XGDS_PLANNER_PIPELINE_JS = { 'planner_app': { 'source_filenames': ('jquery/dist/jquery.min.js', 'jquery-migrate-official/src/migrate.js', 'jquery-ui-dist/jquery-ui.min.js',", "the back end after modification or save. MODIFY = 'Modify' SAVE = 'Save'", "the License at # http://www.apache.org/licenses/LICENSE-2.0. # # Unless required by applicable law or", "of library XGDS_PLANNER_PLOTS = {} # Uncomment the below to see plannerSamplePlot.js, and", "files. One of the steps within 'manage.py prep' # is 'prepapps'. 
During that", "JS. # # * @simulator is the JavaScript name of the simulator module", "= False # list of (formatCode, extension, exporterClass). This is the entire list", "A dict of Command type to javascript file to be used in the", "'xgds_planner2.csvPlanImporter.CSVPlanImporter'), ('json', '.json', 'xgds_planner2.planImporter.XPJsonPlanImporter'), ) # kml root from xgds_map_server XGDS_PLANNER_LAYER_FEED_URL = \"/xgds_map_server/treejson/\"", "XGDS_PLANNER_LINE_WIDTH_PIXELS = 3 XGDS_PLANNER_PLAN_MODEL = \"xgds_planner2.Plan\" XGDS_PLANNER_PLAN_MONIKER = \"Plan\" XGDS_PLANNER_PLAN_EXECUTION_MODEL = \"xgds_planner2.PlanExecution\" XGDS_PLANNER_STATION_MONIKER", "'xgds_planner2.planExporter.BearingDistanceCRSJsonPlanExporter'), ('crsjson', '.crsjson', 'xgds_planner2.planExporter.CrsJsonPlanExporter'), ('kml', '.kml', 'xgds_planner2.kmlPlanExporter.KmlPlanExporter'), ('stats', '-stats.json', 'xgds_planner2.statsPlanExporter.StatsPlanExporter'), # ('pml', '.pml',", "rendering XGDS_PLANNER_COMMAND_RENDERER_SCRIPTS = () # XGDS_PLANNER_COMMAND_RENDERERS - A dict of Command type to", "{ 'source_filenames': ( 'jquery-ui-dist/jquery-ui.min.css', # for some reason compressing this in the css", "for FOO is defined in this file, like this: FOO = 'my default", "# TEMPLATE_CONTEXT_PROCESSORS = (global_settings.TEMPLATE_CONTEXT_PROCESSORS + ( # ... # 'geocamUtil.context_processors.settings' XGDS_PLANNER_SCHEDULE_INCLUDED = None", "paths relative to the PROJ_ROOT # base directory for the site. They point", "functionality in the planner. 
XGDS_PLANNER_TEST_SKIP_INDEX = False XGDS_PLANNER_TEST_SKIP_EDIT = False XGDS_PLANNER_TEST_SKIP_DOC = False", "('bearing_distance', '.bdj', 'xgds_planner2.planExporter.BearingDistanceJsonPlanExporter'), ('bearing_distance', '.cbdj', 'xgds_planner2.planExporter.BearingDistanceCRSJsonPlanExporter'), ('crsjson', '.crsjson', 'xgds_planner2.planExporter.CrsJsonPlanExporter'), ('kml', '.kml', 'xgds_planner2.kmlPlanExporter.KmlPlanExporter'), ('stats',", "for some reason compressing this in the css does not work so it's", "loaded. XGDS_PLANNER_LINKS_LOADED_CALLBACK = 'null' # This is used to hold a map of", "type to javascript file to be used in the mapviews.js # to render", "{ 'source_filenames': ('jquery/dist/jquery.min.js', 'jquery-migrate-official/src/migrate.js', 'jquery-ui-dist/jquery-ui.min.js', 'handlebars/dist/handlebars.min.js', 'backbone/backbone.js', 'backbone.wreqr/lib/backbone.wreqr.min.js', 'backbone.babysitter/lib/backbone.babysitter.min.js', 'backbone-relational/backbone-relational.js', 'backbone-forms/distribution/backbone-forms.min.js', 'backbone.marionette/lib/backbone.marionette.min.js', 'string-format/lib/string-format.js',", "License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0.", "\"License\"); you may not use this file except in compliance with the License.", "this file, like this: FOO = 'my default value' If the admin for", "ANY KIND, either express or implied. See the License for the # specific", "also then include the javascript library that has that function implemented. 
# Dictionary", "xGDS platform is licensed under the Apache License, Version 2.0 # (the \"License\");", "like this: FOO = 'my default value' If the admin for the site", "'templates', 'handlebars'), os.path.join('xgds_map_server', 'templates', 'handlebars', 'search')] XGDS_PLANNER_PLAN_BEARING_HANDLEBAR_PATH = 'xgds_planner2/templates/xgds_planner2/bearingDistancePlan.handlebars' # XGDS_PLANNER_LINKS_LOADED_CALLBACK: The fully", "now they are set in the PlanSchema database table. # XGDS_PLANNER_SCHEMAS = [", "render a command in a custom way. # see xgds_kn for example XGDS_PLANNER_COMMAND_RENDERERS", "'Modify' SAVE = 'Save' DELETE = 'Delete' JAVASCRIPT = 'JavaScript' PYTHON = 'Python'", "'backbone-relational/backbone-relational.js', 'backbone-forms/distribution/backbone-forms.min.js', 'backbone.marionette/lib/backbone.marionette.min.js', 'string-format/lib/string-format.js', 'usng/usng.js', 'proj4/dist/proj4.js', 'xgds_map_server/js/util/handlebars-helpers.js', 'xgds_map_server/js/util/geo.js', 'xgds_map_server/js/util/forms.js', 'xgds_planner2/js/plannerApp.js', 'xgds_planner2/js/plannerModels.js', 'xgds_planner2/js/olPlannerStyles.js', 'xgds_planner2/js/plannerLinksViews.js',", "the creation form, set this to the site id from your WITHIN plan", "= 'xgds_planner2.views.addToEditorContext' # Method to add stuff to planExecution if you are not", "#__END_LICENSE__ from geocamUtil.SettingsUtil import getOrCreateArray \"\"\" This app may define some new parameters", "One of the steps within 'manage.py prep' # is 'prepapps'. During that step,", "# file that defines the simulator model for the schema. The model is", "OF ANY KIND, either express or implied. See the License for the #", "must merge the XGDS_PLANNER_PIPELINE_JS and XGDS_PLANNER_PIPELINE_CSS settings into global PIPELINE_{JS|CSS} settings dicts. 
If", "a default site frame in the creation form, set this to the site", "'xgds_planner2.planExporter.BearingDistanceJsonPlanExporter'), ('bearing_distance', '.cbdj', 'xgds_planner2.planExporter.BearingDistanceCRSJsonPlanExporter'), ('crsjson', '.crsjson', 'xgds_planner2.planExporter.CrsJsonPlanExporter'), ('kml', '.kml', 'xgds_planner2.kmlPlanExporter.KmlPlanExporter'), ('stats', '-stats.json', 'xgds_planner2.statsPlanExporter.StatsPlanExporter'),", "from geocamUtil.SettingsUtil import getOrCreateDict XGDS_PLANNER_OFFLINE = False # Don't load google earth if", "it is a javascript method, it will happen on the front end after", "this is defined (true) then include the scheduling & flight management features in", "when the plan is saved. # If it is a Python method, it", "the Apache License, Version 2.0 # (the \"License\"); you may not use this", "See the License for the # specific language governing permissions and limitations under", "to get the value of FOO from django.conf.settings. That settings object will not", "variables. 
Set to true if code somewhere else overrides # some functionality in", "in the planner_app # 'backbone-forms/distribution/templates/old.css', 'xgds_planner2/css/planner.css', #'xgds_planner2/css/forms_adjust.css', ), 'output_filename': 'css/planner_app.css', 'template_name': 'xgds_planner2/pipelineCSS.css', },", "If it is a Python method, it will happen on the back end", "features in display # IMPORTANT YOU MUST INCLUDE THIS IN SITE SETTINGS #", "top-level siteSettings.py #TODO update, qunit is installed with bower 'xgds_planner2_testing': { 'source_filenames': (", "'source_filenames': ( 'jquery-ui-dist/jquery-ui.min.css', # for some reason compressing this in the css does", "step, those files are processed by # compileXpjson.py and the simplified/canonical versions are", "= \"Plan\" XGDS_PLANNER_PLAN_EXECUTION_MODEL = \"xgds_planner2.PlanExecution\" XGDS_PLANNER_STATION_MONIKER = \"Station\" XGDS_PLANNER_STATION_MONIKER_PLURAL = XGDS_PLANNER_STATION_MONIKER + 's'", "XGDS_PLANNER_HANDLEBARS_DIRS = [os.path.join('xgds_planner2', 'templates', 'handlebars'), os.path.join('xgds_map_server', 'templates', 'handlebars', 'search')] XGDS_PLANNER_PLAN_BEARING_HANDLEBAR_PATH = 'xgds_planner2/templates/xgds_planner2/bearingDistancePlan.handlebars' #", "in order either when the plan is modified or when the plan is", "within 'manage.py prep' # is 'prepapps'. 
During that step, those files are processed", "from xgds_map_server XGDS_PLANNER_LAYER_FEED_URL = \"/xgds_map_server/treejson/\" XGDS_PLANNER_LINE_WIDTH_PIXELS = 3 XGDS_PLANNER_PLAN_MODEL = \"xgds_planner2.Plan\" XGDS_PLANNER_PLAN_MONIKER =", "'xgds_planner2/pipelineCSS.css', }, 'xgds_planner2_testing': { 'source_filenames': ( 'qunit/qunit/qunit.css', ), 'output_filename': 'css/planner_tests.css', }, } PIPELINE", "file (siteSettings.py), must merge the XGDS_PLANNER_PIPELINE_JS and XGDS_PLANNER_PIPELINE_CSS settings into global PIPELINE_{JS|CSS} settings", "\"/xgds_map_server/treejson/\" XGDS_PLANNER_LINE_WIDTH_PIXELS = 3 XGDS_PLANNER_PLAN_MODEL = \"xgds_planner2.Plan\" XGDS_PLANNER_PLAN_MONIKER = \"Plan\" XGDS_PLANNER_PLAN_EXECUTION_MODEL = \"xgds_planner2.PlanExecution\"", "'.cbdj', 'xgds_planner2.planExporter.BearingDistanceCRSJsonPlanExporter'), ('crsjson', '.crsjson', 'xgds_planner2.planExporter.CrsJsonPlanExporter'), ('kml', '.kml', 'xgds_planner2.kmlPlanExporter.KmlPlanExporter'), ('stats', '-stats.json', 'xgds_planner2.statsPlanExporter.StatsPlanExporter'), # ('pml',", "modified in the Django settings module. Let's say one such parameter is FOO.", "to the site id from your WITHIN plan library. XGDS_PLANNER_DEFAULT_SITE = ('IRG', 'Ames')", "planExecution. # This gets invoked from schedulePlans call in views.py XGDS_PLANNER_SCHEDULE_EXTRAS_METHOD = None", "in the planner. 
XGDS_PLANNER_TEST_SKIP_INDEX = False XGDS_PLANNER_TEST_SKIP_EDIT = False XGDS_PLANNER_TEST_SKIP_DOC = False XGDS_PLANNER_TEST_SKIP_PLAN_REST", "{'ol': 'xgds_planner2/js/olPlanMap.js', 'model': XGDS_PLANNER_PLAN_MODEL, 'hiddenColumns': ['stations', 'type', 'id']} XGDS_DATA_MASKED_FIELDS = getOrCreateDict('XGDS_DATA_MASKED_FIELDS') XGDS_DATA_MASKED_FIELDS['xgds_planner2'] =", "'handlebars/dist/handlebars.min.js', 'backbone/backbone.js', 'backbone.wreqr/lib/backbone.wreqr.min.js', 'backbone.babysitter/lib/backbone.babysitter.min.js', 'backbone-relational/backbone-relational.js', 'backbone-forms/distribution/backbone-forms.min.js', 'backbone.marionette/lib/backbone.marionette.min.js', 'string-format/lib/string-format.js', 'usng/usng.js', 'proj4/dist/proj4.js', 'xgds_map_server/js/util/handlebars-helpers.js', 'xgds_map_server/js/util/geo.js', 'xgds_map_server/js/util/forms.js',", "trailing comma is critical because this makes it a tuple ), 'output_filename': 'js/simulator.js',", "when the plan is modified or when the plan is saved. # If", "'template_name': 'xgds_planner2/pipelineCSS.css', }, 'xgds_planner2_testing': { 'source_filenames': ( 'qunit/qunit/qunit.css', ), 'output_filename': 'css/planner_tests.css', }, }", "look like this: PIPELINE_JS = {} PIPELINE_JS.update(plannerSettings.XGDS_PLANNER_PIPELINE_JS) PIPELINE_CSS = {} PIPELINE_CSS.update(plannerSettings.XGDS_PLANNER_PIPELINE_CSS) # ###", "makes it a tuple ), 'output_filename': 'js/simulator.js', } _thisDir = os.path.dirname(__file__) # Set", "XGDS_PLANNER_TEST_SKIP_CREATE_PLAN = False XGDS_PLANNER_HANDLEBARS_DIRS = [os.path.join('xgds_planner2', 'templates', 'handlebars'), os.path.join('xgds_map_server', 'templates', 'handlebars', 'search')] XGDS_PLANNER_PLAN_BEARING_HANDLEBAR_PATH", "provides; remove anything you won't be using. 
XGDS_PLANNER_PLAN_EXPORTERS = ( ('xpjson', '.json', 'xgds_planner2.planExporter.XpjsonPlanExporter'),", "must also then include the javascript library that has that function implemented. #", "# is 'prepapps'. During that step, those files are processed by # compileXpjson.py", "they can override it in the site-level settings module, like this: FOO =", "register your own method if you need it. # It must add a", "be connected to the planner, register them as follows # XGDS_PLANNER_CALLBACK = [(MODIFY,'my.planner.modify.callback',", "2015, United States Government, as represented by the # Administrator of the National", "overrides # some functionality in the planner. XGDS_PLANNER_TEST_SKIP_INDEX = False XGDS_PLANNER_TEST_SKIP_EDIT = False", "javascript method, it will happen on the front end after modification or save.", "front end after modification or save. # If it is an 'exec' method,", "use this file except in compliance with the License. # You may obtain", "# \"simulatorUrl\": \"xgds_planner2/testing/exampleSimulator.js\", # \"simulator\": \"xgds_planner2.ExampleSimulator\", # }, \"GenericVehicle\": { \"schemaSource\": \"apps/xgds_planner2/js/xgds_planner2/planner/genericVehiclePlanSchema.json\", \"librarySource\":", "the entire list of everything that # xgds_planner provides; remove anything you won't", "'xgds_planner2.kmlPlanExporter.KmlPlanExporter'), ('stats', '-stats.json', 'xgds_planner2.statsPlanExporter.StatsPlanExporter'), # ('pml', '.pml', 'xgds_planner2.pmlPlanExporter.PmlPlanExporter'), ) # list of (formatCode,", "# planner. 
Notes: # # * @schemaSource and @librarySource are paths relative to", "'Python' EXEC = 'Exec' XGDS_PLANNER_CALLBACK = [] # If you will be plotting", "law or agreed to in writing, software distributed # under the License is", "XGDS_PLANNER_SITE_FRAMES = [] XGDS_MAP_SERVER_JS_MAP = getOrCreateDict('XGDS_MAP_SERVER_JS_MAP') XGDS_MAP_SERVER_JS_MAP['Plan'] = {'ol': 'xgds_planner2/js/olPlanMap.js', 'model': XGDS_PLANNER_PLAN_MODEL, 'hiddenColumns':", "JS # reads the simplified versions from there. # # * @simulatorUrl is", "that defines the simulator model for the schema. The model is loaded #", "file to be used in the mapviews.js # to render a command in", "custom way. # see xgds_kn for example XGDS_PLANNER_COMMAND_RENDERERS = {} # If this", "distance be in crs units XGDS_PLANNER_CRS_UNITS_DEFAULT = False # list of (formatCode, extension,", "False XGDS_PLANNER_TEST_SKIP_DOC = False XGDS_PLANNER_TEST_SKIP_PLAN_REST = False XGDS_PLANNER_TEST_SKIP_PLAN_EXPORT = False XGDS_PLANNER_TEST_SKIP_CREATE_PLAN_PAGE = False", "is relative to STATIC_URL. It should point to a JavaScript # file that", "qunit is installed with bower 'xgds_planner2_testing': { 'source_filenames': ( 'external/js/qunit-1.12.0.js', 'xgds_planner2/js/tests.js', ), 'output_filename':", "point to the XPJSON PlanSchema and # PlanLibrary source files. One of the", "# Administrator of the National Aeronautics and Space Administration. # All rights reserved.", "is defined in this file, like this: FOO = 'my default value' If", "(the \"License\"); you may not use this file except in compliance with the", "# \"librarySource\": \"apps/xgds_planner2/testing/examplePlanLibrary.json\", # \"simulatorUrl\": \"xgds_planner2/testing/exampleSimulator.js\", # \"simulator\": \"xgds_planner2.ExampleSimulator\", # }, \"GenericVehicle\": {", "= 'a better value' Other modules can access the value of FOO like", "for the # specific language governing permissions and limitations under the License. 
#__END_LICENSE__", "after modification or save. MODIFY = 'Modify' SAVE = 'Save' DELETE = 'Delete'", "file, like this: FOO = 'my default value' If the admin for the", "IS\" BASIS, WITHOUT WARRANTIES OR # CONDITIONS OF ANY KIND, either express or", "= False XGDS_PLANNER_TEST_SKIP_PLAN_EXPORT = False XGDS_PLANNER_TEST_SKIP_CREATE_PLAN_PAGE = False XGDS_PLANNER_TEST_SKIP_CREATE_PLAN = False XGDS_PLANNER_HANDLEBARS_DIRS =", "has that function implemented. # Dictionary should be: legible name: namespace of library", "this in your sitesettings to have a custom plan create, note that since", "the planner we want to add uploadJson into the custom map for right", "custom command rendering XGDS_PLANNER_COMMAND_RENDERER_SCRIPTS = () # XGDS_PLANNER_COMMAND_RENDERERS - A dict of Command", "simplified versions from there. # # * @simulatorUrl is relative to STATIC_URL. It", "'xgds_planner2/js/plannerToolsViews.js', 'xgds_planner2/js/plannerScheduleViews.js', 'xgds_planner2/js/plannerViews.js', 'xgds_planner2/js/map_viewer/olMapViews.js', 'xgds_planner2/js/olStationViews.js', 'xgds_planner2/js/olSegmentViews.js', 'xgds_planner2/js/olPlanViews.js', 'xgds_planner2/js/simulatorDriver.js' ), 'output_filename': 'js/compiled_planner_app.js' }, #", "after modification or save. # If it is a javascript method, it will", "in site settings you can't have a reverse lookup. XGDS_PLANNER_CREATE_URL = \"/xgds_planner2/plan/create\" #", "'xgds_planner2/templates/xgds_planner2/bearingDistancePlan.handlebars' # XGDS_PLANNER_LINKS_LOADED_CALLBACK: The fully qualified name of an # extra JavaScript callback", "False XGDS_PLANNER_TEST_SKIP_EDIT = False XGDS_PLANNER_TEST_SKIP_DOC = False XGDS_PLANNER_TEST_SKIP_PLAN_REST = False XGDS_PLANNER_TEST_SKIP_PLAN_EXPORT = False", "will be plotting values in the flot plot chart, register functions here. #", "# some functionality in the planner. 
XGDS_PLANNER_TEST_SKIP_INDEX = False XGDS_PLANNER_TEST_SKIP_EDIT = False XGDS_PLANNER_TEST_SKIP_DOC", "XGDS_MAP_SERVER_JS_MAP = getOrCreateDict('XGDS_MAP_SERVER_JS_MAP') XGDS_MAP_SERVER_JS_MAP['Plan'] = {'ol': 'xgds_planner2/js/olPlanMap.js', 'model': XGDS_PLANNER_PLAN_MODEL, 'hiddenColumns': ['stations', 'type', 'id']}", "management features in display # IMPORTANT YOU MUST INCLUDE THIS IN SITE SETTINGS", "the # specific language governing permissions and limitations under the License. #__END_LICENSE__ from", "XGDS_PLANNER_COMMAND_MONIKER + 's' #TODO to have a default site frame in the creation", "PYTHON), # (SAVE,'my.planner.save.callback', JAVASCRIPT)] # they will be executed in order either when", "'xgds_planner2/js/plannerViews.js', 'xgds_planner2/js/map_viewer/olMapViews.js', 'xgds_planner2/js/olStationViews.js', 'xgds_planner2/js/olSegmentViews.js', 'xgds_planner2/js/olPlanViews.js', 'xgds_planner2/js/simulatorDriver.js' ), 'output_filename': 'js/compiled_planner_app.js' }, # must create", "= getOrCreateDict('XGDS_DATA_MASKED_FIELDS') XGDS_DATA_MASKED_FIELDS['xgds_planner2'] = {'Plan': ['uuid', 'dateModified', 'jsonPlan', 'deleted', 'readOnly', 'numStations', 'numSegments', 'numCommands',", "schedulePlans call in views.py XGDS_PLANNER_SCHEDULE_EXTRAS_METHOD = None # OVERRIDE this in your sitesettings", "= getOrCreateDict('PIPELINE.JAVASCRIPT') # if we are using the planner we want to add", "plan is saved. # If it is a Python method, it will happen", "site frame in the creation form, set this to the site id from", "Don't load google earth if this is true XGDS_PLANNER_MAP_ROTATION_HANDLES = True XGDS_PLANNER_DIRECTIONAL_STATIONS =", "# to support custom command rendering XGDS_PLANNER_COMMAND_RENDERER_SCRIPTS = () # XGDS_PLANNER_COMMAND_RENDERERS - A", "merge the XGDS_PLANNER_PIPELINE_JS and XGDS_PLANNER_PIPELINE_CSS settings into global PIPELINE_{JS|CSS} settings dicts. 
If no", "This app may define some new parameters that can be modified in the", "and limitations under the License. #__END_LICENSE__ from geocamUtil.SettingsUtil import getOrCreateArray \"\"\" This app", "MODIFY = 'Modify' SAVE = 'Save' DELETE = 'Delete' JAVASCRIPT = 'JavaScript' PYTHON", "* @simulator is the JavaScript name of the simulator module defined by #", "# list of (formatCode, extension, exporterClass). This is the entire list of everything", "parameter is FOO. The default value for FOO is defined in this file,", "in a custom way. # see xgds_kn for example XGDS_PLANNER_COMMAND_RENDERERS = {} #", "XPJSON PlanSchema and # PlanLibrary source files. One of the steps within 'manage.py", "you can't have a reverse lookup. XGDS_PLANNER_CREATE_URL = \"/xgds_planner2/plan/create\" # Schema used to", "{'Plan': ['uuid', 'dateModified', 'jsonPlan', 'deleted', 'readOnly', 'numStations', 'numSegments', 'numCommands', 'stats' ] } #", "planner_app_base # XGDS_PLANNER_PLOTS['Sample'] = 'sample_plot' # Turn on to enable plan validation support", "FOO = 'my default value' If the admin for the site doesn't like", "those files are processed by # compileXpjson.py and the simplified/canonical versions are written", "= None # Test skipping variables. Set to true if code somewhere else", "with the License. # You may obtain a copy of the License at", "they are set in the PlanSchema database table. # XGDS_PLANNER_SCHEMAS = [ #", "planner and simulator PIPELINE['JAVASCRIPT']['simulator'] = {'source_filenames': ('xgds_planner2/js/planner/genericVehicleSimulator.js', # This trailing comma is critical", "Administrator of the National Aeronautics and Space Administration. # All rights reserved. #", "crs units XGDS_PLANNER_CRS_UNITS_DEFAULT = False # list of (formatCode, extension, exporterClass). 
This is", "#'xgds_planner2/css/forms_adjust.css', ), 'output_filename': 'css/planner_app.css', 'template_name': 'xgds_planner2/pipelineCSS.css', }, 'xgds_planner2_testing': { 'source_filenames': ( 'qunit/qunit/qunit.css', ),", "+ ( # ... # 'geocamUtil.context_processors.settings' XGDS_PLANNER_SCHEDULE_INCLUDED = None # Test skipping variables.", "# If it is an 'exec' method, it will happen on the back", "is a Python method, it will happen on the back end after modification", "'qunit/qunit/qunit.css', ), 'output_filename': 'css/planner_tests.css', }, } PIPELINE = getOrCreateDict('PIPELINE') PIPELINE['CSS'] = XGDS_PLANNER_PIPELINE_CSS PIPELINE['JAVASCRIPT']", "XGDS_PLANNER_LINKS_LOADED_CALLBACK: The fully qualified name of an # extra JavaScript callback to call", "# http://www.apache.org/licenses/LICENSE-2.0. # # Unless required by applicable law or agreed to in", "work so it's separate in the planner_app # 'backbone-forms/distribution/templates/old.css', 'xgds_planner2/css/planner.css', #'xgds_planner2/css/forms_adjust.css', ), 'output_filename':", "= \"Command\" XGDS_PLANNER_COMMAND_MONIKER_PLURAL = XGDS_PLANNER_COMMAND_MONIKER + 's' #TODO to have a default site", "(siteSettings.py), must merge the XGDS_PLANNER_PIPELINE_JS and XGDS_PLANNER_PIPELINE_CSS settings into global PIPELINE_{JS|CSS} settings dicts.", "( ('kml', '.kml', 'xgds_planner2.kmlPlanImporter.KmlLineStringPlanImporter'), ('csv', '.csv', 'xgds_planner2.csvPlanImporter.CSVPlanImporter'), ('json', '.json', 'xgds_planner2.planImporter.XPJsonPlanImporter'), ) # kml", "[] XGDS_MAP_SERVER_JS_MAP = getOrCreateDict('XGDS_MAP_SERVER_JS_MAP') XGDS_MAP_SERVER_JS_MAP['Plan'] = {'ol': 'xgds_planner2/js/olPlanMap.js', 'model': XGDS_PLANNER_PLAN_MODEL, 'hiddenColumns': ['stations', 'type',", "= \"Segment\" XGDS_PLANNER_SEGMENT_MONIKER_PLURAL = XGDS_PLANNER_SEGMENT_MONIKER + 's' XGDS_PLANNER_COMMAND_MONIKER = \"Command\" XGDS_PLANNER_COMMAND_MONIKER_PLURAL = XGDS_PLANNER_COMMAND_MONIKER", "# 
XGDS_PLANNER_LINKS_LOADED_CALLBACK: The fully qualified name of an # extra JavaScript callback to", "in compliance with the License. # You may obtain a copy of the", "are not doing the basic planExecution. # This gets invoked from schedulePlans call", "it in the site-level settings module, like this: FOO = 'a better value'", "might look like this: PIPELINE_JS = {} PIPELINE_JS.update(plannerSettings.XGDS_PLANNER_PIPELINE_JS) PIPELINE_CSS = {} PIPELINE_CSS.update(plannerSettings.XGDS_PLANNER_PIPELINE_CSS) #", "included by the mapviews.js # to support custom command rendering XGDS_PLANNER_COMMAND_RENDERER_SCRIPTS = ()", "the admin for the site doesn't like the default value, they can override", "not doing the basic planExecution. # This gets invoked from schedulePlans call in", "planExecution if you are not doing the basic planExecution. # This gets invoked", "'xgds_planner2.statsPlanExporter.StatsPlanExporter'), # ('pml', '.pml', 'xgds_planner2.pmlPlanExporter.PmlPlanExporter'), ) # list of (formatCode, extension, importerClass) XGDS_PLANNER_PLAN_IMPORTERS", "after modification or save. # If it is an 'exec' method, it will", "getOrCreateArray \"\"\" This app may define some new parameters that can be modified", "# the namespace within the simulator js } } # XGDS_PLANNER_COMMAND_RENDERER_SCRIPTS javascript files", "settings, now they are set in the PlanSchema database table. # XGDS_PLANNER_SCHEMAS =", "# Turn on to enable plan validation support and UI XGDS_PLANNER_VALIDATION = False", "= ( ('kml', '.kml', 'xgds_planner2.kmlPlanImporter.KmlLineStringPlanImporter'), ('csv', '.csv', 'xgds_planner2.csvPlanImporter.CSVPlanImporter'), ('json', '.json', 'xgds_planner2.planImporter.XPJsonPlanImporter'), ) #", "of the steps within 'manage.py prep' # is 'prepapps'. During that step, those", "either when the plan is modified or when the plan is saved. #", "is initialized by calling views.getSiteFrames(). 
XGDS_PLANNER_SITE_FRAMES = [] XGDS_MAP_SERVER_JS_MAP = getOrCreateDict('XGDS_MAP_SERVER_JS_MAP') XGDS_MAP_SERVER_JS_MAP['Plan'] =", "} PIPELINE = getOrCreateDict('PIPELINE') PIPELINE['CSS'] = XGDS_PLANNER_PIPELINE_CSS PIPELINE['JAVASCRIPT'] = getOrCreateDict('PIPELINE.JAVASCRIPT') # if we", "= XGDS_PLANNER_PIPELINE_CSS PIPELINE['JAVASCRIPT'] = getOrCreateDict('PIPELINE.JAVASCRIPT') # if we are using the planner we", "\"xgds_planner2.PlanExecution\" XGDS_PLANNER_STATION_MONIKER = \"Station\" XGDS_PLANNER_STATION_MONIKER_PLURAL = XGDS_PLANNER_STATION_MONIKER + 's' XGDS_PLANNER_SEGMENT_MONIKER = \"Segment\" XGDS_PLANNER_SEGMENT_MONIKER_PLURAL", "schemas available in the # planner. Notes: # # * @schemaSource and @librarySource", "to have a custom plan create, note that since it's in site settings", "can override it in the site-level settings module, like this: FOO = 'a", "a javascript method, it will happen on the front end after modification or", "# Don't load google earth if this is true XGDS_PLANNER_MAP_ROTATION_HANDLES = True XGDS_PLANNER_DIRECTIONAL_STATIONS", "XGDS_PLANNER_CRS_UNITS_DEFAULT = False # list of (formatCode, extension, exporterClass). 
This is the entire", "\"test\": { # \"schemaSource\": \"apps/xgds_planner2/testing/examplePlanSchema.json\", # \"librarySource\": \"apps/xgds_planner2/testing/examplePlanLibrary.json\", # \"simulatorUrl\": \"xgds_planner2/testing/exampleSimulator.js\", # \"simulator\":", "'proj4/dist/proj4.js', 'xgds_map_server/js/util/handlebars-helpers.js', 'xgds_map_server/js/util/geo.js', 'xgds_map_server/js/util/forms.js', 'xgds_planner2/js/plannerApp.js', 'xgds_planner2/js/plannerModels.js', 'xgds_planner2/js/olPlannerStyles.js', 'xgds_planner2/js/plannerLinksViews.js', 'xgds_planner2/js/plannerToolsViews.js', 'xgds_planner2/js/plannerScheduleViews.js', 'xgds_planner2/js/plannerViews.js', 'xgds_planner2/js/map_viewer/olMapViews.js', 'xgds_planner2/js/olStationViews.js',", "the simplified/canonical versions are written # to the build/static/xgds_planner2 directory. The client-side JS", "simplified/canonical versions are written # to the build/static/xgds_planner2 directory. The client-side JS #", "'output_filename': 'js/compiled_planner_app.js' }, # must create 'simulator' entry in top-level siteSettings.py #TODO update,", "= ( ('xpjson', '.json', 'xgds_planner2.planExporter.XpjsonPlanExporter'), ('bearing_distance', '.bdj', 'xgds_planner2.planExporter.BearingDistanceJsonPlanExporter'), ('bearing_distance', '.cbdj', 'xgds_planner2.planExporter.BearingDistanceCRSJsonPlanExporter'), ('crsjson', '.crsjson',", "'jquery-migrate-official/src/migrate.js', 'jquery-ui-dist/jquery-ui.min.js', 'handlebars/dist/handlebars.min.js', 'backbone/backbone.js', 'backbone.wreqr/lib/backbone.wreqr.min.js', 'backbone.babysitter/lib/backbone.babysitter.min.js', 'backbone-relational/backbone-relational.js', 'backbone-forms/distribution/backbone-forms.min.js', 'backbone.marionette/lib/backbone.marionette.min.js', 'string-format/lib/string-format.js', 'usng/usng.js', 'proj4/dist/proj4.js', 'xgds_map_server/js/util/handlebars-helpers.js',", "XGDS_PLANNER_PLOTS = {} # Uncomment the below to see 
plannerSamplePlot.js, and include it", "set in the settings, now they are set in the PlanSchema database table.", "object will not know about the default value! ### # DJANGO-PIPELINE ADDENDUM: For", "\"/xgds_planner2/plan/create\" # Schema used to be set in the settings, now they are", "in the creation form, set this to the site id from your WITHIN", "file that defines the simulator model for the schema. The model is loaded", "end after modification or save. # If it is a javascript method, it", "'xgds_planner2/js/map_viewer/olMapViews.js', 'xgds_planner2/js/olStationViews.js', 'xgds_planner2/js/olSegmentViews.js', 'xgds_planner2/js/olPlanViews.js', 'xgds_planner2/js/simulatorDriver.js' ), 'output_filename': 'js/compiled_planner_app.js' }, # must create 'simulator'", "by # the file at @simulatorUrl. # XGDS_PLANNER_SCHEMAS = { # \"test\": {", "'xgds_planner2/js/olPlanMap.js', 'model': XGDS_PLANNER_PLAN_MODEL, 'hiddenColumns': ['stations', 'type', 'id']} XGDS_DATA_MASKED_FIELDS = getOrCreateDict('XGDS_DATA_MASKED_FIELDS') XGDS_DATA_MASKED_FIELDS['xgds_planner2'] = {'Plan':", "# DJANGO-PIPELINE ADDENDUM: For this module to work, the site-level config file (siteSettings.py),", "the site. They point to the XPJSON PlanSchema and # PlanLibrary source files.", "right now. Really it should not be jammed in that file. PIPELINE['JAVASCRIPT']['custom_map'] =", "They point to the XPJSON PlanSchema and # PlanLibrary source files. 
One of", "XGDS_PLANNER_PIPELINE_CSS PIPELINE['JAVASCRIPT'] = getOrCreateDict('PIPELINE.JAVASCRIPT') # if we are using the planner we want", "XGDS_PLANNER_TEST_SKIP_PLAN_REST = False XGDS_PLANNER_TEST_SKIP_PLAN_EXPORT = False XGDS_PLANNER_TEST_SKIP_CREATE_PLAN_PAGE = False XGDS_PLANNER_TEST_SKIP_CREATE_PLAN = False XGDS_PLANNER_HANDLEBARS_DIRS", "# # The xGDS platform is licensed under the Apache License, Version 2.0", "frame in the creation form, set this to the site id from your", "# Method to add stuff to context for plan editor, override and register", "should be: legible name: namespace of library XGDS_PLANNER_PLOTS = {} # Uncomment the", "plannerSamplePlot.js, and include it in planner_app_base # XGDS_PLANNER_PLOTS['Sample'] = 'sample_plot' # Turn on", "of an # extra JavaScript callback to call after the links tab is", "+ 's' XGDS_PLANNER_SEGMENT_MONIKER = \"Segment\" XGDS_PLANNER_SEGMENT_MONIKER_PLURAL = XGDS_PLANNER_SEGMENT_MONIKER + 's' XGDS_PLANNER_COMMAND_MONIKER = \"Command\"", "# to render a command in a custom way. # see xgds_kn for", "The xGDS platform is licensed under the Apache License, Version 2.0 # (the", "load google earth if this is true XGDS_PLANNER_MAP_ROTATION_HANDLES = True XGDS_PLANNER_DIRECTIONAL_STATIONS = True", "by applicable law or agreed to in writing, software distributed # under the", "call after the links tab is loaded. XGDS_PLANNER_LINKS_LOADED_CALLBACK = 'null' # This is", "default value! ### # DJANGO-PIPELINE ADDENDUM: For this module to work, the site-level", "importerClass) XGDS_PLANNER_PLAN_IMPORTERS = ( ('kml', '.kml', 'xgds_planner2.kmlPlanImporter.KmlLineStringPlanImporter'), ('csv', '.csv', 'xgds_planner2.csvPlanImporter.CSVPlanImporter'), ('json', '.json', 'xgds_planner2.planImporter.XPJsonPlanImporter'),", "saved. # If it is a Python method, it will happen on the", "it will happen on the front end after modification or save. # If", "the simplified versions from there. 
# # * @simulatorUrl is relative to STATIC_URL.", "a Python method, it will happen on the back end after modification or", "{ 'planner_app': { 'source_filenames': ( 'jquery-ui-dist/jquery-ui.min.css', # for some reason compressing this in", "this file except in compliance with the License. # You may obtain a", "= () # XGDS_PLANNER_COMMAND_RENDERERS - A dict of Command type to javascript file", "distributed # under the License is distributed on an \"AS IS\" BASIS, WITHOUT", "= False # Don't load google earth if this is true XGDS_PLANNER_MAP_ROTATION_HANDLES =", "used to be set in the settings, now they are set in the", "# Set to true to make the bearing distance be in crs units", "settings into global PIPELINE_{JS|CSS} settings dicts. If no other django-pipeline includes are defined,", "\"genericVehicle.Simulator\", # the namespace within the simulator js } } # XGDS_PLANNER_COMMAND_RENDERER_SCRIPTS javascript", "earth if this is true XGDS_PLANNER_MAP_ROTATION_HANDLES = True XGDS_PLANNER_DIRECTIONAL_STATIONS = True # 'external/js/jquery/jquery.migrate.min.js',", "geocamUtil.SettingsUtil import getOrCreateDict XGDS_PLANNER_OFFLINE = False # Don't load google earth if this", ") # kml root from xgds_map_server XGDS_PLANNER_LAYER_FEED_URL = \"/xgds_map_server/treejson/\" XGDS_PLANNER_LINE_WIDTH_PIXELS = 3 XGDS_PLANNER_PLAN_MODEL", "id from your WITHIN plan library. XGDS_PLANNER_DEFAULT_SITE = ('IRG', 'Ames') # Method to", "XGDS_DATA_MASKED_FIELDS['xgds_planner2'] = {'Plan': ['uuid', 'dateModified', 'jsonPlan', 'deleted', 'readOnly', 'numStations', 'numSegments', 'numCommands', 'stats' ]", "display # IMPORTANT YOU MUST INCLUDE THIS IN SITE SETTINGS # TEMPLATE_CONTEXT_PROCESSORS =", "# ... # 'geocamUtil.context_processors.settings' XGDS_PLANNER_SCHEDULE_INCLUDED = None # Test skipping variables. 
Set to", "= \"/xgds_planner2/plan/create\" # Schema used to be set in the settings, now they", "work, the site-level config file (siteSettings.py), must merge the XGDS_PLANNER_PIPELINE_JS and XGDS_PLANNER_PIPELINE_CSS settings", "are set in the PlanSchema database table. # XGDS_PLANNER_SCHEMAS = [ # ]", "extension, exporterClass). This is the entire list of everything that # xgds_planner provides;", "'.bdj', 'xgds_planner2.planExporter.BearingDistanceJsonPlanExporter'), ('bearing_distance', '.cbdj', 'xgds_planner2.planExporter.BearingDistanceCRSJsonPlanExporter'), ('crsjson', '.crsjson', 'xgds_planner2.planExporter.CrsJsonPlanExporter'), ('kml', '.kml', 'xgds_planner2.kmlPlanExporter.KmlPlanExporter'), ('stats', '-stats.json',", "else overrides # some functionality in the planner. XGDS_PLANNER_TEST_SKIP_INDEX = False XGDS_PLANNER_TEST_SKIP_EDIT =", "qualified name of an # extra JavaScript callback to call after the links", "is modified or when the plan is saved. # If it is a", "the License. #__END_LICENSE__ from geocamUtil.SettingsUtil import getOrCreateArray \"\"\" This app may define some", "'source_filenames': ( 'qunit/qunit/qunit.css', ), 'output_filename': 'css/planner_tests.css', }, } PIPELINE = getOrCreateDict('PIPELINE') PIPELINE['CSS'] =", "it's in site settings you can't have a reverse lookup. XGDS_PLANNER_CREATE_URL = \"/xgds_planner2/plan/create\"", "The model is loaded # as part of the client-side planner JS. 
#", "= False XGDS_PLANNER_TEST_SKIP_CREATE_PLAN_PAGE = False XGDS_PLANNER_TEST_SKIP_CREATE_PLAN = False XGDS_PLANNER_HANDLEBARS_DIRS = [os.path.join('xgds_planner2', 'templates', 'handlebars'),", "reason compressing this in the css does not work so it's separate in", "xgds_map_server XGDS_PLANNER_LAYER_FEED_URL = \"/xgds_map_server/treejson/\" XGDS_PLANNER_LINE_WIDTH_PIXELS = 3 XGDS_PLANNER_PLAN_MODEL = \"xgds_planner2.Plan\" XGDS_PLANNER_PLAN_MONIKER = \"Plan\"", "to the planner, register them as follows # XGDS_PLANNER_CALLBACK = [(MODIFY,'my.planner.modify.callback', PYTHON), #", "relative to the PROJ_ROOT # base directory for the site. They point to", "plot chart, register functions here. # You must also then include the javascript", "is an 'exec' method, it will happen on the back end after modification", "name of an # extra JavaScript callback to call after the links tab", "the PROJ_ROOT # base directory for the site. They point to the XPJSON", "create 'simulator' entry in top-level siteSettings.py #TODO update, qunit is installed with bower", "an # extra JavaScript callback to call after the links tab is loaded.", "site frames, so we can convert lat/long to the closest site frame. #", "be included by the mapviews.js # to support custom command rendering XGDS_PLANNER_COMMAND_RENDERER_SCRIPTS =", "# XGDS_PLANNER_SCHEMAS = [ # ] # XGDS_PLANNER_SCHEMAS: A list of XPJSON schemas", "PIPELINE['JAVASCRIPT']['simulator'] = {'source_filenames': ('xgds_planner2/js/planner/genericVehicleSimulator.js', # This trailing comma is critical because this makes", "because this makes it a tuple ), 'output_filename': 'js/simulator.js', } _thisDir = os.path.dirname(__file__)", "XGDS_PLANNER_SCHEDULE_INCLUDED = None # Test skipping variables. 
Set to true if code somewhere", "{'source_filenames': ('xgds_planner2/js/planner/genericVehicleSimulator.js', # This trailing comma is critical because this makes it a", "+ 's' XGDS_PLANNER_COMMAND_MONIKER = \"Command\" XGDS_PLANNER_COMMAND_MONIKER_PLURAL = XGDS_PLANNER_COMMAND_MONIKER + 's' #TODO to have", "'output_filename': 'js/planner_tests.js' } } XGDS_PLANNER_PIPELINE_CSS = { 'planner_app': { 'source_filenames': ( 'jquery-ui-dist/jquery-ui.min.css', #", "are processed by # compileXpjson.py and the simplified/canonical versions are written # to", "DJANGO-PIPELINE ADDENDUM: For this module to work, the site-level config file (siteSettings.py), must", "the css does not work so it's separate in the planner_app # 'backbone-forms/distribution/templates/old.css',", "we are using the planner we want to add uploadJson into the custom", "default site frame in the creation form, set this to the site id", "'dateModified', 'jsonPlan', 'deleted', 'readOnly', 'numStations', 'numSegments', 'numCommands', 'stats' ] } # If you", "make the bearing distance be in crs units XGDS_PLANNER_CRS_UNITS_DEFAULT = False # list", "\"apps/xgds_planner2/testing/examplePlanSchema.json\", # \"librarySource\": \"apps/xgds_planner2/testing/examplePlanLibrary.json\", # \"simulatorUrl\": \"xgds_planner2/testing/exampleSimulator.js\", # \"simulator\": \"xgds_planner2.ExampleSimulator\", # }, \"GenericVehicle\":", "\"apps/xgds_planner2/js/xgds_planner2/planner/genericVehiclePlanSchema.json\", \"librarySource\": \"apps/xgds_planner2/js/xgds_planner2/planner/genericVehiclePlanLibrary.json\", \"simulatorUrl\": \"xgds_planner2/js/planner/genericVehicleSimulator.js\", \"simulator\": \"genericVehicle.Simulator\", # the namespace within the simulator", "will happen on the front end after modification or save. # If it", "represented by the # Administrator of the National Aeronautics and Space Administration. 
#", "'xgds_planner2/js/plannerScheduleViews.js', 'xgds_planner2/js/plannerViews.js', 'xgds_planner2/js/map_viewer/olMapViews.js', 'xgds_planner2/js/olStationViews.js', 'xgds_planner2/js/olSegmentViews.js', 'xgds_planner2/js/olPlanViews.js', 'xgds_planner2/js/simulatorDriver.js' ), 'output_filename': 'js/compiled_planner_app.js' }, # must", "If this is defined (true) then include the scheduling & flight management features", "uploadJson into the custom map for right now. Really it should not be", "States Government, as represented by the # Administrator of the National Aeronautics and", "model for the schema. The model is loaded # as part of the", "['stations', 'type', 'id']} XGDS_DATA_MASKED_FIELDS = getOrCreateDict('XGDS_DATA_MASKED_FIELDS') XGDS_DATA_MASKED_FIELDS['xgds_planner2'] = {'Plan': ['uuid', 'dateModified', 'jsonPlan', 'deleted',", "the planner. XGDS_PLANNER_TEST_SKIP_INDEX = False XGDS_PLANNER_TEST_SKIP_EDIT = False XGDS_PLANNER_TEST_SKIP_DOC = False XGDS_PLANNER_TEST_SKIP_PLAN_REST =", "= 'Save' DELETE = 'Delete' JAVASCRIPT = 'JavaScript' PYTHON = 'Python' EXEC =", "app may define some new parameters that can be modified in the Django", "in crs units XGDS_PLANNER_CRS_UNITS_DEFAULT = False # list of (formatCode, extension, exporterClass). This", "like this: from django.conf import settings print settings.FOO Don't try to get the", "* @simulatorUrl is relative to STATIC_URL. It should point to a JavaScript #", "json dictionary called extras XGDS_PLANNER_EDITOR_CONTEXT_METHOD = 'xgds_planner2.views.addToEditorContext' # Method to add stuff to", "custom map for right now. Really it should not be jammed in that", "global PIPELINE_{JS|CSS} settings dicts. 
If no other django-pipeline includes are defined, the relevant", "config file (siteSettings.py), must merge the XGDS_PLANNER_PIPELINE_JS and XGDS_PLANNER_PIPELINE_CSS settings into global PIPELINE_{JS|CSS}", "is critical because this makes it a tuple ), 'output_filename': 'js/simulator.js', } _thisDir", "of javascript files for your planner and simulator PIPELINE['JAVASCRIPT']['simulator'] = {'source_filenames': ('xgds_planner2/js/planner/genericVehicleSimulator.js', #", "{ # \"schemaSource\": \"apps/xgds_planner2/testing/examplePlanSchema.json\", # \"librarySource\": \"apps/xgds_planner2/testing/examplePlanLibrary.json\", # \"simulatorUrl\": \"xgds_planner2/testing/exampleSimulator.js\", # \"simulator\": \"xgds_planner2.ExampleSimulator\",", "'js/planner_tests.js' } } XGDS_PLANNER_PIPELINE_CSS = { 'planner_app': { 'source_filenames': ( 'jquery-ui-dist/jquery-ui.min.css', # for", "Copyright (c) 2015, United States Government, as represented by the # Administrator of", "here. # You must also then include the javascript library that has that", "doesn't like the default value, they can override it in the site-level settings", "# 'backbone-forms/distribution/templates/old.css', 'xgds_planner2/css/planner.css', #'xgds_planner2/css/forms_adjust.css', ), 'output_filename': 'css/planner_app.css', 'template_name': 'xgds_planner2/pipelineCSS.css', }, 'xgds_planner2_testing': { 'source_filenames':", "the # planner. 
Notes: # # * @schemaSource and @librarySource are paths relative", "the site-level config file (siteSettings.py), must merge the XGDS_PLANNER_PIPELINE_JS and XGDS_PLANNER_PIPELINE_CSS settings into", "#TODO to have a default site frame in the creation form, set this", "'output_filename': 'css/planner_app.css', 'template_name': 'xgds_planner2/pipelineCSS.css', }, 'xgds_planner2_testing': { 'source_filenames': ( 'qunit/qunit/qunit.css', ), 'output_filename': 'css/planner_tests.css',", "'s' #TODO to have a default site frame in the creation form, set", "from django.conf.settings. That settings object will not know about the default value! ###", "the links tab is loaded. XGDS_PLANNER_LINKS_LOADED_CALLBACK = 'null' # This is used to", "<filename>xgds_planner2/defaultSettings.py #__BEGIN_LICENSE__ # Copyright (c) 2015, United States Government, as represented by the", "include the javascript library that has that function implemented. # Dictionary should be:", "to in writing, software distributed # under the License is distributed on an", "XGDS_PLANNER_STATION_MONIKER + 's' XGDS_PLANNER_SEGMENT_MONIKER = \"Segment\" XGDS_PLANNER_SEGMENT_MONIKER_PLURAL = XGDS_PLANNER_SEGMENT_MONIKER + 's' XGDS_PLANNER_COMMAND_MONIKER =", "add uploadJson into the custom map for right now. 
Really it should not", "with bower 'xgds_planner2_testing': { 'source_filenames': ( 'external/js/qunit-1.12.0.js', 'xgds_planner2/js/tests.js', ), 'output_filename': 'js/planner_tests.js' } }", "= ('IRG', 'Ames') # Method to add stuff to context for plan editor,", "# \"schemaSource\": \"apps/xgds_planner2/testing/examplePlanSchema.json\", # \"librarySource\": \"apps/xgds_planner2/testing/examplePlanLibrary.json\", # \"simulatorUrl\": \"xgds_planner2/testing/exampleSimulator.js\", # \"simulator\": \"xgds_planner2.ExampleSimulator\", #", "# This is used to hold a map of site frames, so we", "During that step, those files are processed by # compileXpjson.py and the simplified/canonical", "not know about the default value! ### # DJANGO-PIPELINE ADDENDUM: For this module", "rights reserved. # # The xGDS platform is licensed under the Apache License,", "may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0. # # Unless", "it in planner_app_base # XGDS_PLANNER_PLOTS['Sample'] = 'sample_plot' # Turn on to enable plan", "to true if code somewhere else overrides # some functionality in the planner.", "OVERRIDE this in your sitesettings to have a custom plan create, note that", "XGDS_PLANNER_PLAN_EXPORTERS = ( ('xpjson', '.json', 'xgds_planner2.planExporter.XpjsonPlanExporter'), ('bearing_distance', '.bdj', 'xgds_planner2.planExporter.BearingDistanceJsonPlanExporter'), ('bearing_distance', '.cbdj', 'xgds_planner2.planExporter.BearingDistanceCRSJsonPlanExporter'), ('crsjson',", "('kml', '.kml', 'xgds_planner2.kmlPlanImporter.KmlLineStringPlanImporter'), ('csv', '.csv', 'xgds_planner2.csvPlanImporter.CSVPlanImporter'), ('json', '.json', 'xgds_planner2.planImporter.XPJsonPlanImporter'), ) # kml root", "\"Station\" XGDS_PLANNER_STATION_MONIKER_PLURAL = XGDS_PLANNER_STATION_MONIKER + 's' XGDS_PLANNER_SEGMENT_MONIKER = \"Segment\" XGDS_PLANNER_SEGMENT_MONIKER_PLURAL = XGDS_PLANNER_SEGMENT_MONIKER +", "Override this compilation of javascript files for your 
planner and simulator PIPELINE['JAVASCRIPT']['simulator'] =", "scheduling & flight management features in display # IMPORTANT YOU MUST INCLUDE THIS", "creation form, set this to the site id from your WITHIN plan library.", "XGDS_PLANNER_COMMAND_RENDERERS - A dict of Command type to javascript file to be used", "[os.path.join('xgds_planner2', 'templates', 'handlebars'), os.path.join('xgds_map_server', 'templates', 'handlebars', 'search')] XGDS_PLANNER_PLAN_BEARING_HANDLEBAR_PATH = 'xgds_planner2/templates/xgds_planner2/bearingDistancePlan.handlebars' # XGDS_PLANNER_LINKS_LOADED_CALLBACK: The", "true if code somewhere else overrides # some functionality in the planner. XGDS_PLANNER_TEST_SKIP_INDEX", "other django-pipeline includes are defined, the relevant siteSettings.py section might look like this:", "XGDS_PLANNER_DIRECTIONAL_STATIONS = True # 'external/js/jquery/jquery.migrate.min.js', XGDS_PLANNER_PIPELINE_JS = { 'planner_app': { 'source_filenames': ('jquery/dist/jquery.min.js', 'jquery-migrate-official/src/migrate.js',", "in the # planner. Notes: # # * @schemaSource and @librarySource are paths", "defined by # the file at @simulatorUrl. 
# XGDS_PLANNER_SCHEMAS = { # \"test\":", "'jquery-ui-dist/jquery-ui.min.css', # for some reason compressing this in the css does not work", "# XGDS_PLANNER_CALLBACK = [(MODIFY,'my.planner.modify.callback', PYTHON), # (SAVE,'my.planner.save.callback', JAVASCRIPT)] # they will be executed", "PIPELINE_CSS.update(plannerSettings.XGDS_PLANNER_PIPELINE_CSS) # ### \"\"\" import os from geocamUtil.SettingsUtil import getOrCreateDict XGDS_PLANNER_OFFLINE = False", "= True XGDS_PLANNER_DIRECTIONAL_STATIONS = True # 'external/js/jquery/jquery.migrate.min.js', XGDS_PLANNER_PIPELINE_JS = { 'planner_app': { 'source_filenames':", "platform is licensed under the Apache License, Version 2.0 # (the \"License\"); you", "JAVASCRIPT = 'JavaScript' PYTHON = 'Python' EXEC = 'Exec' XGDS_PLANNER_CALLBACK = [] #", "= [(MODIFY,'my.planner.modify.callback', PYTHON), # (SAVE,'my.planner.save.callback', JAVASCRIPT)] # they will be executed in order", "database table. # XGDS_PLANNER_SCHEMAS = [ # ] # XGDS_PLANNER_SCHEMAS: A list of", "'jsonPlan', 'deleted', 'readOnly', 'numStations', 'numSegments', 'numCommands', 'stats' ] } # If you have", "stuff to context for plan editor, override and register your own method if", "library that has that function implemented. # Dictionary should be: legible name: namespace", "of (formatCode, extension, exporterClass). This is the entire list of everything that #", "'backbone.wreqr/lib/backbone.wreqr.min.js', 'backbone.babysitter/lib/backbone.babysitter.min.js', 'backbone-relational/backbone-relational.js', 'backbone-forms/distribution/backbone-forms.min.js', 'backbone.marionette/lib/backbone.marionette.min.js', 'string-format/lib/string-format.js', 'usng/usng.js', 'proj4/dist/proj4.js', 'xgds_map_server/js/util/handlebars-helpers.js', 'xgds_map_server/js/util/geo.js', 'xgds_map_server/js/util/forms.js', 'xgds_planner2/js/plannerApp.js', 'xgds_planner2/js/plannerModels.js',", "we want to add uploadJson into the custom map for right now. 
Really", "hold a map of site frames, so we can convert lat/long to the", "= (global_settings.TEMPLATE_CONTEXT_PROCESSORS + ( # ... # 'geocamUtil.context_processors.settings' XGDS_PLANNER_SCHEDULE_INCLUDED = None # Test", "\"Command\" XGDS_PLANNER_COMMAND_MONIKER_PLURAL = XGDS_PLANNER_COMMAND_MONIKER + 's' #TODO to have a default site frame", "os.path.join('xgds_map_server', 'templates', 'handlebars', 'search')] XGDS_PLANNER_PLAN_BEARING_HANDLEBAR_PATH = 'xgds_planner2/templates/xgds_planner2/bearingDistancePlan.handlebars' # XGDS_PLANNER_LINKS_LOADED_CALLBACK: The fully qualified name", "{ 'source_filenames': ( 'external/js/qunit-1.12.0.js', 'xgds_planner2/js/tests.js', ), 'output_filename': 'js/planner_tests.js' } } XGDS_PLANNER_PIPELINE_CSS = {", "= {'ol': 'xgds_planner2/js/olPlanMap.js', 'model': XGDS_PLANNER_PLAN_MODEL, 'hiddenColumns': ['stations', 'type', 'id']} XGDS_DATA_MASKED_FIELDS = getOrCreateDict('XGDS_DATA_MASKED_FIELDS') XGDS_DATA_MASKED_FIELDS['xgds_planner2']", "build/static/xgds_planner2 directory. The client-side JS # reads the simplified versions from there. #", "dicts. If no other django-pipeline includes are defined, the relevant siteSettings.py section might", "default value' If the admin for the site doesn't like the default value,", "javascript files to be included by the mapviews.js # to support custom command", "be modified in the Django settings module. Let's say one such parameter is", "### # DJANGO-PIPELINE ADDENDUM: For this module to work, the site-level config file", "You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0. # #", "code somewhere else overrides # some functionality in the planner. XGDS_PLANNER_TEST_SKIP_INDEX = False", "flight management features in display # IMPORTANT YOU MUST INCLUDE THIS IN SITE", "on the front end after modification or save. # If it is an", "some new parameters that can be modified in the Django settings module. 
Let's", "default value, they can override it in the site-level settings module, like this:", "KIND, either express or implied. See the License for the # specific language", "XGDS_PLANNER_CREATE_URL = \"/xgds_planner2/plan/create\" # Schema used to be set in the settings, now", "'xgds_map_server/js/map_viewer/olInitialLayers.js' ), 'output_filename': 'js/custom_map.js' } # Override this compilation of javascript files for", "# if we are using the planner we want to add uploadJson into", "and # PlanLibrary source files. One of the steps within 'manage.py prep' #", "\"simulator\": \"genericVehicle.Simulator\", # the namespace within the simulator js } } # XGDS_PLANNER_COMMAND_RENDERER_SCRIPTS", "loaded # as part of the client-side planner JS. # # * @simulator", "the JavaScript name of the simulator module defined by # the file at", "# * @simulator is the JavaScript name of the simulator module defined by", "installed with bower 'xgds_planner2_testing': { 'source_filenames': ( 'external/js/qunit-1.12.0.js', 'xgds_planner2/js/tests.js', ), 'output_filename': 'js/planner_tests.js' }", "module to work, the site-level config file (siteSettings.py), must merge the XGDS_PLANNER_PIPELINE_JS and", "SITE SETTINGS # TEMPLATE_CONTEXT_PROCESSORS = (global_settings.TEMPLATE_CONTEXT_PROCESSORS + ( # ... # 'geocamUtil.context_processors.settings' XGDS_PLANNER_SCHEDULE_INCLUDED", "True XGDS_PLANNER_DIRECTIONAL_STATIONS = True # 'external/js/jquery/jquery.migrate.min.js', XGDS_PLANNER_PIPELINE_JS = { 'planner_app': { 'source_filenames': ('jquery/dist/jquery.min.js',", "plotting values in the flot plot chart, register functions here. # You must", "dict of Command type to javascript file to be used in the mapviews.js", "list of everything that # xgds_planner provides; remove anything you won't be using.", "anything you won't be using. 
XGDS_PLANNER_PLAN_EXPORTERS = ( ('xpjson', '.json', 'xgds_planner2.planExporter.XpjsonPlanExporter'), ('bearing_distance', '.bdj',", "# Copyright (c) 2015, United States Government, as represented by the # Administrator", "This is the entire list of everything that # xgds_planner provides; remove anything", "'.json', 'xgds_planner2.planImporter.XPJsonPlanImporter'), ) # kml root from xgds_map_server XGDS_PLANNER_LAYER_FEED_URL = \"/xgds_map_server/treejson/\" XGDS_PLANNER_LINE_WIDTH_PIXELS =", "versions from there. # # * @simulatorUrl is relative to STATIC_URL. It should", "XGDS_PLANNER_OFFLINE = False # Don't load google earth if this is true XGDS_PLANNER_MAP_ROTATION_HANDLES", "'null' # This is used to hold a map of site frames, so", "= {'Plan': ['uuid', 'dateModified', 'jsonPlan', 'deleted', 'readOnly', 'numStations', 'numSegments', 'numCommands', 'stats' ] }", "will not know about the default value! ### # DJANGO-PIPELINE ADDENDUM: For this", "custom plan create, note that since it's in site settings you can't have", "by calling views.getSiteFrames(). XGDS_PLANNER_SITE_FRAMES = [] XGDS_MAP_SERVER_JS_MAP = getOrCreateDict('XGDS_MAP_SERVER_JS_MAP') XGDS_MAP_SERVER_JS_MAP['Plan'] = {'ol': 'xgds_planner2/js/olPlanMap.js',", "# If it is a Python method, it will happen on the back", "\"GenericVehicle\": { \"schemaSource\": \"apps/xgds_planner2/js/xgds_planner2/planner/genericVehiclePlanSchema.json\", \"librarySource\": \"apps/xgds_planner2/js/xgds_planner2/planner/genericVehiclePlanLibrary.json\", \"simulatorUrl\": \"xgds_planner2/js/planner/genericVehicleSimulator.js\", \"simulator\": \"genericVehicle.Simulator\", # the namespace", "have callbacks to be connected to the planner, register them as follows #", "compileXpjson.py and the simplified/canonical versions are written # to the build/static/xgds_planner2 directory. 
The", "used to hold a map of site frames, so we can convert lat/long", "map of site frames, so we can convert lat/long to the closest site", "available in the # planner. Notes: # # * @schemaSource and @librarySource are", "None # OVERRIDE this in your sitesettings to have a custom plan create,", "as part of the client-side planner JS. # # * @simulator is the", "is the entire list of everything that # xgds_planner provides; remove anything you", "the namespace within the simulator js } } # XGDS_PLANNER_COMMAND_RENDERER_SCRIPTS javascript files to", "can be modified in the Django settings module. Let's say one such parameter", "will be executed in order either when the plan is modified or when", "planner_app # 'backbone-forms/distribution/templates/old.css', 'xgds_planner2/css/planner.css', #'xgds_planner2/css/forms_adjust.css', ), 'output_filename': 'css/planner_app.css', 'template_name': 'xgds_planner2/pipelineCSS.css', }, 'xgds_planner2_testing': {", "the planner_app # 'backbone-forms/distribution/templates/old.css', 'xgds_planner2/css/planner.css', #'xgds_planner2/css/forms_adjust.css', ), 'output_filename': 'css/planner_app.css', 'template_name': 'xgds_planner2/pipelineCSS.css', }, 'xgds_planner2_testing':", "they will be executed in order either when the plan is modified or", "method, it will happen on the back end after modification or save. MODIFY", "basic planExecution. # This gets invoked from schedulePlans call in views.py XGDS_PLANNER_SCHEDULE_EXTRAS_METHOD =", "{ 'planner_app': { 'source_filenames': ('jquery/dist/jquery.min.js', 'jquery-migrate-official/src/migrate.js', 'jquery-ui-dist/jquery-ui.min.js', 'handlebars/dist/handlebars.min.js', 'backbone/backbone.js', 'backbone.wreqr/lib/backbone.wreqr.min.js', 'backbone.babysitter/lib/backbone.babysitter.min.js', 'backbone-relational/backbone-relational.js', 'backbone-forms/distribution/backbone-forms.min.js',", "the closest site frame. # It is initialized by calling views.getSiteFrames(). 
XGDS_PLANNER_SITE_FRAMES =", "Unless required by applicable law or agreed to in writing, software distributed #", "}, 'xgds_planner2_testing': { 'source_filenames': ( 'qunit/qunit/qunit.css', ), 'output_filename': 'css/planner_tests.css', }, } PIPELINE =", "# This gets invoked from schedulePlans call in views.py XGDS_PLANNER_SCHEDULE_EXTRAS_METHOD = None #", "into the custom map for right now. Really it should not be jammed", "XGDS_PLANNER_COMMAND_MONIKER = \"Command\" XGDS_PLANNER_COMMAND_MONIKER_PLURAL = XGDS_PLANNER_COMMAND_MONIKER + 's' #TODO to have a default", "PIPELINE['JAVASCRIPT'] = getOrCreateDict('PIPELINE.JAVASCRIPT') # if we are using the planner we want to", "'planner_app': { 'source_filenames': ('jquery/dist/jquery.min.js', 'jquery-migrate-official/src/migrate.js', 'jquery-ui-dist/jquery-ui.min.js', 'handlebars/dist/handlebars.min.js', 'backbone/backbone.js', 'backbone.wreqr/lib/backbone.wreqr.min.js', 'backbone.babysitter/lib/backbone.babysitter.min.js', 'backbone-relational/backbone-relational.js', 'backbone-forms/distribution/backbone-forms.min.js', 'backbone.marionette/lib/backbone.marionette.min.js',", "have a custom plan create, note that since it's in site settings you", "schema. The model is loaded # as part of the client-side planner JS.", "sitesettings to have a custom plan create, note that since it's in site", "is used to hold a map of site frames, so we can convert", "javascript library that has that function implemented. 
# Dictionary should be: legible name:", "have a default site frame in the creation form, set this to the", "If you will be plotting values in the flot plot chart, register functions", "django-pipeline includes are defined, the relevant siteSettings.py section might look like this: PIPELINE_JS", "in top-level siteSettings.py #TODO update, qunit is installed with bower 'xgds_planner2_testing': { 'source_filenames':", "'model': XGDS_PLANNER_PLAN_MODEL, 'hiddenColumns': ['stations', 'type', 'id']} XGDS_DATA_MASKED_FIELDS = getOrCreateDict('XGDS_DATA_MASKED_FIELDS') XGDS_DATA_MASKED_FIELDS['xgds_planner2'] = {'Plan': ['uuid',", "client-side JS # reads the simplified versions from there. # # * @simulatorUrl", "# # Unless required by applicable law or agreed to in writing, software", "somewhere else overrides # some functionality in the planner. XGDS_PLANNER_TEST_SKIP_INDEX = False XGDS_PLANNER_TEST_SKIP_EDIT", "the back end after modification or save. # If it is a javascript", "'external/js/qunit-1.12.0.js', 'xgds_planner2/js/tests.js', ), 'output_filename': 'js/planner_tests.js' } } XGDS_PLANNER_PIPELINE_CSS = { 'planner_app': { 'source_filenames':", "'js/simulator.js', } _thisDir = os.path.dirname(__file__) # Set to true to make the bearing", "# CONDITIONS OF ANY KIND, either express or implied. See the License for", "specific language governing permissions and limitations under the License. 
#__END_LICENSE__ from geocamUtil.SettingsUtil import", "the mapviews.js # to support custom command rendering XGDS_PLANNER_COMMAND_RENDERER_SCRIPTS = () # XGDS_PLANNER_COMMAND_RENDERERS", "XGDS_PLANNER_TEST_SKIP_CREATE_PLAN_PAGE = False XGDS_PLANNER_TEST_SKIP_CREATE_PLAN = False XGDS_PLANNER_HANDLEBARS_DIRS = [os.path.join('xgds_planner2', 'templates', 'handlebars'), os.path.join('xgds_map_server', 'templates',", "'xgds_planner2.kmlPlanImporter.KmlLineStringPlanImporter'), ('csv', '.csv', 'xgds_planner2.csvPlanImporter.CSVPlanImporter'), ('json', '.json', 'xgds_planner2.planImporter.XPJsonPlanImporter'), ) # kml root from xgds_map_server", "steps within 'manage.py prep' # is 'prepapps'. During that step, those files are", "'handlebars'), os.path.join('xgds_map_server', 'templates', 'handlebars', 'search')] XGDS_PLANNER_PLAN_BEARING_HANDLEBAR_PATH = 'xgds_planner2/templates/xgds_planner2/bearingDistancePlan.handlebars' # XGDS_PLANNER_LINKS_LOADED_CALLBACK: The fully qualified", "'-stats.json', 'xgds_planner2.statsPlanExporter.StatsPlanExporter'), # ('pml', '.pml', 'xgds_planner2.pmlPlanExporter.PmlPlanExporter'), ) # list of (formatCode, extension, importerClass)", "= 'JavaScript' PYTHON = 'Python' EXEC = 'Exec' XGDS_PLANNER_CALLBACK = [] # If", "# see xgds_kn for example XGDS_PLANNER_COMMAND_RENDERERS = {} # If this is defined", "= \"/xgds_map_server/treejson/\" XGDS_PLANNER_LINE_WIDTH_PIXELS = 3 XGDS_PLANNER_PLAN_MODEL = \"xgds_planner2.Plan\" XGDS_PLANNER_PLAN_MONIKER = \"Plan\" XGDS_PLANNER_PLAN_EXECUTION_MODEL =", "to add uploadJson into the custom map for right now. Really it should", "= 'Modify' SAVE = 'Save' DELETE = 'Delete' JAVASCRIPT = 'JavaScript' PYTHON =", "be jammed in that file. 
PIPELINE['JAVASCRIPT']['custom_map'] = {'source_filenames': ('xgds_planner2/js/uploadJson.js', 'xgds_map_server/js/map_viewer/olShowMapCoords.js', 'xgds_map_server/js/map_viewer/olInitialLayers.js' ), 'output_filename':", "'xgds_planner2/js/olPlanViews.js', 'xgds_planner2/js/simulatorDriver.js' ), 'output_filename': 'js/compiled_planner_app.js' }, # must create 'simulator' entry in top-level", "XGDS_PLANNER_SCHEMAS: A list of XPJSON schemas available in the # planner. Notes: #", "the License. # You may obtain a copy of the License at #", "STATIC_URL. It should point to a JavaScript # file that defines the simulator", "'xgds_planner2_testing': { 'source_filenames': ( 'external/js/qunit-1.12.0.js', 'xgds_planner2/js/tests.js', ), 'output_filename': 'js/planner_tests.js' } } XGDS_PLANNER_PIPELINE_CSS =", "for the site doesn't like the default value, they can override it in", "to javascript file to be used in the mapviews.js # to render a", "'xgds_planner2.planExporter.CrsJsonPlanExporter'), ('kml', '.kml', 'xgds_planner2.kmlPlanExporter.KmlPlanExporter'), ('stats', '-stats.json', 'xgds_planner2.statsPlanExporter.StatsPlanExporter'), # ('pml', '.pml', 'xgds_planner2.pmlPlanExporter.PmlPlanExporter'), ) #", "import os from geocamUtil.SettingsUtil import getOrCreateDict XGDS_PLANNER_OFFLINE = False # Don't load google", "that can be modified in the Django settings module. Let's say one such", "you are not doing the basic planExecution. 
# This gets invoked from schedulePlans", "views.py XGDS_PLANNER_SCHEDULE_EXTRAS_METHOD = None # OVERRIDE this in your sitesettings to have a", "the simulator js } } # XGDS_PLANNER_COMMAND_RENDERER_SCRIPTS javascript files to be included by", "('jquery/dist/jquery.min.js', 'jquery-migrate-official/src/migrate.js', 'jquery-ui-dist/jquery-ui.min.js', 'handlebars/dist/handlebars.min.js', 'backbone/backbone.js', 'backbone.wreqr/lib/backbone.wreqr.min.js', 'backbone.babysitter/lib/backbone.babysitter.min.js', 'backbone-relational/backbone-relational.js', 'backbone-forms/distribution/backbone-forms.min.js', 'backbone.marionette/lib/backbone.marionette.min.js', 'string-format/lib/string-format.js', 'usng/usng.js', 'proj4/dist/proj4.js',", "XGDS_PLANNER_COMMAND_RENDERER_SCRIPTS = () # XGDS_PLANNER_COMMAND_RENDERERS - A dict of Command type to javascript", "PIPELINE['JAVASCRIPT']['custom_map'] = {'source_filenames': ('xgds_planner2/js/uploadJson.js', 'xgds_map_server/js/map_viewer/olShowMapCoords.js', 'xgds_map_server/js/map_viewer/olInitialLayers.js' ), 'output_filename': 'js/custom_map.js' } # Override this", "versions are written # to the build/static/xgds_planner2 directory. The client-side JS # reads", "lat/long to the closest site frame. # It is initialized by calling views.getSiteFrames().", "a custom plan create, note that since it's in site settings you can't", "to be connected to the planner, register them as follows # XGDS_PLANNER_CALLBACK =", "under the License. #__END_LICENSE__ from geocamUtil.SettingsUtil import getOrCreateArray \"\"\" This app may define", "implemented. 
# Dictionary should be: legible name: namespace of library XGDS_PLANNER_PLOTS = {}", "= 'xgds_planner2/templates/xgds_planner2/bearingDistancePlan.handlebars' # XGDS_PLANNER_LINKS_LOADED_CALLBACK: The fully qualified name of an # extra JavaScript", "are defined, the relevant siteSettings.py section might look like this: PIPELINE_JS = {}", "settings.FOO Don't try to get the value of FOO from django.conf.settings. That settings", "\"xgds_planner2/js/planner/genericVehicleSimulator.js\", \"simulator\": \"genericVehicle.Simulator\", # the namespace within the simulator js } } #", "= None # OVERRIDE this in your sitesettings to have a custom plan", "'backbone/backbone.js', 'backbone.wreqr/lib/backbone.wreqr.min.js', 'backbone.babysitter/lib/backbone.babysitter.min.js', 'backbone-relational/backbone-relational.js', 'backbone-forms/distribution/backbone-forms.min.js', 'backbone.marionette/lib/backbone.marionette.min.js', 'string-format/lib/string-format.js', 'usng/usng.js', 'proj4/dist/proj4.js', 'xgds_map_server/js/util/handlebars-helpers.js', 'xgds_map_server/js/util/geo.js', 'xgds_map_server/js/util/forms.js', 'xgds_planner2/js/plannerApp.js',", "will happen on the back end after modification or save. # If it", "after the links tab is loaded. XGDS_PLANNER_LINKS_LOADED_CALLBACK = 'null' # This is used", "# reads the simplified versions from there. 
# # * @simulatorUrl is relative", "site-level settings module, like this: FOO = 'a better value' Other modules can", "import getOrCreateDict XGDS_PLANNER_OFFLINE = False # Don't load google earth if this is", "true XGDS_PLANNER_MAP_ROTATION_HANDLES = True XGDS_PLANNER_DIRECTIONAL_STATIONS = True # 'external/js/jquery/jquery.migrate.min.js', XGDS_PLANNER_PIPELINE_JS = { 'planner_app':", "to add stuff to context for plan editor, override and register your own", "be in crs units XGDS_PLANNER_CRS_UNITS_DEFAULT = False # list of (formatCode, extension, exporterClass).", "you will be plotting values in the flot plot chart, register functions here.", "value for FOO is defined in this file, like this: FOO = 'my", "'xgds_planner2.pmlPlanExporter.PmlPlanExporter'), ) # list of (formatCode, extension, importerClass) XGDS_PLANNER_PLAN_IMPORTERS = ( ('kml', '.kml',", "limitations under the License. #__END_LICENSE__ from geocamUtil.SettingsUtil import getOrCreateArray \"\"\" This app may", "XGDS_PLANNER_PIPELINE_CSS settings into global PIPELINE_{JS|CSS} settings dicts. If no other django-pipeline includes are", "The default value for FOO is defined in this file, like this: FOO", "stuff to planExecution if you are not doing the basic planExecution. # This", "PIPELINE_{JS|CSS} settings dicts. If no other django-pipeline includes are defined, the relevant siteSettings.py", "files are processed by # compileXpjson.py and the simplified/canonical versions are written #", "(formatCode, extension, importerClass) XGDS_PLANNER_PLAN_IMPORTERS = ( ('kml', '.kml', 'xgds_planner2.kmlPlanImporter.KmlLineStringPlanImporter'), ('csv', '.csv', 'xgds_planner2.csvPlanImporter.CSVPlanImporter'), ('json',", "& flight management features in display # IMPORTANT YOU MUST INCLUDE THIS IN", "files to be included by the mapviews.js # to support custom command rendering", "reads the simplified versions from there. 
# # * @simulatorUrl is relative to", "XGDS_PLANNER_TEST_SKIP_PLAN_EXPORT = False XGDS_PLANNER_TEST_SKIP_CREATE_PLAN_PAGE = False XGDS_PLANNER_TEST_SKIP_CREATE_PLAN = False XGDS_PLANNER_HANDLEBARS_DIRS = [os.path.join('xgds_planner2', 'templates',", "in your sitesettings to have a custom plan create, note that since it's", "extra JavaScript callback to call after the links tab is loaded. XGDS_PLANNER_LINKS_LOADED_CALLBACK =", "as follows # XGDS_PLANNER_CALLBACK = [(MODIFY,'my.planner.modify.callback', PYTHON), # (SAVE,'my.planner.save.callback', JAVASCRIPT)] # they will", "You must also then include the javascript library that has that function implemented.", "License. #__END_LICENSE__ from geocamUtil.SettingsUtil import getOrCreateArray \"\"\" This app may define some new", "\"schemaSource\": \"apps/xgds_planner2/js/xgds_planner2/planner/genericVehiclePlanSchema.json\", \"librarySource\": \"apps/xgds_planner2/js/xgds_planner2/planner/genericVehiclePlanLibrary.json\", \"simulatorUrl\": \"xgds_planner2/js/planner/genericVehicleSimulator.js\", \"simulator\": \"genericVehicle.Simulator\", # the namespace within the", "editor, override and register your own method if you need it. # It", "This gets invoked from schedulePlans call in views.py XGDS_PLANNER_SCHEDULE_EXTRAS_METHOD = None # OVERRIDE", "chart, register functions here. # You must also then include the javascript library", "# }, \"GenericVehicle\": { \"schemaSource\": \"apps/xgds_planner2/js/xgds_planner2/planner/genericVehiclePlanSchema.json\", \"librarySource\": \"apps/xgds_planner2/js/xgds_planner2/planner/genericVehiclePlanLibrary.json\", \"simulatorUrl\": \"xgds_planner2/js/planner/genericVehicleSimulator.js\", \"simulator\": \"genericVehicle.Simulator\", #", "# specific language governing permissions and limitations under the License. 
#__END_LICENSE__ from geocamUtil.SettingsUtil", "'exec' method, it will happen on the back end after modification or save.", "\"xgds_planner2.ExampleSimulator\", # }, \"GenericVehicle\": { \"schemaSource\": \"apps/xgds_planner2/js/xgds_planner2/planner/genericVehiclePlanSchema.json\", \"librarySource\": \"apps/xgds_planner2/js/xgds_planner2/planner/genericVehiclePlanLibrary.json\", \"simulatorUrl\": \"xgds_planner2/js/planner/genericVehicleSimulator.js\", \"simulator\": \"genericVehicle.Simulator\",", "XGDS_PLANNER_CALLBACK = [(MODIFY,'my.planner.modify.callback', PYTHON), # (SAVE,'my.planner.save.callback', JAVASCRIPT)] # they will be executed in", "'prepapps'. During that step, those files are processed by # compileXpjson.py and the", "\"xgds_planner2.Plan\" XGDS_PLANNER_PLAN_MONIKER = \"Plan\" XGDS_PLANNER_PLAN_EXECUTION_MODEL = \"xgds_planner2.PlanExecution\" XGDS_PLANNER_STATION_MONIKER = \"Station\" XGDS_PLANNER_STATION_MONIKER_PLURAL = XGDS_PLANNER_STATION_MONIKER", "point to a JavaScript # file that defines the simulator model for the", "'external/js/jquery/jquery.migrate.min.js', XGDS_PLANNER_PIPELINE_JS = { 'planner_app': { 'source_filenames': ('jquery/dist/jquery.min.js', 'jquery-migrate-official/src/migrate.js', 'jquery-ui-dist/jquery-ui.min.js', 'handlebars/dist/handlebars.min.js', 'backbone/backbone.js', 'backbone.wreqr/lib/backbone.wreqr.min.js',", "except in compliance with the License. # You may obtain a copy of", "'.pml', 'xgds_planner2.pmlPlanExporter.PmlPlanExporter'), ) # list of (formatCode, extension, importerClass) XGDS_PLANNER_PLAN_IMPORTERS = ( ('kml',", "This is used to hold a map of site frames, so we can", "may not use this file except in compliance with the License. 
# You", "'css/planner_app.css', 'template_name': 'xgds_planner2/pipelineCSS.css', }, 'xgds_planner2_testing': { 'source_filenames': ( 'qunit/qunit/qunit.css', ), 'output_filename': 'css/planner_tests.css', },", "'xgds_planner2/css/planner.css', #'xgds_planner2/css/forms_adjust.css', ), 'output_filename': 'css/planner_app.css', 'template_name': 'xgds_planner2/pipelineCSS.css', }, 'xgds_planner2_testing': { 'source_filenames': ( 'qunit/qunit/qunit.css',", "is licensed under the Apache License, Version 2.0 # (the \"License\"); you may", "as represented by the # Administrator of the National Aeronautics and Space Administration.", "way. # see xgds_kn for example XGDS_PLANNER_COMMAND_RENDERERS = {} # If this is", "'.kml', 'xgds_planner2.kmlPlanExporter.KmlPlanExporter'), ('stats', '-stats.json', 'xgds_planner2.statsPlanExporter.StatsPlanExporter'), # ('pml', '.pml', 'xgds_planner2.pmlPlanExporter.PmlPlanExporter'), ) # list of", "@schemaSource and @librarySource are paths relative to the PROJ_ROOT # base directory for", "XGDS_PLANNER_CALLBACK = [] # If you will be plotting values in the flot", "), 'output_filename': 'js/planner_tests.js' } } XGDS_PLANNER_PIPELINE_CSS = { 'planner_app': { 'source_filenames': ( 'jquery-ui-dist/jquery-ui.min.css',", "\"apps/xgds_planner2/testing/examplePlanLibrary.json\", # \"simulatorUrl\": \"xgds_planner2/testing/exampleSimulator.js\", # \"simulator\": \"xgds_planner2.ExampleSimulator\", # }, \"GenericVehicle\": { \"schemaSource\": \"apps/xgds_planner2/js/xgds_planner2/planner/genericVehiclePlanSchema.json\",", "if we are using the planner we want to add uploadJson into the", "of (formatCode, extension, importerClass) XGDS_PLANNER_PLAN_IMPORTERS = ( ('kml', '.kml', 'xgds_planner2.kmlPlanImporter.KmlLineStringPlanImporter'), ('csv', '.csv', 'xgds_planner2.csvPlanImporter.CSVPlanImporter'),", "siteSettings.py #TODO update, qunit is installed with bower 'xgds_planner2_testing': { 'source_filenames': ( 'external/js/qunit-1.12.0.js',", "{ 
\"schemaSource\": \"apps/xgds_planner2/js/xgds_planner2/planner/genericVehiclePlanSchema.json\", \"librarySource\": \"apps/xgds_planner2/js/xgds_planner2/planner/genericVehiclePlanLibrary.json\", \"simulatorUrl\": \"xgds_planner2/js/planner/genericVehicleSimulator.js\", \"simulator\": \"genericVehicle.Simulator\", # the namespace within", "XGDS_PLANNER_SCHEMAS = [ # ] # XGDS_PLANNER_SCHEMAS: A list of XPJSON schemas available", "} # If you have callbacks to be connected to the planner, register", "United States Government, as represented by the # Administrator of the National Aeronautics", "{} # If this is defined (true) then include the scheduling & flight", "if you need it. # It must add a json dictionary called extras", "for the schema. The model is loaded # as part of the client-side", "to context for plan editor, override and register your own method if you", "back end after modification or save. # If it is a javascript method," ]
[ "import Parser from email.header import decode_header from email.utils import parseaddr from datetime import", "smtplib import os import poplib from email.parser import Parser from email.header import decode_header", "not None else MIMEText(text, 'plain', 'utf-8') msg.attach(mime_text) for file in files: if isinstance(file,", "msg = MIMEMultipart('mixed') msg['Subject'] = subject msg['From'] = self._sender msg['To'] = \";\".join(to) msg['Cc']", "class Mail(object): def __init__(self, server, port, username, password, sender): self._server = server self._port", "res def _parse_message(self, msg): result = {} # Subject subject_tmp = msg.get('Subject', '')", "text=None, html=None, files=[]): try: # 构造邮件对象MIMEMultipart对象 # mixed为附件邮件类型 msg = MIMEMultipart('mixed') msg['Subject'] =", "result['Bodys'] = [] for par in msg.walk(): name = par.get_filename() if name: data", "value.decode(charset) tmp_addr_info = dict(name=value, addr=addr) result[header].append(tmp_addr_info) try: result['Date'] = datetime.strptime(msg.get('Date', ''), \"%a, %d", "datetime.strptime(msg.get('Date', ''), \"%a, %d %b %Y %H:%M:%S +0800\").strftime(self.SF) except Exception,e: result['Date'] = str(msg.get('Date',", "== '': continue name, addr = parseaddr(i) value, charset = decode_header(name)[0] if charset:", "int: mails = self.pop3_server.list()[1] if arg > len(mails): res[arg] = None continue resp,", "files=[]): try: # 构造邮件对象MIMEMultipart对象 # mixed为附件邮件类型 msg = MIMEMultipart('mixed') msg['Subject'] = subject msg['From']", "try: result['Date'] = datetime.strptime(msg.get('Date', ''), \"%a, %d %b %Y %H:%M:%S +0800\").strftime(self.SF) except Exception,e:", "msg.get(header, '') temp_list = temp.split(',') for i in temp_list: if i == '':", "= \";\".join(to) msg['Cc'] = \";\".join(cc) mime_text = MIMEText(html, 'html', 'utf-8') if html is", "= self._parse_message(msg) elif type(arg) == int: mails = self.pop3_server.list()[1] if arg > len(mails):", "files: if isinstance(file, str): file = 
file.decode(\"utf-8\") basename = os.path.basename(file) # 构造附件 sendfile", "coding:utf-8 -*- from email.mime.multipart import MIMEMultipart from email.mime.text import MIMEText import smtplib import", "i == '': continue name, addr = parseaddr(i) value, charset = decode_header(name)[0] if", "self.args_pop_server = pop_server self.args_user = user self.args_password = password self._restart() def quit(self): if", "send_result class MailServer(object): SF = \"%Y-%m-%d %H:%M:%S\" pop3_server = None args_pop_server = None", "tmp_pop3_server def get(self, *args): self._restart() res = {} for arg in args: if", "= poplib.POP3(self.args_pop_server) tmp_pop3_server.user(self.args_user) tmp_pop3_server.pass_(self.args_password) self.pop3_server = tmp_pop3_server def get(self, *args): self._restart() res =", "if charset: value = value.decode(charset) result['Subject'] = value # 'From', 'To', 'Cc' for", "Subject subject_tmp = msg.get('Subject', '') value, charset = decode_header(subject_tmp)[0] if charset: value =", "= Parser().parsestr(b'\\r\\n'.join(lines)) res[arg] = self._parse_message(msg) else: res[arg] = None return res def _parse_message(self,", "msg['From'] = self._sender msg['To'] = \";\".join(to) msg['Cc'] = \";\".join(cc) mime_text = MIMEText(html, 'html',", "text_att = MIMEText(sendfile, 'base64', 'utf-8') text_att[\"Content-Type\"] = 'application/octet-stream' text_att[\"Content-Disposition\"] = 'attachment; filename=%s' %", "= self.pop3_server.retr(arg) msg = Parser().parsestr(b'\\r\\n'.join(lines)) res[arg] = self._parse_message(msg) else: res[arg] = None return", "msg['To'] = \";\".join(to) msg['Cc'] = \";\".join(cc) mime_text = MIMEText(html, 'html', 'utf-8') if html", "decode_header from email.utils import parseaddr from datetime import datetime class Mail(object): def __init__(self,", "== 'list': res[arg] = self.pop3_server.list() elif arg == 'latest': mails = self.pop3_server.list()[1] resp,", "= os.path.basename(file) # 构造附件 sendfile = open(file, 
'rb').read() text_att = MIMEText(sendfile, 'base64', 'utf-8')", "smtp = smtplib.SMTP_SSL(self._server, self._port) smtp.set_debuglevel(0) smtp.ehlo() smtp.login(self._username, self._password) err = smtp.sendmail(self._sender, to+cc, msg.as_string())", "res[arg] = None continue resp, lines, octets = self.pop3_server.retr(arg) msg = Parser().parsestr(b'\\r\\n'.join(lines)) res[arg]", "result['Files'] = [] result['Bodys'] = [] for par in msg.walk(): name = par.get_filename()", "%b %Y %H:%M:%S +0800\").strftime(self.SF) except Exception,e: result['Date'] = str(msg.get('Date', '')) result['Files'] = []", "import poplib from email.parser import Parser from email.header import decode_header from email.utils import", "= self.pop3_server.retr(len(mails)) msg = Parser().parsestr(b'\\r\\n'.join(lines)) res[arg] = self._parse_message(msg) elif type(arg) == int: mails", "= temp.split(',') for i in temp_list: if i == '': continue name, addr", "= smtp.sendmail(self._sender, to+cc, msg.as_string()) smtp.close() if not err: send_result = True, None else:", "[] result['Bodys'] = [] for par in msg.walk(): name = par.get_filename() if name:", "for header in ['From', 'To', 'Cc']: result[header] = [] temp = msg.get(header, '')", "elif arg == 'list': res[arg] = self.pop3_server.list() elif arg == 'latest': mails =", "'') value, charset = decode_header(subject_tmp)[0] if charset: value = value.decode(charset) result['Subject'] = value", "self.pop3_server = None def _restart(self): self.quit() tmp_pop3_server = poplib.POP3(self.args_pop_server) tmp_pop3_server.user(self.args_user) tmp_pop3_server.pass_(self.args_password) self.pop3_server =", "[] temp = msg.get(header, '') temp_list = temp.split(',') for i in temp_list: if", "= self.pop3_server.list()[1] if arg > len(mails): res[arg] = None continue resp, lines, octets", "file.decode(\"utf-8\") basename = os.path.basename(file) # 构造附件 sendfile = open(file, 'rb').read() text_att = MIMEText(sendfile,", "datetime class Mail(object): def 
__init__(self, server, port, username, password, sender): self._server = server", "password, sender): self._server = server self._port = port self._username = username self._password =", "= value.decode(charset) tmp_addr_info = dict(name=value, addr=addr) result[header].append(tmp_addr_info) try: result['Date'] = datetime.strptime(msg.get('Date', ''), \"%a,", "'Cc' for header in ['From', 'To', 'Cc']: result[header] = [] temp = msg.get(header,", "MIMEText(text, 'plain', 'utf-8') msg.attach(mime_text) for file in files: if isinstance(file, str): file =", "from email.header import decode_header from email.utils import parseaddr from datetime import datetime class", "\";\".join(cc) mime_text = MIMEText(html, 'html', 'utf-8') if html is not None else MIMEText(text,", "return res def _parse_message(self, msg): result = {} # Subject subject_tmp = msg.get('Subject',", "%d %b %Y %H:%M:%S +0800\").strftime(self.SF) except Exception,e: result['Date'] = str(msg.get('Date', '')) result['Files'] =", "__init__(self, server, port, username, password, sender): self._server = server self._port = port self._username", "subject msg['From'] = self._sender msg['To'] = \";\".join(to) msg['Cc'] = \";\".join(cc) mime_text = MIMEText(html,", "> len(mails): res[arg] = None continue resp, lines, octets = self.pop3_server.retr(arg) msg =", "addr = parseaddr(i) value, charset = decode_header(name)[0] if charset: value = value.decode(charset) tmp_addr_info", "result['Date'] = str(msg.get('Date', '')) result['Files'] = [] result['Bodys'] = [] for par in", "= MIMEText(sendfile, 'base64', 'utf-8') text_att[\"Content-Type\"] = 'application/octet-stream' text_att[\"Content-Disposition\"] = 'attachment; filename=%s' % basename.encode(\"gb2312\")", "= 'attachment; filename=%s' % basename.encode(\"gb2312\") msg.attach(text_att) # 发送邮件 smtp = smtplib.SMTP_SSL(self._server, self._port) smtp.set_debuglevel(0)", "Exception,e: result['Date'] = str(msg.get('Date', '')) result['Files'] = [] result['Bodys'] = 
[] for par", "not None: self.pop3_server.quit() self.pop3_server = None def _restart(self): self.quit() tmp_pop3_server = poplib.POP3(self.args_pop_server) tmp_pop3_server.user(self.args_user)", "type(arg) == int: mails = self.pop3_server.list()[1] if arg > len(mails): res[arg] = None", "result[header] = [] temp = msg.get(header, '') temp_list = temp.split(',') for i in", "addr=addr) result[header].append(tmp_addr_info) try: result['Date'] = datetime.strptime(msg.get('Date', ''), \"%a, %d %b %Y %H:%M:%S +0800\").strftime(self.SF)", "from email.parser import Parser from email.header import decode_header from email.utils import parseaddr from", "import datetime class Mail(object): def __init__(self, server, port, username, password, sender): self._server =", "None args_password = None def __init__(self, pop_server, user, password): self.args_pop_server = pop_server self.args_user", "def _restart(self): self.quit() tmp_pop3_server = poplib.POP3(self.args_pop_server) tmp_pop3_server.user(self.args_user) tmp_pop3_server.pass_(self.args_password) self.pop3_server = tmp_pop3_server def get(self,", "res[arg] = self.pop3_server.stat()[0] elif arg == 'list': res[arg] = self.pop3_server.list() elif arg ==", "is not None: self.pop3_server.quit() self.pop3_server = None def _restart(self): self.quit() tmp_pop3_server = poplib.POP3(self.args_pop_server)", "msg = Parser().parsestr(b'\\r\\n'.join(lines)) res[arg] = self._parse_message(msg) else: res[arg] = None return res def", "None continue resp, lines, octets = self.pop3_server.retr(arg) msg = Parser().parsestr(b'\\r\\n'.join(lines)) res[arg] = self._parse_message(msg)", "if isinstance(file, str): file = file.decode(\"utf-8\") basename = os.path.basename(file) # 构造附件 sendfile =", "text_att[\"Content-Disposition\"] = 'attachment; filename=%s' % basename.encode(\"gb2312\") msg.attach(text_att) # 发送邮件 smtp = smtplib.SMTP_SSL(self._server, self._port)", "os.path.basename(file) # 构造附件 sendfile = open(file, 'rb').read() text_att = 
MIMEText(sendfile, 'base64', 'utf-8') text_att[\"Content-Type\"]", "res[arg] = self._parse_message(msg) elif type(arg) == int: mails = self.pop3_server.list()[1] if arg >", "else: res[arg] = None return res def _parse_message(self, msg): result = {} #", "data=data)) else: body = par.get_payload(decode=True) if body is not None: result['Bodys'].append(dict(body=body)) return result", "self.pop3_server is not None: self.pop3_server.quit() self.pop3_server = None def _restart(self): self.quit() tmp_pop3_server =", "self.args_user = user self.args_password = password self._restart() def quit(self): if self.pop3_server is not", "value, charset = decode_header(subject_tmp)[0] if charset: value = value.decode(charset) result['Subject'] = value #", "in temp_list: if i == '': continue name, addr = parseaddr(i) value, charset", "from email.utils import parseaddr from datetime import datetime class Mail(object): def __init__(self, server,", "continue name, addr = parseaddr(i) value, charset = decode_header(name)[0] if charset: value =", "= msg.get('Subject', '') value, charset = decode_header(subject_tmp)[0] if charset: value = value.decode(charset) result['Subject']", "= False, err except Exception, e: send_result = False, e return send_result class", "= [] temp = msg.get(header, '') temp_list = temp.split(',') for i in temp_list:", "False, e return send_result class MailServer(object): SF = \"%Y-%m-%d %H:%M:%S\" pop3_server = None", "'')) result['Files'] = [] result['Bodys'] = [] for par in msg.walk(): name =", "== 'stat': res[arg] = self.pop3_server.stat()[0] elif arg == 'list': res[arg] = self.pop3_server.list() elif", "temp.split(',') for i in temp_list: if i == '': continue name, addr =", "msg.as_string()) smtp.close() if not err: send_result = True, None else: send_result = False,", "args_password = None def __init__(self, pop_server, user, password): self.args_pop_server = pop_server self.args_user =", "-*- from email.mime.multipart import MIMEMultipart from 
email.mime.text import MIMEText import smtplib import os", "len(mails): res[arg] = None continue resp, lines, octets = self.pop3_server.retr(arg) msg = Parser().parsestr(b'\\r\\n'.join(lines))", "if html is not None else MIMEText(text, 'plain', 'utf-8') msg.attach(mime_text) for file in", "email.mime.text import MIMEText import smtplib import os import poplib from email.parser import Parser", "None return res def _parse_message(self, msg): result = {} # Subject subject_tmp =", "temp_list: if i == '': continue name, addr = parseaddr(i) value, charset =", "= False, e return send_result class MailServer(object): SF = \"%Y-%m-%d %H:%M:%S\" pop3_server =", "self._username = username self._password = password self._sender = sender def send(self, subject, to,", "= None def __init__(self, pop_server, user, password): self.args_pop_server = pop_server self.args_user = user", "text_att[\"Content-Type\"] = 'application/octet-stream' text_att[\"Content-Disposition\"] = 'attachment; filename=%s' % basename.encode(\"gb2312\") msg.attach(text_att) # 发送邮件 smtp", "octets = self.pop3_server.retr(arg) msg = Parser().parsestr(b'\\r\\n'.join(lines)) res[arg] = self._parse_message(msg) else: res[arg] = None", "charset = decode_header(name)[0] if charset: value = value.decode(charset) tmp_addr_info = dict(name=value, addr=addr) result[header].append(tmp_addr_info)", "arg > len(mails): res[arg] = None continue resp, lines, octets = self.pop3_server.retr(arg) msg", "decode_header(subject_tmp)[0] if charset: value = value.decode(charset) result['Subject'] = value # 'From', 'To', 'Cc'", "'To', 'Cc' for header in ['From', 'To', 'Cc']: result[header] = [] temp =", "self._password = password self._sender = sender def send(self, subject, to, cc=[], text=None, html=None,", "quit(self): if self.pop3_server is not None: self.pop3_server.quit() self.pop3_server = None def _restart(self): self.quit()", "value # 'From', 'To', 'Cc' for header in ['From', 'To', 'Cc']: result[header] =", 
"self.pop3_server.list() elif arg == 'latest': mails = self.pop3_server.list()[1] resp, lines, octets = self.pop3_server.retr(len(mails))", "'utf-8') if html is not None else MIMEText(text, 'plain', 'utf-8') msg.attach(mime_text) for file", "None def _restart(self): self.quit() tmp_pop3_server = poplib.POP3(self.args_pop_server) tmp_pop3_server.user(self.args_user) tmp_pop3_server.pass_(self.args_password) self.pop3_server = tmp_pop3_server def", "email.header import decode_header from email.utils import parseaddr from datetime import datetime class Mail(object):", "'plain', 'utf-8') msg.attach(mime_text) for file in files: if isinstance(file, str): file = file.decode(\"utf-8\")", "= {} # Subject subject_tmp = msg.get('Subject', '') value, charset = decode_header(subject_tmp)[0] if", "in args: if arg == 'stat': res[arg] = self.pop3_server.stat()[0] elif arg == 'list':", "= server self._port = port self._username = username self._password = password self._sender =", "= pop_server self.args_user = user self.args_password = password self._restart() def quit(self): if self.pop3_server", "'utf-8') text_att[\"Content-Type\"] = 'application/octet-stream' text_att[\"Content-Disposition\"] = 'attachment; filename=%s' % basename.encode(\"gb2312\") msg.attach(text_att) # 发送邮件", "smtp.close() if not err: send_result = True, None else: send_result = False, err", "from email.mime.multipart import MIMEMultipart from email.mime.text import MIMEText import smtplib import os import", "Parser from email.header import decode_header from email.utils import parseaddr from datetime import datetime", "'') temp_list = temp.split(',') for i in temp_list: if i == '': continue", "= tmp_pop3_server def get(self, *args): self._restart() res = {} for arg in args:", "= file.decode(\"utf-8\") basename = os.path.basename(file) # 构造附件 sendfile = open(file, 'rb').read() text_att =", "par.get_payload(decode=True) result['Files'].append(dict(name=name, data=data)) else: body = 
class Mail(object):
    """Simple SMTP-over-SSL mail sender."""

    def __init__(self, server, port, username, password, sender):
        """
        :param server: SMTP server host name
        :param port: SMTP SSL port (e.g. 465)
        :param username: login name for the SMTP server
        :param password: login password
        :param sender: From address used for outgoing mail
        """
        self._server = server
        self._port = port
        self._username = username
        self._password = password
        self._sender = sender

    def send(self, subject, to, cc=None, text=None, html=None, files=None):
        """Send a mail with an HTML or plain-text body and optional attachments.

        :param subject: mail subject line
        :param to: list of recipient addresses
        :param cc: optional list of carbon-copy addresses
        :param text: plain-text body, used when ``html`` is None
        :param html: HTML body; takes precedence over ``text``
        :param files: optional list of file paths to attach
        :return: ``(True, None)`` on success, otherwise ``(False, err)``
                 where ``err`` is the smtplib refusal dict or the raised
                 exception.
        """
        # Avoid the shared-mutable-default-argument pitfall; None behaves
        # exactly like the old [] defaults for callers that pass nothing.
        cc = [] if cc is None else cc
        files = [] if files is None else files
        try:
            # 'mixed' multipart so body parts and attachments can coexist.
            msg = MIMEMultipart('mixed')
            msg['Subject'] = subject
            msg['From'] = self._sender
            msg['To'] = ";".join(to)
            msg['Cc'] = ";".join(cc)
            mime_text = MIMEText(html, 'html', 'utf-8') if html is not None else MIMEText(text, 'plain', 'utf-8')
            msg.attach(mime_text)
            for file in files:
                # NOTE(review): str.decode only exists on Python 2 byte
                # strings; this module appears to target Python 2 -- confirm
                # before running under Python 3.
                if isinstance(file, str):
                    file = file.decode("utf-8")
                basename = os.path.basename(file)
                # Read the attachment and close the handle promptly
                # (the original leaked the open file object).
                with open(file, 'rb') as attachment_fh:
                    payload = attachment_fh.read()
                text_att = MIMEText(payload, 'base64', 'utf-8')
                text_att["Content-Type"] = 'application/octet-stream'
                text_att["Content-Disposition"] = 'attachment; filename=%s' % basename.encode("gb2312")
                msg.attach(text_att)
            # Deliver over SSL; always release the connection, even when
            # login or sendmail raises.
            smtp = smtplib.SMTP_SSL(self._server, self._port)
            try:
                smtp.set_debuglevel(0)
                smtp.ehlo()
                smtp.login(self._username, self._password)
                err = smtp.sendmail(self._sender, to + cc, msg.as_string())
            finally:
                smtp.close()
            if not err:
                send_result = True, None
            else:
                send_result = False, err
        except Exception as e:
            send_result = False, e
        return send_result
class MailServer(object):
    """POP3 mailbox reader that returns parsed message dictionaries."""

    # Output format for successfully parsed Date headers.
    SF = "%Y-%m-%d %H:%M:%S"
    pop3_server = None
    args_pop_server = None
    args_user = None
    args_password = None

    def __init__(self, pop_server, user, password):
        """Store credentials and open an authenticated POP3 session.

        :param pop_server: POP3 server host name
        :param user: mailbox user
        :param password: mailbox password
        """
        self.args_pop_server = pop_server
        self.args_user = user
        self.args_password = password
        self._restart()

    def quit(self):
        """Close the POP3 session if one is open (safe to call twice)."""
        if self.pop3_server is not None:
            self.pop3_server.quit()
            self.pop3_server = None

    def _restart(self):
        """Drop any existing session and open a fresh, authenticated one."""
        self.quit()
        tmp_pop3_server = poplib.POP3(self.args_pop_server)
        tmp_pop3_server.user(self.args_user)
        tmp_pop3_server.pass_(self.args_password)
        self.pop3_server = tmp_pop3_server

    def get(self, *args):
        """Fetch mailbox information per query argument.

        Each ``arg`` may be ``'stat'`` (message count), ``'list'`` (raw
        LIST response), ``'latest'`` (the newest message, parsed) or a
        1-based message number.  Unknown or out-of-range queries map to
        ``None`` instead of raising.

        :return: dict keyed by the query arguments
        """
        self._restart()
        res = {}
        for arg in args:
            if arg == 'stat':
                res[arg] = self.pop3_server.stat()[0]
            elif arg == 'list':
                res[arg] = self.pop3_server.list()
            elif arg == 'latest':
                mails = self.pop3_server.list()[1]
                if not mails:
                    # Empty mailbox: retr(0) would raise, report None.
                    res[arg] = None
                    continue
                resp, lines, octets = self.pop3_server.retr(len(mails))
                # NOTE(review): b''.join + Parser.parsestr assumes Python 2
                # poplib semantics (str lines) -- confirm before porting.
                msg = Parser().parsestr(b'\r\n'.join(lines))
                res[arg] = self._parse_message(msg)
            elif isinstance(arg, int):
                mails = self.pop3_server.list()[1]
                # Guard both ends: retr() is 1-based and raises on 0 or
                # negative indices (the original only checked the upper bound).
                if not 1 <= arg <= len(mails):
                    res[arg] = None
                    continue
                resp, lines, octets = self.pop3_server.retr(arg)
                msg = Parser().parsestr(b'\r\n'.join(lines))
                res[arg] = self._parse_message(msg)
            else:
                res[arg] = None
        return res

    def _parse_message(self, msg):
        """Flatten an ``email.message.Message`` into a plain dict.

        :param msg: parsed message object
        :return: dict with keys ``Subject``, ``From``/``To``/``Cc`` (lists
                 of ``{name, addr}`` dicts), ``Date`` (reformatted with
                 ``SF`` or the raw header when unparseable), ``Files``
                 (named attachments) and ``Bodys`` (other payloads)
        """
        result = {}
        # Subject -- only the first decoded fragment is used; multi-fragment
        # encoded subjects are truncated (pre-existing behavior, kept).
        subject_tmp = msg.get('Subject', '')
        value, charset = decode_header(subject_tmp)[0]
        if charset:
            value = value.decode(charset)
        result['Subject'] = value
        # Address headers: split on commas and decode each display name.
        for header in ['From', 'To', 'Cc']:
            result[header] = []
            temp = msg.get(header, '')
            temp_list = temp.split(',')
            for i in temp_list:
                if i == '':
                    continue
                name, addr = parseaddr(i)
                value, charset = decode_header(name)[0]
                if charset:
                    value = value.decode(charset)
                tmp_addr_info = dict(name=value, addr=addr)
                result[header].append(tmp_addr_info)
        # Date: reformat when it matches the expected "+0800" layout,
        # otherwise fall back to the raw header text.
        try:
            result['Date'] = datetime.strptime(msg.get('Date', ''), "%a, %d %b %Y %H:%M:%S +0800").strftime(self.SF)
        except Exception:
            result['Date'] = str(msg.get('Date', ''))
        result['Files'] = []
        result['Bodys'] = []
        for par in msg.walk():
            name = par.get_filename()
            if name:
                # A filename marks the part as an attachment.
                data = par.get_payload(decode=True)
                result['Files'].append(dict(name=name, data=data))
            else:
                # decode=True returns None for multipart containers; skip those.
                body = par.get_payload(decode=True)
                if body is not None:
                    result['Bodys'].append(dict(body=body))
        return result
Mostly used", "0] :param label: List of labels to encode :return: List of k-hot encoded", "The lower bound :param upper: The upper bound :return: Boolean \"\"\" return False", "lower: The lower bound :param upper: The upper bound :return: Boolean \"\"\" return", "filename :param delimiter: The delimiter used in the filename :param index: Where in", "a label in a filename-friendly format. Mostly used in the \"read_to_melspecgram()\" function to", "0. 0. 0. 0. 0.] Sample output: \"1_0_0_0_0_0_0\" :param label: Numpy array representing", "with the highest amount of votes k_hot_label = k_hot_label / np.max(k_hot_label) k_hot_label =", "input: [1. 0. 0. 0. 0. 0. 0.] Sample output: \"1_0_0_0_0_0_0\" :param label:", "encode :return: List of k-hot encoded labels or False if the label is", "label (length is one) :return: One-hot encoding of the label \"\"\" one_hot_label =", "encode a label. Sample input: [4] Sample output: [0, 0, 0, 0, 1,", "False if the label doesn't map to the standard emotions \"\"\" label =", "\"\"\" Checks if an audio sample is an outlier. Bounds are inclusive. :param", "label k-hot encoded to this program's standard emotion map or False if the", "# Only count the emotions with the highest amount of votes k_hot_label =", "upper): \"\"\" Checks if an audio sample is an outlier. Bounds are inclusive.", "a label. Takes a list of emotion IDs and returns a list encoding", "\"\"\" This file holds common functions across all database processing such as calculating", "k-hot encoded label :return: String representation of the label \"\"\" return \"_\".join(str(emo) for", "label: A list with one label (length is one) :return: One-hot encoding of", "as calculating statistics. \"\"\" import numpy as np from src import em_constants as", "if an audio sample is an outlier. Bounds are inclusive. :param wav: The", "repr_label(label): \"\"\" Represents a label in a filename-friendly format. 
Mostly used in the", "<= upper else True def get_label(filename, delimiter, index, db_emo_map): \"\"\" Gets the k-hot", "of the label \"\"\" one_hot_label = np.zeros(emc.NUM_EMOTIONS, dtype=int) one_hot_label[label[0]] = 1 return one_hot_label", "String representation of the label \"\"\" return \"_\".join(str(emo) for emo in label) def", "the k-hot encoded label :return: String representation of the label \"\"\" return \"_\".join(str(emo)", "Sample input: [0, 1, 2, 0, 6, 2] Sample output: [1, 0, 1,", "count the emotions with the highest amount of votes k_hot_label = k_hot_label /", "a k-hot encoded label back into emotion ids. Sample input: [1, 0, 0,", "The audio time series data points :param lower: The lower bound :param upper:", "the highest amount of votes k_hot_label = k_hot_label / np.max(k_hot_label) k_hot_label = np.floor(k_hot_label).astype(int)", "encoded labels or False if the label is unused \"\"\" # If there's", "import em_constants as emc def is_outlier(wav, lower, upper): \"\"\" Checks if an audio", "k_hot_label = np.zeros(emc.NUM_EMOTIONS) for emo_index, emo_count in zip(unique, counts): k_hot_label[emo_index] = emo_count #", ":param label: A list with one label (length is one) :return: One-hot encoding", "[emc.EMOTION_MAP[standard_emotion]] return k_hot_encode_label(emotion_id) def repr_label(label): \"\"\" Represents a label in a filename-friendly format.", ":return: The label k-hot encoded to this program's standard emotion map or False", "is an outlier. Bounds are inclusive. :param wav: The audio time series data", "there's only one label/vote, then use the quicker method of encoding if len(label)", "database-specific emotion mapping :return: The label k-hot encoded to this program's standard emotion", "This file holds common functions across all database processing such as calculating statistics.", "a sample's filename. :param filename: The sample's filename :param delimiter: The delimiter used", "inclusive. 
:param wav: The audio time series data points :param lower: The lower", "Sample input: [1. 0. 0. 0. 0. 0. 0.] Sample output: \"1_0_0_0_0_0_0\" :param", "a filename-friendly format. Mostly used in the \"read_to_melspecgram()\" function to write out labels", "returns a list encoding the most voted for emotion. Sample input: [0, 1,", "representation of the label \"\"\" return \"_\".join(str(emo) for emo in label) def k_hot_encode_label(label):", "ids. Sample input: [1, 0, 0, 0, 1, 0, 0] Sample output: [0,", "sample doesn't fit with the set of labels # that we're considering so", "# and the value is the number of votes for that emotion unique,", "\"\"\" # If there's only one label/vote, then use the quicker method of", "of labels # that we're considering so drop it if not np.any(k_hot_label): print(\"No", "= k_hot_label / np.max(k_hot_label) k_hot_label = np.floor(k_hot_label).astype(int) # If they're all zero, then", "of votes for that emotion unique, counts = np.unique(label, return_counts=True) k_hot_label = np.zeros(emc.NUM_EMOTIONS)", "to quickly one-hot encode a label. Sample input: [4] Sample output: [0, 0,", "filename. :param filename: The sample's filename :param delimiter: The delimiter used in the", "k_hot_label = k_hot_label / np.max(k_hot_label) k_hot_label = np.floor(k_hot_label).astype(int) # If they're all zero,", "a label. Private function to quickly one-hot encode a label. Sample input: [4]", "0, 0, 0, 1, 0, 0] :param label: A list with one label", "4] :param k_hot_label: A list of the k-hot encoded label :return: A list", "emotion IDs and returns a list encoding the most voted for emotion. Sample", "# If they're all zero, then this sample doesn't fit with the set", "out labels in the filename. Sample input: [1. 0. 0. 0. 0. 0.", "processing such as calculating statistics. \"\"\" import numpy as np from src import", "\"\"\" Gets the k-hot encoded label from a sample's filename. 
:param filename: The", "from src import em_constants as emc def is_outlier(wav, lower, upper): \"\"\" Checks if", "output: [1, 0, 1, 0, 0, 0, 0] :param label: List of labels", "as emc def is_outlier(wav, lower, upper): \"\"\" Checks if an audio sample is", "False if lower <= len(wav) <= upper else True def get_label(filename, delimiter, index,", "list encoding the most voted for emotion. Sample input: [0, 1, 2, 0,", "used in the \"read_to_melspecgram()\" function to write out labels in the filename. Sample", "standard emotion map or False if the label doesn't map to the standard", "return _one_hot_encode_label(label) # Convert the emotion numbers into an array where the index", "One hot encodes a label. Private function to quickly one-hot encode a label.", "map to the standard emotions \"\"\" label = filename.split(delimiter)[index] standard_emotion = db_emo_map[label] emotion_id", "one label/vote, then use the quicker method of encoding if len(label) == 1:", "label back into emotion ids. Sample input: [1, 0, 0, 0, 1, 0,", "program's standard emotion map or False if the label doesn't map to the", "index is the emotion # and the value is the number of votes", "is unused \"\"\" # If there's only one label/vote, then use the quicker", "votes for that emotion unique, counts = np.unique(label, return_counts=True) k_hot_label = np.zeros(emc.NUM_EMOTIONS) for", "/ np.max(k_hot_label) k_hot_label = np.floor(k_hot_label).astype(int) # If they're all zero, then this sample", "use the quicker method of encoding if len(label) == 1: return _one_hot_encode_label(label) #", "emotion ids. 
Sample input: [1, 0, 0, 0, 1, 0, 0] Sample output:", "The upper bound :return: Boolean \"\"\" return False if lower <= len(wav) <=", "if the label is unused \"\"\" # If there's only one label/vote, then", "numpy as np from src import em_constants as emc def is_outlier(wav, lower, upper):", "2, 0, 6, 2] Sample output: [1, 0, 1, 0, 0, 0, 0]", "return k_hot_label def _one_hot_encode_label(label): \"\"\" One hot encodes a label. Private function to", "into an array where the index is the emotion # and the value", "get_label(filename, delimiter, index, db_emo_map): \"\"\" Gets the k-hot encoded label from a sample's", "of the label \"\"\" return \"_\".join(str(emo) for emo in label) def k_hot_encode_label(label): \"\"\"", "list of emotion IDs and returns a list encoding the most voted for", "emotion_id = [emc.EMOTION_MAP[standard_emotion]] return k_hot_encode_label(emotion_id) def repr_label(label): \"\"\" Represents a label in a", "across all database processing such as calculating statistics. \"\"\" import numpy as np", "numbers into an array where the index is the emotion # and the", "outlier. Bounds are inclusive. :param wav: The audio time series data points :param", "emotion unique, counts = np.unique(label, return_counts=True) k_hot_label = np.zeros(emc.NUM_EMOTIONS) for emo_index, emo_count in", "return \"_\".join(str(emo) for emo in label) def k_hot_encode_label(label): \"\"\" K-hot encodes a label.", "else True def get_label(filename, delimiter, index, db_emo_map): \"\"\" Gets the k-hot encoded label", "labels # that we're considering so drop it if not np.any(k_hot_label): print(\"No usable", "\"\"\" Represents a label in a filename-friendly format. 
Mostly used in the \"read_to_melspecgram()\"", "mapping :return: The label k-hot encoded to this program's standard emotion map or", "counts): k_hot_label[emo_index] = emo_count # Only count the emotions with the highest amount", "return k_hot_encode_label(emotion_id) def repr_label(label): \"\"\" Represents a label in a filename-friendly format. Mostly", "The delimiter used in the filename :param index: Where in the filename the", ":param delimiter: The delimiter used in the filename :param index: Where in the", "A list of the k-hot encoded label :return: A list of the emotion", "one_hot_label = np.zeros(emc.NUM_EMOTIONS, dtype=int) one_hot_label[label[0]] = 1 return one_hot_label def inverse_k_hot_encode_label(k_hot_label): \"\"\" Inverses", "\"\"\" return \"_\".join(str(emo) for emo in label) def k_hot_encode_label(label): \"\"\" K-hot encodes a", "np.any(k_hot_label): print(\"No usable label.\") return False return k_hot_label def _one_hot_encode_label(label): \"\"\" One hot", "6, 2] Sample output: [1, 0, 1, 0, 0, 0, 0] :param label:", "label/emotion is located :param db_emo_map: The database-specific emotion mapping :return: The label k-hot", "k-hot encoded labels or False if the label is unused \"\"\" # If", "zip(unique, counts): k_hot_label[emo_index] = emo_count # Only count the emotions with the highest", "the index is the emotion # and the value is the number of", "not np.any(k_hot_label): print(\"No usable label.\") return False return k_hot_label def _one_hot_encode_label(label): \"\"\" One", "List of labels to encode :return: List of k-hot encoded labels or False", "(length is one) :return: One-hot encoding of the label \"\"\" one_hot_label = np.zeros(emc.NUM_EMOTIONS,", "sample is an outlier. Bounds are inclusive. :param wav: The audio time series", "function to write out labels in the filename. Sample input: [1. 0. 
0.", "unused \"\"\" # If there's only one label/vote, then use the quicker method", "A list with one label (length is one) :return: One-hot encoding of the", "Bounds are inclusive. :param wav: The audio time series data points :param lower:", "one_hot_label[label[0]] = 1 return one_hot_label def inverse_k_hot_encode_label(k_hot_label): \"\"\" Inverses a k-hot encoded label", "back into emotion ids. Sample input: [1, 0, 0, 0, 1, 0, 0]", "time series data points :param lower: The lower bound :param upper: The upper", "\"\"\" return False if lower <= len(wav) <= upper else True def get_label(filename,", "output: \"1_0_0_0_0_0_0\" :param label: Numpy array representing the k-hot encoded label :return: String", "index, db_emo_map): \"\"\" Gets the k-hot encoded label from a sample's filename. :param", "filename.split(delimiter)[index] standard_emotion = db_emo_map[label] emotion_id = [emc.EMOTION_MAP[standard_emotion]] return k_hot_encode_label(emotion_id) def repr_label(label): \"\"\" Represents", "we're considering so drop it if not np.any(k_hot_label): print(\"No usable label.\") return False", "and returns a list encoding the most voted for emotion. 
Sample input: [0,", "standard emotions \"\"\" label = filename.split(delimiter)[index] standard_emotion = db_emo_map[label] emotion_id = [emc.EMOTION_MAP[standard_emotion]] return", "the label \"\"\" return \"_\".join(str(emo) for emo in label) def k_hot_encode_label(label): \"\"\" K-hot", ":return: String representation of the label \"\"\" return \"_\".join(str(emo) for emo in label)", "If there's only one label/vote, then use the quicker method of encoding if", "0] Sample output: [0, 4] :param k_hot_label: A list of the k-hot encoded", "emc def is_outlier(wav, lower, upper): \"\"\" Checks if an audio sample is an", "doesn't fit with the set of labels # that we're considering so drop", "np.floor(k_hot_label).astype(int) # If they're all zero, then this sample doesn't fit with the", "def get_label(filename, delimiter, index, db_emo_map): \"\"\" Gets the k-hot encoded label from a", "upper: The upper bound :return: Boolean \"\"\" return False if lower <= len(wav)", "db_emo_map[label] emotion_id = [emc.EMOTION_MAP[standard_emotion]] return k_hot_encode_label(emotion_id) def repr_label(label): \"\"\" Represents a label in", "def _one_hot_encode_label(label): \"\"\" One hot encodes a label. Private function to quickly one-hot", "output: [0, 0, 0, 0, 1, 0, 0] :param label: A list with", "[4] Sample output: [0, 0, 0, 0, 1, 0, 0] :param label: A", "if the label doesn't map to the standard emotions \"\"\" label = filename.split(delimiter)[index]", "np.max(k_hot_label) k_hot_label = np.floor(k_hot_label).astype(int) # If they're all zero, then this sample doesn't", "is_outlier(wav, lower, upper): \"\"\" Checks if an audio sample is an outlier. Bounds", "label doesn't map to the standard emotions \"\"\" label = filename.split(delimiter)[index] standard_emotion =", "print(\"No usable label.\") return False return k_hot_label def _one_hot_encode_label(label): \"\"\" One hot encodes", "audio sample is an outlier. Bounds are inclusive. 
:param wav: The audio time", "map or False if the label doesn't map to the standard emotions \"\"\"", "\"1_0_0_0_0_0_0\" :param label: Numpy array representing the k-hot encoded label :return: String representation", "in the filename :param index: Where in the filename the label/emotion is located", "Mostly used in the \"read_to_melspecgram()\" function to write out labels in the filename.", "to write out labels in the filename. Sample input: [1. 0. 0. 0.", "encoded to this program's standard emotion map or False if the label doesn't", "0, 0, 0, 0] :param label: List of labels to encode :return: List", "1, 0, 0] Sample output: [0, 4] :param k_hot_label: A list of the", "Inverses a k-hot encoded label back into emotion ids. Sample input: [1, 0,", "an audio sample is an outlier. Bounds are inclusive. :param wav: The audio", "[1, 0, 1, 0, 0, 0, 0] :param label: List of labels to", "db_emo_map): \"\"\" Gets the k-hot encoded label from a sample's filename. :param filename:", "lower bound :param upper: The upper bound :return: Boolean \"\"\" return False if", "0, 0, 1, 0, 0] :param label: A list with one label (length", "em_constants as emc def is_outlier(wav, lower, upper): \"\"\" Checks if an audio sample", "a list encoding the most voted for emotion. Sample input: [0, 1, 2,", "label = filename.split(delimiter)[index] standard_emotion = db_emo_map[label] emotion_id = [emc.EMOTION_MAP[standard_emotion]] return k_hot_encode_label(emotion_id) def repr_label(label):", "upper bound :return: Boolean \"\"\" return False if lower <= len(wav) <= upper", "all database processing such as calculating statistics. 
\"\"\" import numpy as np from", "\"\"\" one_hot_label = np.zeros(emc.NUM_EMOTIONS, dtype=int) one_hot_label[label[0]] = 1 return one_hot_label def inverse_k_hot_encode_label(k_hot_label): \"\"\"", "k_hot_label[emo_index] = emo_count # Only count the emotions with the highest amount of", "# that we're considering so drop it if not np.any(k_hot_label): print(\"No usable label.\")", "the label doesn't map to the standard emotions \"\"\" label = filename.split(delimiter)[index] standard_emotion", "most voted for emotion. Sample input: [0, 1, 2, 0, 6, 2] Sample", "in label) def k_hot_encode_label(label): \"\"\" K-hot encodes a label. Takes a list of", "to encode :return: List of k-hot encoded labels or False if the label", "label: List of labels to encode :return: List of k-hot encoded labels or", "emotions \"\"\" label = filename.split(delimiter)[index] standard_emotion = db_emo_map[label] emotion_id = [emc.EMOTION_MAP[standard_emotion]] return k_hot_encode_label(emotion_id)", "is the emotion # and the value is the number of votes for", "the label \"\"\" one_hot_label = np.zeros(emc.NUM_EMOTIONS, dtype=int) one_hot_label[label[0]] = 1 return one_hot_label def", "the k-hot encoded label :return: A list of the emotion ids in the", "function to quickly one-hot encode a label. Sample input: [4] Sample output: [0,", "number of votes for that emotion unique, counts = np.unique(label, return_counts=True) k_hot_label =", "the quicker method of encoding if len(label) == 1: return _one_hot_encode_label(label) # Convert", "encoded label :return: String representation of the label \"\"\" return \"_\".join(str(emo) for emo", ":param k_hot_label: A list of the k-hot encoded label :return: A list of", "used in the filename :param index: Where in the filename the label/emotion is", "0. 0. 0. 0.] 
Sample output: \"1_0_0_0_0_0_0\" :param label: Numpy array representing the", "np.unique(label, return_counts=True) k_hot_label = np.zeros(emc.NUM_EMOTIONS) for emo_index, emo_count in zip(unique, counts): k_hot_label[emo_index] =", "List of k-hot encoded labels or False if the label is unused \"\"\"", "k-hot encoded label back into emotion ids. Sample input: [1, 0, 0, 0,", "in the \"read_to_melspecgram()\" function to write out labels in the filename. Sample input:", "are inclusive. :param wav: The audio time series data points :param lower: The", "one-hot encode a label. Sample input: [4] Sample output: [0, 0, 0, 0,", "counts = np.unique(label, return_counts=True) k_hot_label = np.zeros(emc.NUM_EMOTIONS) for emo_index, emo_count in zip(unique, counts):", "one_hot_label def inverse_k_hot_encode_label(k_hot_label): \"\"\" Inverses a k-hot encoded label back into emotion ids.", "holds common functions across all database processing such as calculating statistics. \"\"\" import", "_one_hot_encode_label(label) # Convert the emotion numbers into an array where the index is", "return_counts=True) k_hot_label = np.zeros(emc.NUM_EMOTIONS) for emo_index, emo_count in zip(unique, counts): k_hot_label[emo_index] = emo_count", "is one) :return: One-hot encoding of the label \"\"\" one_hot_label = np.zeros(emc.NUM_EMOTIONS, dtype=int)", "is the number of votes for that emotion unique, counts = np.unique(label, return_counts=True)", "len(wav) <= upper else True def get_label(filename, delimiter, index, db_emo_map): \"\"\" Gets the", "filename: The sample's filename :param delimiter: The delimiter used in the filename :param", "Sample output: \"1_0_0_0_0_0_0\" :param label: Numpy array representing the k-hot encoded label :return:", "emotion numbers into an array where the index is the emotion # and", "\"read_to_melspecgram()\" function to write out labels in the filename. Sample input: [1. 0.", "filename. Sample input: [1. 0. 0. 0. 0. 0. 0.] 
Sample output: \"1_0_0_0_0_0_0\"", "method of encoding if len(label) == 1: return _one_hot_encode_label(label) # Convert the emotion", "k_hot_label / np.max(k_hot_label) k_hot_label = np.floor(k_hot_label).astype(int) # If they're all zero, then this", "0, 1, 0, 0] :param label: A list with one label (length is", "encoded label :return: A list of the emotion ids in the label \"\"\"", "array representing the k-hot encoded label :return: String representation of the label \"\"\"", "votes k_hot_label = k_hot_label / np.max(k_hot_label) k_hot_label = np.floor(k_hot_label).astype(int) # If they're all", ":return: Boolean \"\"\" return False if lower <= len(wav) <= upper else True", "The sample's filename :param delimiter: The delimiter used in the filename :param index:", "label) def k_hot_encode_label(label): \"\"\" K-hot encodes a label. Takes a list of emotion", "0, 0] Sample output: [0, 4] :param k_hot_label: A list of the k-hot", "the filename :param index: Where in the filename the label/emotion is located :param", "inverse_k_hot_encode_label(k_hot_label): \"\"\" Inverses a k-hot encoded label back into emotion ids. 
Sample input:", "list of the k-hot encoded label :return: A list of the emotion ids", "points :param lower: The lower bound :param upper: The upper bound :return: Boolean", "that we're considering so drop it if not np.any(k_hot_label): print(\"No usable label.\") return", "drop it if not np.any(k_hot_label): print(\"No usable label.\") return False return k_hot_label def", "or False if the label doesn't map to the standard emotions \"\"\" label", "emotions with the highest amount of votes k_hot_label = k_hot_label / np.max(k_hot_label) k_hot_label", ":param lower: The lower bound :param upper: The upper bound :return: Boolean \"\"\"", "0, 0] :param label: A list with one label (length is one) :return:", "bound :param upper: The upper bound :return: Boolean \"\"\" return False if lower", "<= len(wav) <= upper else True def get_label(filename, delimiter, index, db_emo_map): \"\"\" Gets", "wav: The audio time series data points :param lower: The lower bound :param", "amount of votes k_hot_label = k_hot_label / np.max(k_hot_label) k_hot_label = np.floor(k_hot_label).astype(int) # If", "in zip(unique, counts): k_hot_label[emo_index] = emo_count # Only count the emotions with the", "of encoding if len(label) == 1: return _one_hot_encode_label(label) # Convert the emotion numbers", "unique, counts = np.unique(label, return_counts=True) k_hot_label = np.zeros(emc.NUM_EMOTIONS) for emo_index, emo_count in zip(unique,", "label :return: String representation of the label \"\"\" return \"_\".join(str(emo) for emo in", "= db_emo_map[label] emotion_id = [emc.EMOTION_MAP[standard_emotion]] return k_hot_encode_label(emotion_id) def repr_label(label): \"\"\" Represents a label", "dtype=int) one_hot_label[label[0]] = 1 return one_hot_label def inverse_k_hot_encode_label(k_hot_label): \"\"\" Inverses a k-hot encoded", "= [emc.EMOTION_MAP[standard_emotion]] return k_hot_encode_label(emotion_id) def repr_label(label): \"\"\" Represents a label in a filename-friendly", "the filename. 
Sample input: [1. 0. 0. 0. 0. 0. 0.] Sample output:", "input: [1, 0, 0, 0, 1, 0, 0] Sample output: [0, 4] :param", ":return: A list of the emotion ids in the label \"\"\" return np.where(k_hot_label", "the label/emotion is located :param db_emo_map: The database-specific emotion mapping :return: The label", "labels to encode :return: List of k-hot encoded labels or False if the", ":param label: Numpy array representing the k-hot encoded label :return: String representation of", "Boolean \"\"\" return False if lower <= len(wav) <= upper else True def", "0. 0. 0.] Sample output: \"1_0_0_0_0_0_0\" :param label: Numpy array representing the k-hot", "database processing such as calculating statistics. \"\"\" import numpy as np from src", "np.zeros(emc.NUM_EMOTIONS, dtype=int) one_hot_label[label[0]] = 1 return one_hot_label def inverse_k_hot_encode_label(k_hot_label): \"\"\" Inverses a k-hot", "to this program's standard emotion map or False if the label doesn't map", "an array where the index is the emotion # and the value is", "voted for emotion. Sample input: [0, 1, 2, 0, 6, 2] Sample output:", "[0, 1, 2, 0, 6, 2] Sample output: [1, 0, 1, 0, 0,", "they're all zero, then this sample doesn't fit with the set of labels", "in the filename the label/emotion is located :param db_emo_map: The database-specific emotion mapping", "0, 0, 1, 0, 0] Sample output: [0, 4] :param k_hot_label: A list", "IDs and returns a list encoding the most voted for emotion. Sample input:", "src import em_constants as emc def is_outlier(wav, lower, upper): \"\"\" Checks if an", ":param wav: The audio time series data points :param lower: The lower bound", "as np from src import em_constants as emc def is_outlier(wav, lower, upper): \"\"\"", "output: [0, 4] :param k_hot_label: A list of the k-hot encoded label :return:", "Represents a label in a filename-friendly format. 
Mostly used in the \"read_to_melspecgram()\" function", "fit with the set of labels # that we're considering so drop it", "= emo_count # Only count the emotions with the highest amount of votes", "Only count the emotions with the highest amount of votes k_hot_label = k_hot_label", "format. Mostly used in the \"read_to_melspecgram()\" function to write out labels in the", "= np.floor(k_hot_label).astype(int) # If they're all zero, then this sample doesn't fit with", "\"\"\" import numpy as np from src import em_constants as emc def is_outlier(wav,", "highest amount of votes k_hot_label = k_hot_label / np.max(k_hot_label) k_hot_label = np.floor(k_hot_label).astype(int) #", "Checks if an audio sample is an outlier. Bounds are inclusive. :param wav:", "delimiter used in the filename :param index: Where in the filename the label/emotion", "of votes k_hot_label = k_hot_label / np.max(k_hot_label) k_hot_label = np.floor(k_hot_label).astype(int) # If they're", "import numpy as np from src import em_constants as emc def is_outlier(wav, lower,", "sample's filename :param delimiter: The delimiter used in the filename :param index: Where", "quicker method of encoding if len(label) == 1: return _one_hot_encode_label(label) # Convert the", "== 1: return _one_hot_encode_label(label) # Convert the emotion numbers into an array where", "False if the label is unused \"\"\" # If there's only one label/vote,", "k-hot encoded label from a sample's filename. :param filename: The sample's filename :param", "delimiter: The delimiter used in the filename :param index: Where in the filename", "list with one label (length is one) :return: One-hot encoding of the label", "label. Private function to quickly one-hot encode a label. Sample input: [4] Sample", "encoding the most voted for emotion. 
Sample input: [0, 1, 2, 0, 6,", "the emotions with the highest amount of votes k_hot_label = k_hot_label / np.max(k_hot_label)", "with one label (length is one) :return: One-hot encoding of the label \"\"\"", "the emotion # and the value is the number of votes for that", "filename :param index: Where in the filename the label/emotion is located :param db_emo_map:", "encodes a label. Private function to quickly one-hot encode a label. Sample input:", "into emotion ids. Sample input: [1, 0, 0, 0, 1, 0, 0] Sample", "input: [4] Sample output: [0, 0, 0, 0, 1, 0, 0] :param label:", "The database-specific emotion mapping :return: The label k-hot encoded to this program's standard", "0.] Sample output: \"1_0_0_0_0_0_0\" :param label: Numpy array representing the k-hot encoded label", "then use the quicker method of encoding if len(label) == 1: return _one_hot_encode_label(label)", "emo_count # Only count the emotions with the highest amount of votes k_hot_label", "= filename.split(delimiter)[index] standard_emotion = db_emo_map[label] emotion_id = [emc.EMOTION_MAP[standard_emotion]] return k_hot_encode_label(emotion_id) def repr_label(label): \"\"\"", "hot encodes a label. Private function to quickly one-hot encode a label. Sample", "quickly one-hot encode a label. Sample input: [4] Sample output: [0, 0, 0,", "# Convert the emotion numbers into an array where the index is the", "def repr_label(label): \"\"\" Represents a label in a filename-friendly format. 
Mostly used in", "Sample output: [1, 0, 1, 0, 0, 0, 0] :param label: List of", "0, 1, 0, 0] Sample output: [0, 4] :param k_hot_label: A list of", ":param label: List of labels to encode :return: List of k-hot encoded labels", "[0, 0, 0, 0, 1, 0, 0] :param label: A list with one", "0] :param label: A list with one label (length is one) :return: One-hot", "Sample input: [4] Sample output: [0, 0, 0, 0, 1, 0, 0] :param", ":param upper: The upper bound :return: Boolean \"\"\" return False if lower <=", "np from src import em_constants as emc def is_outlier(wav, lower, upper): \"\"\" Checks", "in the filename. Sample input: [1. 0. 0. 0. 0. 0. 0.] Sample", "lower, upper): \"\"\" Checks if an audio sample is an outlier. Bounds are", "return False return k_hot_label def _one_hot_encode_label(label): \"\"\" One hot encodes a label. Private", "encodes a label. Takes a list of emotion IDs and returns a list", "\"_\".join(str(emo) for emo in label) def k_hot_encode_label(label): \"\"\" K-hot encodes a label. Takes", "return False if lower <= len(wav) <= upper else True def get_label(filename, delimiter,", "0. 0.] Sample output: \"1_0_0_0_0_0_0\" :param label: Numpy array representing the k-hot encoded", "emo in label) def k_hot_encode_label(label): \"\"\" K-hot encodes a label. 
Takes a list", "only one label/vote, then use the quicker method of encoding if len(label) ==", "series data points :param lower: The lower bound :param upper: The upper bound", "the label is unused \"\"\" # If there's only one label/vote, then use", "audio time series data points :param lower: The lower bound :param upper: The", "= np.zeros(emc.NUM_EMOTIONS, dtype=int) one_hot_label[label[0]] = 1 return one_hot_label def inverse_k_hot_encode_label(k_hot_label): \"\"\" Inverses a", "located :param db_emo_map: The database-specific emotion mapping :return: The label k-hot encoded to", "is located :param db_emo_map: The database-specific emotion mapping :return: The label k-hot encoded", "it if not np.any(k_hot_label): print(\"No usable label.\") return False return k_hot_label def _one_hot_encode_label(label):", "with the set of labels # that we're considering so drop it if", "representing the k-hot encoded label :return: String representation of the label \"\"\" return", "index: Where in the filename the label/emotion is located :param db_emo_map: The database-specific", "emotion mapping :return: The label k-hot encoded to this program's standard emotion map", "the k-hot encoded label from a sample's filename. :param filename: The sample's filename", "a label. Sample input: [4] Sample output: [0, 0, 0, 0, 1, 0,", "input: [0, 1, 2, 0, 6, 2] Sample output: [1, 0, 1, 0,", "label. Sample input: [4] Sample output: [0, 0, 0, 0, 1, 0, 0]", "label.\") return False return k_hot_label def _one_hot_encode_label(label): \"\"\" One hot encodes a label.", "bound :return: Boolean \"\"\" return False if lower <= len(wav) <= upper else", "zero, then this sample doesn't fit with the set of labels # that", "encoded label from a sample's filename. :param filename: The sample's filename :param delimiter:", "labels in the filename. Sample input: [1. 0. 0. 0. 0. 0. 0.]", "such as calculating statistics. 
\"\"\" import numpy as np from src import em_constants", "0, 0] :param label: List of labels to encode :return: List of k-hot", "k-hot encoded to this program's standard emotion map or False if the label", "where the index is the emotion # and the value is the number", "Numpy array representing the k-hot encoded label :return: String representation of the label", "label: Numpy array representing the k-hot encoded label :return: String representation of the", "if not np.any(k_hot_label): print(\"No usable label.\") return False return k_hot_label def _one_hot_encode_label(label): \"\"\"", "1 return one_hot_label def inverse_k_hot_encode_label(k_hot_label): \"\"\" Inverses a k-hot encoded label back into", "the value is the number of votes for that emotion unique, counts =", "emotion map or False if the label doesn't map to the standard emotions", "the set of labels # that we're considering so drop it if not", "label in a filename-friendly format. Mostly used in the \"read_to_melspecgram()\" function to write", "\"\"\" One hot encodes a label. 
Private function to quickly one-hot encode a", ":param filename: The sample's filename :param delimiter: The delimiter used in the filename", "Convert the emotion numbers into an array where the index is the emotion", "1, 0, 0] :param label: A list with one label (length is one)", "The label k-hot encoded to this program's standard emotion map or False if", "2] Sample output: [1, 0, 1, 0, 0, 0, 0] :param label: List", "emo_index, emo_count in zip(unique, counts): k_hot_label[emo_index] = emo_count # Only count the emotions", "db_emo_map: The database-specific emotion mapping :return: The label k-hot encoded to this program's", "standard_emotion = db_emo_map[label] emotion_id = [emc.EMOTION_MAP[standard_emotion]] return k_hot_encode_label(emotion_id) def repr_label(label): \"\"\" Represents a", "label/vote, then use the quicker method of encoding if len(label) == 1: return", "the standard emotions \"\"\" label = filename.split(delimiter)[index] standard_emotion = db_emo_map[label] emotion_id = [emc.EMOTION_MAP[standard_emotion]]", "one) :return: One-hot encoding of the label \"\"\" one_hot_label = np.zeros(emc.NUM_EMOTIONS, dtype=int) one_hot_label[label[0]]", "a list of emotion IDs and returns a list encoding the most voted", "value is the number of votes for that emotion unique, counts = np.unique(label,", "write out labels in the filename. Sample input: [1. 0. 0. 0. 
0.", "one label (length is one) :return: One-hot encoding of the label \"\"\" one_hot_label", "if lower <= len(wav) <= upper else True def get_label(filename, delimiter, index, db_emo_map):", "0, 1, 0, 0, 0, 0] :param label: List of labels to encode", "A list of the emotion ids in the label \"\"\" return np.where(k_hot_label ==", "return one_hot_label def inverse_k_hot_encode_label(k_hot_label): \"\"\" Inverses a k-hot encoded label back into emotion", "Sample input: [1, 0, 0, 0, 1, 0, 0] Sample output: [0, 4]", "[1, 0, 0, 0, 1, 0, 0] Sample output: [0, 4] :param k_hot_label:", "then this sample doesn't fit with the set of labels # that we're", "filename-friendly format. Mostly used in the \"read_to_melspecgram()\" function to write out labels in", "considering so drop it if not np.any(k_hot_label): print(\"No usable label.\") return False return", "emo_count in zip(unique, counts): k_hot_label[emo_index] = emo_count # Only count the emotions with", "usable label.\") return False return k_hot_label def _one_hot_encode_label(label): \"\"\" One hot encodes a", "of labels to encode :return: List of k-hot encoded labels or False if", "from a sample's filename. :param filename: The sample's filename :param delimiter: The delimiter", "doesn't map to the standard emotions \"\"\" label = filename.split(delimiter)[index] standard_emotion = db_emo_map[label]", "for emotion. Sample input: [0, 1, 2, 0, 6, 2] Sample output: [1,", "or False if the label is unused \"\"\" # If there's only one", "for emo_index, emo_count in zip(unique, counts): k_hot_label[emo_index] = emo_count # Only count the", ":param index: Where in the filename the label/emotion is located :param db_emo_map: The", "def inverse_k_hot_encode_label(k_hot_label): \"\"\" Inverses a k-hot encoded label back into emotion ids. 
Sample", "k_hot_label: A list of the k-hot encoded label :return: A list of the", "delimiter, index, db_emo_map): \"\"\" Gets the k-hot encoded label from a sample's filename.", "lower <= len(wav) <= upper else True def get_label(filename, delimiter, index, db_emo_map): \"\"\"", "If they're all zero, then this sample doesn't fit with the set of", "label :return: A list of the emotion ids in the label \"\"\" return", "1, 0, 0, 0, 0] :param label: List of labels to encode :return:", "label \"\"\" one_hot_label = np.zeros(emc.NUM_EMOTIONS, dtype=int) one_hot_label[label[0]] = 1 return one_hot_label def inverse_k_hot_encode_label(k_hot_label):", "[0, 4] :param k_hot_label: A list of the k-hot encoded label :return: A", "0, 0, 0] :param label: List of labels to encode :return: List of", "emotion. Sample input: [0, 1, 2, 0, 6, 2] Sample output: [1, 0,", "0. 0. 0. 0. 0. 0.] Sample output: \"1_0_0_0_0_0_0\" :param label: Numpy array", "_one_hot_encode_label(label): \"\"\" One hot encodes a label. 
Private function to quickly one-hot encode", ":return: List of k-hot encoded labels or False if the label is unused", "the filename the label/emotion is located :param db_emo_map: The database-specific emotion mapping :return:", "len(label) == 1: return _one_hot_encode_label(label) # Convert the emotion numbers into an array", "True def get_label(filename, delimiter, index, db_emo_map): \"\"\" Gets the k-hot encoded label from", "list of the emotion ids in the label \"\"\" return np.where(k_hot_label == 1)[0]", "= np.zeros(emc.NUM_EMOTIONS) for emo_index, emo_count in zip(unique, counts): k_hot_label[emo_index] = emo_count # Only", "that emotion unique, counts = np.unique(label, return_counts=True) k_hot_label = np.zeros(emc.NUM_EMOTIONS) for emo_index, emo_count", "emotion # and the value is the number of votes for that emotion", "Takes a list of emotion IDs and returns a list encoding the most", "set of labels # that we're considering so drop it if not np.any(k_hot_label):", "and the value is the number of votes for that emotion unique, counts", "K-hot encodes a label. Takes a list of emotion IDs and returns a", "k_hot_label def _one_hot_encode_label(label): \"\"\" One hot encodes a label. Private function to quickly", "1: return _one_hot_encode_label(label) # Convert the emotion numbers into an array where the", "this sample doesn't fit with the set of labels # that we're considering", "for emo in label) def k_hot_encode_label(label): \"\"\" K-hot encodes a label. Takes a", "label \"\"\" return \"_\".join(str(emo) for emo in label) def k_hot_encode_label(label): \"\"\" K-hot encodes", "calculating statistics. \"\"\" import numpy as np from src import em_constants as emc", "False return k_hot_label def _one_hot_encode_label(label): \"\"\" One hot encodes a label. Private function" ]
[ "= [ path('', views.index, name='index'), path('sign-in', views.signup, name='sign-in'), path('home-page', views.loggedIn, name='home-page'), path('log-out', views.logout_view,", "include from . import views urlpatterns = [ path('', views.index, name='index'), path('sign-in', views.signup,", "from . import views urlpatterns = [ path('', views.index, name='index'), path('sign-in', views.signup, name='sign-in'),", "path, include from . import views urlpatterns = [ path('', views.index, name='index'), path('sign-in',", "import path, include from . import views urlpatterns = [ path('', views.index, name='index'),", ". import views urlpatterns = [ path('', views.index, name='index'), path('sign-in', views.signup, name='sign-in'), path('home-page',", "from django.conf.urls import url from django.urls import path, include from . import views", "django.urls import path, include from . import views urlpatterns = [ path('', views.index,", "from django.urls import path, include from . import views urlpatterns = [ path('',", "django.conf.urls import url from django.urls import path, include from . 
import views urlpatterns", "urlpatterns = [ path('', views.index, name='index'), path('sign-in', views.signup, name='sign-in'), path('home-page', views.loggedIn, name='home-page'), path('log-out',", "path('', views.index, name='index'), path('sign-in', views.signup, name='sign-in'), path('home-page', views.loggedIn, name='home-page'), path('log-out', views.logout_view, name='log-out'), ]", "[ path('', views.index, name='index'), path('sign-in', views.signup, name='sign-in'), path('home-page', views.loggedIn, name='home-page'), path('log-out', views.logout_view, name='log-out'),", "import views urlpatterns = [ path('', views.index, name='index'), path('sign-in', views.signup, name='sign-in'), path('home-page', views.loggedIn,", "views urlpatterns = [ path('', views.index, name='index'), path('sign-in', views.signup, name='sign-in'), path('home-page', views.loggedIn, name='home-page'),", "<gh_stars>0 from django.conf.urls import url from django.urls import path, include from . import", "url from django.urls import path, include from . import views urlpatterns = [", "import url from django.urls import path, include from . import views urlpatterns =" ]
[]
[ "with self.assertRaises(RuntimeError): model.alpha = random.rand(K + 1) alpha = random.rand(K, 1) model.alpha =", "1) alpha = random.rand(K, 1) model.alpha = alpha self.assertLess(max(abs(model.alpha.ravel() - alpha.ravel())), 1e-20) def", "'w') as handle: dump({'model': model0}, handle) # load model with open(tmp_file) as handle:", "in the right direction self.assertGreater(model.alpha[0], model.alpha[1]) self.assertLess(model.alpha[0], 4.) self.assertLess(model.alpha[1], 4.) def test_empirical_bayes_eta(self): for", "numpy import corrcoef, random, abs, max, asarray, round, zeros_like from trlda.models import BatchLDA", "3.1 model = BatchLDA(num_words=W, num_topics=K, alpha=alpha, eta=eta) self.assertEqual(K, model.num_topics) self.assertEqual(K, model.alpha.size) self.assertEqual(W, model.num_words)", "D = 1010 K = 11 alpha = .27 eta = 3.1 model", "# this will sample a beta with the given eta model.lambdas = zeros_like(model.lambdas)", "eta), abs(model.eta - initial_eta)) def test_pickle(self): model0 = BatchLDA( num_words=300, num_topics=50, alpha=random.rand(), eta=random.rand())", "load, dump from tempfile import mkstemp from random import choice, randint from string", "1010 K = 11 alpha = .27 eta = 3.1 model = BatchLDA(num_words=W,", "model = BatchLDA( num_words=4, num_topics=2, alpha=[.2, .05], eta=.2) model.lambdas = [ [100, 100,", ".2), (.41, .2)]: model = BatchLDA( num_words=100, num_topics=10, alpha=[.1, .1], eta=initial_eta) # this", "= random.rand(K + 1) alpha = random.rand(K, 1) model.alpha = alpha self.assertLess(max(abs(model.alpha.ravel() -", "update_eta=True, emp_bayes_threshold=0.) 
# optimization should at least walk in the right direction and", "max, asarray, round, zeros_like from trlda.models import BatchLDA from trlda.utils import sample_dirichlet class", "walk in the right direction and don't explode self.assertLess(abs(model.eta - eta), abs(model.eta -", "in the right direction and don't explode self.assertLess(abs(model.eta - eta), abs(model.eta - initial_eta))", "model1.lambdas)), 1e-20) self.assertLess(max(abs(model0.alpha - model1.alpha)), 1e-20) self.assertLess(abs(model0.eta - model1.eta), 1e-20) if __name__ ==", "model0}, handle) # load model with open(tmp_file) as handle: model1 = load(handle)['model'] #", "self.assertEqual(eta, model.eta) with self.assertRaises(RuntimeError): model.alpha = random.rand(K + 1) alpha = random.rand(K, 1)", "dump from tempfile import mkstemp from random import choice, randint from string import", "model.lambdas = zeros_like(model.lambdas) + eta documents = model.sample(500, 10) model.update_parameters(documents, max_epochs=10, update_eta=True, emp_bayes_threshold=0.)", "wrong values model.alpha = [4., 4.] model.update_parameters(documents, max_epochs=10, max_iter_inference=200, update_lambda=False, update_alpha=True, emp_bayes_threshold=0.) 
#", "K = 11 alpha = .27 eta = 3.1 model = BatchLDA(num_words=W, num_topics=K,", "1) model.alpha = alpha self.assertLess(max(abs(model.alpha.ravel() - alpha.ravel())), 1e-20) def test_empirical_bayes_alpha(self): model = BatchLDA(", "eta, initial_eta in [(.045, .2), (.41, .2)]: model = BatchLDA( num_words=100, num_topics=10, alpha=[.1,", "model = BatchLDA( num_words=100, num_topics=10, alpha=[.1, .1], eta=initial_eta) # this will sample a", "mkstemp()[1] # save model with open(tmp_file, 'w') as handle: dump({'model': model0}, handle) #", "model1.num_words) self.assertEqual(model0.num_topics, model1.num_topics) self.assertLess(max(abs(model0.lambdas - model1.lambdas)), 1e-20) self.assertLess(max(abs(model0.alpha - model1.alpha)), 1e-20) self.assertLess(abs(model0.eta -", ".27 eta = 3.1 model = BatchLDA(num_words=W, num_topics=K, alpha=alpha, eta=eta) self.assertEqual(K, model.num_topics) self.assertEqual(K,", "Bayes went in the right direction self.assertGreater(model.alpha[0], model.alpha[1]) self.assertLess(model.alpha[0], 4.) self.assertLess(model.alpha[1], 4.) 
def", "- alpha.ravel())), 1e-20) def test_empirical_bayes_alpha(self): model = BatchLDA( num_words=4, num_topics=2, alpha=[.2, .05], eta=.2)", "= BatchLDA( num_words=300, num_topics=50, alpha=random.rand(), eta=random.rand()) tmp_file = mkstemp()[1] # save model with", "self.assertLess(max(abs(model.alpha.ravel() - alpha.ravel())), 1e-20) def test_empirical_bayes_alpha(self): model = BatchLDA( num_words=4, num_topics=2, alpha=[.2, .05],", "= load(handle)['model'] # make sure parameters haven't changed self.assertEqual(model0.num_words, model1.num_words) self.assertEqual(model0.num_topics, model1.num_topics) self.assertLess(max(abs(model0.lambdas", "from tempfile import mkstemp from random import choice, randint from string import ascii_letters", "# save model with open(tmp_file, 'w') as handle: dump({'model': model0}, handle) # load", "self.assertEqual(W, model.num_words) self.assertEqual(alpha, model.alpha.ravel()[randint(0, K - 1)]) self.assertEqual(eta, model.eta) with self.assertRaises(RuntimeError): model.alpha =", "load model with open(tmp_file) as handle: model1 = load(handle)['model'] # make sure parameters", "test_empirical_bayes_alpha(self): model = BatchLDA( num_words=4, num_topics=2, alpha=[.2, .05], eta=.2) model.lambdas = [ [100,", "num_words=300, num_topics=50, alpha=random.rand(), eta=random.rand()) tmp_file = mkstemp()[1] # save model with open(tmp_file, 'w')", "self.assertLess(model.alpha[1], 4.) 
def test_empirical_bayes_eta(self): for eta, initial_eta in [(.045, .2), (.41, .2)]: model", "import choice, randint from string import ascii_letters from numpy import corrcoef, random, abs,", "documents = model.sample(num_documents=100, length=20) # set alpha to wrong values model.alpha = [4.,", "num_topics=2, alpha=[.2, .05], eta=.2) model.lambdas = [ [100, 100, 1e-16, 1e-16], [1e-16, 1e-16,", "at least walk in the right direction and don't explode self.assertLess(abs(model.eta - eta),", "Tests(unittest.TestCase): def test_basics(self): W = 102 D = 1010 K = 11 alpha", "test_empirical_bayes_eta(self): for eta, initial_eta in [(.045, .2), (.41, .2)]: model = BatchLDA( num_words=100,", "num_topics=50, alpha=random.rand(), eta=random.rand()) tmp_file = mkstemp()[1] # save model with open(tmp_file, 'w') as", "import sample_dirichlet class Tests(unittest.TestCase): def test_basics(self): W = 102 D = 1010 K", "model.alpha.size) self.assertEqual(W, model.num_words) self.assertEqual(alpha, model.alpha.ravel()[randint(0, K - 1)]) self.assertEqual(eta, model.eta) with self.assertRaises(RuntimeError): model.alpha", "- 1)]) self.assertEqual(eta, model.eta) with self.assertRaises(RuntimeError): model.alpha = random.rand(K + 1) alpha =", "self.assertGreater(model.alpha[0], model.alpha[1]) self.assertLess(model.alpha[0], 4.) self.assertLess(model.alpha[1], 4.) def test_empirical_bayes_eta(self): for eta, initial_eta in [(.045,", "10) model.update_parameters(documents, max_epochs=10, update_eta=True, emp_bayes_threshold=0.) 
# optimization should at least walk in the", "model with open(tmp_file) as handle: model1 = load(handle)['model'] # make sure parameters haven't", "(.41, .2)]: model = BatchLDA( num_words=100, num_topics=10, alpha=[.1, .1], eta=initial_eta) # this will", "random, abs, max, asarray, round, zeros_like from trlda.models import BatchLDA from trlda.utils import", "self.assertEqual(alpha, model.alpha.ravel()[randint(0, K - 1)]) self.assertEqual(eta, model.eta) with self.assertRaises(RuntimeError): model.alpha = random.rand(K +", "explode self.assertLess(abs(model.eta - eta), abs(model.eta - initial_eta)) def test_pickle(self): model0 = BatchLDA( num_words=300,", "given eta model.lambdas = zeros_like(model.lambdas) + eta documents = model.sample(500, 10) model.update_parameters(documents, max_epochs=10,", "BatchLDA( num_words=100, num_topics=10, alpha=[.1, .1], eta=initial_eta) # this will sample a beta with", "with open(tmp_file) as handle: model1 = load(handle)['model'] # make sure parameters haven't changed", "sure parameters haven't changed self.assertEqual(model0.num_words, model1.num_words) self.assertEqual(model0.num_topics, model1.num_topics) self.assertLess(max(abs(model0.lambdas - model1.lambdas)), 1e-20) self.assertLess(max(abs(model0.alpha", "= zeros_like(model.lambdas) + eta documents = model.sample(500, 10) model.update_parameters(documents, max_epochs=10, update_eta=True, emp_bayes_threshold=0.) 
#", "100, 100]] documents = model.sample(num_documents=100, length=20) # set alpha to wrong values model.alpha", "= model.sample(num_documents=100, length=20) # set alpha to wrong values model.alpha = [4., 4.]", "load(handle)['model'] # make sure parameters haven't changed self.assertEqual(model0.num_words, model1.num_words) self.assertEqual(model0.num_topics, model1.num_topics) self.assertLess(max(abs(model0.lambdas -", "+ 1) alpha = random.rand(K, 1) model.alpha = alpha self.assertLess(max(abs(model.alpha.ravel() - alpha.ravel())), 1e-20)", "def test_basics(self): W = 102 D = 1010 K = 11 alpha =", "randint from string import ascii_letters from numpy import corrcoef, random, abs, max, asarray,", "4.) self.assertLess(model.alpha[1], 4.) def test_empirical_bayes_eta(self): for eta, initial_eta in [(.045, .2), (.41, .2)]:", "choice, randint from string import ascii_letters from numpy import corrcoef, random, abs, max,", "went in the right direction self.assertGreater(model.alpha[0], model.alpha[1]) self.assertLess(model.alpha[0], 4.) self.assertLess(model.alpha[1], 4.) 
def test_empirical_bayes_eta(self):", "= 3.1 model = BatchLDA(num_words=W, num_topics=K, alpha=alpha, eta=eta) self.assertEqual(K, model.num_topics) self.assertEqual(K, model.alpha.size) self.assertEqual(W,", "the right direction and don't explode self.assertLess(abs(model.eta - eta), abs(model.eta - initial_eta)) def", "the given eta model.lambdas = zeros_like(model.lambdas) + eta documents = model.sample(500, 10) model.update_parameters(documents,", "eta=.2) model.lambdas = [ [100, 100, 1e-16, 1e-16], [1e-16, 1e-16, 100, 100]] documents", "[1e-16, 1e-16, 100, 100]] documents = model.sample(num_documents=100, length=20) # set alpha to wrong", "model1 = load(handle)['model'] # make sure parameters haven't changed self.assertEqual(model0.num_words, model1.num_words) self.assertEqual(model0.num_topics, model1.num_topics)", "model.sample(num_documents=100, length=20) # set alpha to wrong values model.alpha = [4., 4.] model.update_parameters(documents,", "right direction self.assertGreater(model.alpha[0], model.alpha[1]) self.assertLess(model.alpha[0], 4.) self.assertLess(model.alpha[1], 4.) def test_empirical_bayes_eta(self): for eta, initial_eta", "values model.alpha = [4., 4.] model.update_parameters(documents, max_epochs=10, max_iter_inference=200, update_lambda=False, update_alpha=True, emp_bayes_threshold=0.) # make", "# make sure empirical Bayes went in the right direction self.assertGreater(model.alpha[0], model.alpha[1]) self.assertLess(model.alpha[0],", "= model.sample(500, 10) model.update_parameters(documents, max_epochs=10, update_eta=True, emp_bayes_threshold=0.) 
# optimization should at least walk", "BatchLDA from trlda.utils import sample_dirichlet class Tests(unittest.TestCase): def test_basics(self): W = 102 D", "optimization should at least walk in the right direction and don't explode self.assertLess(abs(model.eta", "and don't explode self.assertLess(abs(model.eta - eta), abs(model.eta - initial_eta)) def test_pickle(self): model0 =", "= 1010 K = 11 alpha = .27 eta = 3.1 model =", "trlda.utils import sample_dirichlet class Tests(unittest.TestCase): def test_basics(self): W = 102 D = 1010", "sample_dirichlet class Tests(unittest.TestCase): def test_basics(self): W = 102 D = 1010 K =", "initial_eta in [(.045, .2), (.41, .2)]: model = BatchLDA( num_words=100, num_topics=10, alpha=[.1, .1],", "num_topics=K, alpha=alpha, eta=eta) self.assertEqual(K, model.num_topics) self.assertEqual(K, model.alpha.size) self.assertEqual(W, model.num_words) self.assertEqual(alpha, model.alpha.ravel()[randint(0, K -", "import unittest from time import time from pickle import load, dump from tempfile", "round, zeros_like from trlda.models import BatchLDA from trlda.utils import sample_dirichlet class Tests(unittest.TestCase): def", "= BatchLDA(num_words=W, num_topics=K, alpha=alpha, eta=eta) self.assertEqual(K, model.num_topics) self.assertEqual(K, model.alpha.size) self.assertEqual(W, model.num_words) self.assertEqual(alpha, model.alpha.ravel()[randint(0,", "length=20) # set alpha to wrong values model.alpha = [4., 4.] model.update_parameters(documents, max_epochs=10,", "sure empirical Bayes went in the right direction self.assertGreater(model.alpha[0], model.alpha[1]) self.assertLess(model.alpha[0], 4.) 
self.assertLess(model.alpha[1],", "num_topics=10, alpha=[.1, .1], eta=initial_eta) # this will sample a beta with the given", "should at least walk in the right direction and don't explode self.assertLess(abs(model.eta -", "from time import time from pickle import load, dump from tempfile import mkstemp", "import ascii_letters from numpy import corrcoef, random, abs, max, asarray, round, zeros_like from", "- model1.lambdas)), 1e-20) self.assertLess(max(abs(model0.alpha - model1.alpha)), 1e-20) self.assertLess(abs(model0.eta - model1.eta), 1e-20) if __name__", "1)]) self.assertEqual(eta, model.eta) with self.assertRaises(RuntimeError): model.alpha = random.rand(K + 1) alpha = random.rand(K,", "1e-16, 100, 100]] documents = model.sample(num_documents=100, length=20) # set alpha to wrong values", "K - 1)]) self.assertEqual(eta, model.eta) with self.assertRaises(RuntimeError): model.alpha = random.rand(K + 1) alpha", "model.alpha[1]) self.assertLess(model.alpha[0], 4.) self.assertLess(model.alpha[1], 4.) def test_empirical_bayes_eta(self): for eta, initial_eta in [(.045, .2),", "time from pickle import load, dump from tempfile import mkstemp from random import", "don't explode self.assertLess(abs(model.eta - eta), abs(model.eta - initial_eta)) def test_pickle(self): model0 = BatchLDA(", "= 102 D = 1010 K = 11 alpha = .27 eta =", "alpha.ravel())), 1e-20) def test_empirical_bayes_alpha(self): model = BatchLDA( num_words=4, num_topics=2, alpha=[.2, .05], eta=.2) model.lambdas", "max_iter_inference=200, update_lambda=False, update_alpha=True, emp_bayes_threshold=0.) 
# make sure empirical Bayes went in the right", "model.num_topics) self.assertEqual(K, model.alpha.size) self.assertEqual(W, model.num_words) self.assertEqual(alpha, model.alpha.ravel()[randint(0, K - 1)]) self.assertEqual(eta, model.eta) with", "make sure empirical Bayes went in the right direction self.assertGreater(model.alpha[0], model.alpha[1]) self.assertLess(model.alpha[0], 4.)", "= 11 alpha = .27 eta = 3.1 model = BatchLDA(num_words=W, num_topics=K, alpha=alpha,", "1e-16], [1e-16, 1e-16, 100, 100]] documents = model.sample(num_documents=100, length=20) # set alpha to", "from numpy import corrcoef, random, abs, max, asarray, round, zeros_like from trlda.models import", "max_epochs=10, update_eta=True, emp_bayes_threshold=0.) # optimization should at least walk in the right direction", "from random import choice, randint from string import ascii_letters from numpy import corrcoef,", "mkstemp from random import choice, randint from string import ascii_letters from numpy import", "model with open(tmp_file, 'w') as handle: dump({'model': model0}, handle) # load model with", "model.sample(500, 10) model.update_parameters(documents, max_epochs=10, update_eta=True, emp_bayes_threshold=0.) 
# optimization should at least walk in", "least walk in the right direction and don't explode self.assertLess(abs(model.eta - eta), abs(model.eta", "dump({'model': model0}, handle) # load model with open(tmp_file) as handle: model1 = load(handle)['model']", "model.num_words) self.assertEqual(alpha, model.alpha.ravel()[randint(0, K - 1)]) self.assertEqual(eta, model.eta) with self.assertRaises(RuntimeError): model.alpha = random.rand(K", "right direction and don't explode self.assertLess(abs(model.eta - eta), abs(model.eta - initial_eta)) def test_pickle(self):", "eta=random.rand()) tmp_file = mkstemp()[1] # save model with open(tmp_file, 'w') as handle: dump({'model':", "BatchLDA( num_words=300, num_topics=50, alpha=random.rand(), eta=random.rand()) tmp_file = mkstemp()[1] # save model with open(tmp_file,", "alpha=random.rand(), eta=random.rand()) tmp_file = mkstemp()[1] # save model with open(tmp_file, 'w') as handle:", "emp_bayes_threshold=0.) # make sure empirical Bayes went in the right direction self.assertGreater(model.alpha[0], model.alpha[1])", "def test_empirical_bayes_alpha(self): model = BatchLDA( num_words=4, num_topics=2, alpha=[.2, .05], eta=.2) model.lambdas = [", "model = BatchLDA(num_words=W, num_topics=K, alpha=alpha, eta=eta) self.assertEqual(K, model.num_topics) self.assertEqual(K, model.alpha.size) self.assertEqual(W, model.num_words) self.assertEqual(alpha,", "ascii_letters from numpy import corrcoef, random, abs, max, asarray, round, zeros_like from trlda.models", "= BatchLDA( num_words=4, num_topics=2, alpha=[.2, .05], eta=.2) model.lambdas = [ [100, 100, 1e-16,", "[100, 100, 1e-16, 1e-16], [1e-16, 1e-16, 100, 100]] documents = model.sample(num_documents=100, length=20) #", "= mkstemp()[1] # save model with open(tmp_file, 'w') as handle: dump({'model': model0}, handle)", "sample a beta with the given eta model.lambdas = zeros_like(model.lambdas) + eta documents", "abs(model.eta - initial_eta)) def test_pickle(self): model0 = BatchLDA( 
num_words=300, num_topics=50, alpha=random.rand(), eta=random.rand()) tmp_file", "self.assertRaises(RuntimeError): model.alpha = random.rand(K + 1) alpha = random.rand(K, 1) model.alpha = alpha", "100]] documents = model.sample(num_documents=100, length=20) # set alpha to wrong values model.alpha =", "handle: model1 = load(handle)['model'] # make sure parameters haven't changed self.assertEqual(model0.num_words, model1.num_words) self.assertEqual(model0.num_topics,", "update_alpha=True, emp_bayes_threshold=0.) # make sure empirical Bayes went in the right direction self.assertGreater(model.alpha[0],", "11 alpha = .27 eta = 3.1 model = BatchLDA(num_words=W, num_topics=K, alpha=alpha, eta=eta)", "will sample a beta with the given eta model.lambdas = zeros_like(model.lambdas) + eta", "model.eta) with self.assertRaises(RuntimeError): model.alpha = random.rand(K + 1) alpha = random.rand(K, 1) model.alpha", "to wrong values model.alpha = [4., 4.] model.update_parameters(documents, max_epochs=10, max_iter_inference=200, update_lambda=False, update_alpha=True, emp_bayes_threshold=0.)", "= random.rand(K, 1) model.alpha = alpha self.assertLess(max(abs(model.alpha.ravel() - alpha.ravel())), 1e-20) def test_empirical_bayes_alpha(self): model", "- initial_eta)) def test_pickle(self): model0 = BatchLDA( num_words=300, num_topics=50, alpha=random.rand(), eta=random.rand()) tmp_file =", "haven't changed self.assertEqual(model0.num_words, model1.num_words) self.assertEqual(model0.num_topics, model1.num_topics) self.assertLess(max(abs(model0.lambdas - model1.lambdas)), 1e-20) self.assertLess(max(abs(model0.alpha - model1.alpha)),", "tempfile import mkstemp from random import choice, randint from string import ascii_letters from", "zeros_like from trlda.models import BatchLDA from trlda.utils import sample_dirichlet class Tests(unittest.TestCase): def test_basics(self):", "import load, dump from tempfile import mkstemp from random import choice, randint from", "string import 
ascii_letters from numpy import corrcoef, random, abs, max, asarray, round, zeros_like", "eta model.lambdas = zeros_like(model.lambdas) + eta documents = model.sample(500, 10) model.update_parameters(documents, max_epochs=10, update_eta=True,", "4.) def test_empirical_bayes_eta(self): for eta, initial_eta in [(.045, .2), (.41, .2)]: model =", "alpha=[.1, .1], eta=initial_eta) # this will sample a beta with the given eta", "def test_pickle(self): model0 = BatchLDA( num_words=300, num_topics=50, alpha=random.rand(), eta=random.rand()) tmp_file = mkstemp()[1] #", "def test_empirical_bayes_eta(self): for eta, initial_eta in [(.045, .2), (.41, .2)]: model = BatchLDA(", "= alpha self.assertLess(max(abs(model.alpha.ravel() - alpha.ravel())), 1e-20) def test_empirical_bayes_alpha(self): model = BatchLDA( num_words=4, num_topics=2,", "random.rand(K + 1) alpha = random.rand(K, 1) model.alpha = alpha self.assertLess(max(abs(model.alpha.ravel() - alpha.ravel())),", "make sure parameters haven't changed self.assertEqual(model0.num_words, model1.num_words) self.assertEqual(model0.num_topics, model1.num_topics) self.assertLess(max(abs(model0.lambdas - model1.lambdas)), 1e-20)", "alpha = random.rand(K, 1) model.alpha = alpha self.assertLess(max(abs(model.alpha.ravel() - alpha.ravel())), 1e-20) def test_empirical_bayes_alpha(self):", "self.assertEqual(model0.num_topics, model1.num_topics) self.assertLess(max(abs(model0.lambdas - model1.lambdas)), 1e-20) self.assertLess(max(abs(model0.alpha - model1.alpha)), 1e-20) self.assertLess(abs(model0.eta - model1.eta),", "BatchLDA(num_words=W, num_topics=K, alpha=alpha, eta=eta) self.assertEqual(K, model.num_topics) self.assertEqual(K, model.alpha.size) self.assertEqual(W, model.num_words) self.assertEqual(alpha, model.alpha.ravel()[randint(0, K", "model.alpha.ravel()[randint(0, K - 1)]) self.assertEqual(eta, model.eta) with self.assertRaises(RuntimeError): model.alpha = random.rand(K + 1)", "100, 1e-16, 1e-16], [1e-16, 1e-16, 100, 100]] 
documents = model.sample(num_documents=100, length=20) # set", "num_words=4, num_topics=2, alpha=[.2, .05], eta=.2) model.lambdas = [ [100, 100, 1e-16, 1e-16], [1e-16,", "unittest from time import time from pickle import load, dump from tempfile import", "from pickle import load, dump from tempfile import mkstemp from random import choice,", "102 D = 1010 K = 11 alpha = .27 eta = 3.1", "1e-16, 1e-16], [1e-16, 1e-16, 100, 100]] documents = model.sample(num_documents=100, length=20) # set alpha", "= BatchLDA( num_words=100, num_topics=10, alpha=[.1, .1], eta=initial_eta) # this will sample a beta", "as handle: model1 = load(handle)['model'] # make sure parameters haven't changed self.assertEqual(model0.num_words, model1.num_words)", "handle) # load model with open(tmp_file) as handle: model1 = load(handle)['model'] # make", "[(.045, .2), (.41, .2)]: model = BatchLDA( num_words=100, num_topics=10, alpha=[.1, .1], eta=initial_eta) #", "import mkstemp from random import choice, randint from string import ascii_letters from numpy", "# optimization should at least walk in the right direction and don't explode", "self.assertLess(max(abs(model0.lambdas - model1.lambdas)), 1e-20) self.assertLess(max(abs(model0.alpha - model1.alpha)), 1e-20) self.assertLess(abs(model0.eta - model1.eta), 1e-20) if", "save model with open(tmp_file, 'w') as handle: dump({'model': model0}, handle) # load model", "= .27 eta = 3.1 model = BatchLDA(num_words=W, num_topics=K, alpha=alpha, eta=eta) self.assertEqual(K, model.num_topics)", "documents = model.sample(500, 10) model.update_parameters(documents, max_epochs=10, update_eta=True, emp_bayes_threshold=0.) # optimization should at least", "BatchLDA( num_words=4, num_topics=2, alpha=[.2, .05], eta=.2) model.lambdas = [ [100, 100, 1e-16, 1e-16],", "model.alpha = [4., 4.] model.update_parameters(documents, max_epochs=10, max_iter_inference=200, update_lambda=False, update_alpha=True, emp_bayes_threshold=0.) 
# make sure", "alpha self.assertLess(max(abs(model.alpha.ravel() - alpha.ravel())), 1e-20) def test_empirical_bayes_alpha(self): model = BatchLDA( num_words=4, num_topics=2, alpha=[.2,", "update_lambda=False, update_alpha=True, emp_bayes_threshold=0.) # make sure empirical Bayes went in the right direction", "open(tmp_file, 'w') as handle: dump({'model': model0}, handle) # load model with open(tmp_file) as", "pickle import load, dump from tempfile import mkstemp from random import choice, randint", "alpha=alpha, eta=eta) self.assertEqual(K, model.num_topics) self.assertEqual(K, model.alpha.size) self.assertEqual(W, model.num_words) self.assertEqual(alpha, model.alpha.ravel()[randint(0, K - 1)])", "model0 = BatchLDA( num_words=300, num_topics=50, alpha=random.rand(), eta=random.rand()) tmp_file = mkstemp()[1] # save model", "# set alpha to wrong values model.alpha = [4., 4.] model.update_parameters(documents, max_epochs=10, max_iter_inference=200,", "random.rand(K, 1) model.alpha = alpha self.assertLess(max(abs(model.alpha.ravel() - alpha.ravel())), 1e-20) def test_empirical_bayes_alpha(self): model =", "zeros_like(model.lambdas) + eta documents = model.sample(500, 10) model.update_parameters(documents, max_epochs=10, update_eta=True, emp_bayes_threshold=0.) 
# optimization", "direction and don't explode self.assertLess(abs(model.eta - eta), abs(model.eta - initial_eta)) def test_pickle(self): model0", "self.assertEqual(K, model.alpha.size) self.assertEqual(W, model.num_words) self.assertEqual(alpha, model.alpha.ravel()[randint(0, K - 1)]) self.assertEqual(eta, model.eta) with self.assertRaises(RuntimeError):", "changed self.assertEqual(model0.num_words, model1.num_words) self.assertEqual(model0.num_topics, model1.num_topics) self.assertLess(max(abs(model0.lambdas - model1.lambdas)), 1e-20) self.assertLess(max(abs(model0.alpha - model1.alpha)), 1e-20)", "model.update_parameters(documents, max_epochs=10, max_iter_inference=200, update_lambda=False, update_alpha=True, emp_bayes_threshold=0.) # make sure empirical Bayes went in", "W = 102 D = 1010 K = 11 alpha = .27 eta", "import time from pickle import load, dump from tempfile import mkstemp from random", "handle: dump({'model': model0}, handle) # load model with open(tmp_file) as handle: model1 =", "eta=eta) self.assertEqual(K, model.num_topics) self.assertEqual(K, model.alpha.size) self.assertEqual(W, model.num_words) self.assertEqual(alpha, model.alpha.ravel()[randint(0, K - 1)]) self.assertEqual(eta,", "model.lambdas = [ [100, 100, 1e-16, 1e-16], [1e-16, 1e-16, 100, 100]] documents =", "from trlda.utils import sample_dirichlet class Tests(unittest.TestCase): def test_basics(self): W = 102 D =", "test_basics(self): W = 102 D = 1010 K = 11 alpha = .27", "the right direction self.assertGreater(model.alpha[0], model.alpha[1]) self.assertLess(model.alpha[0], 4.) self.assertLess(model.alpha[1], 4.) 
def test_empirical_bayes_eta(self): for eta,", "for eta, initial_eta in [(.045, .2), (.41, .2)]: model = BatchLDA( num_words=100, num_topics=10,", "import BatchLDA from trlda.utils import sample_dirichlet class Tests(unittest.TestCase): def test_basics(self): W = 102", ".05], eta=.2) model.lambdas = [ [100, 100, 1e-16, 1e-16], [1e-16, 1e-16, 100, 100]]", "with open(tmp_file, 'w') as handle: dump({'model': model0}, handle) # load model with open(tmp_file)", "tmp_file = mkstemp()[1] # save model with open(tmp_file, 'w') as handle: dump({'model': model0},", "abs, max, asarray, round, zeros_like from trlda.models import BatchLDA from trlda.utils import sample_dirichlet", "as handle: dump({'model': model0}, handle) # load model with open(tmp_file) as handle: model1", "beta with the given eta model.lambdas = zeros_like(model.lambdas) + eta documents = model.sample(500,", "emp_bayes_threshold=0.) # optimization should at least walk in the right direction and don't", "# make sure parameters haven't changed self.assertEqual(model0.num_words, model1.num_words) self.assertEqual(model0.num_topics, model1.num_topics) self.assertLess(max(abs(model0.lambdas - model1.lambdas)),", "random import choice, randint from string import ascii_letters from numpy import corrcoef, random,", "initial_eta)) def test_pickle(self): model0 = BatchLDA( num_words=300, num_topics=50, alpha=random.rand(), eta=random.rand()) tmp_file = mkstemp()[1]", "test_pickle(self): model0 = BatchLDA( num_words=300, num_topics=50, alpha=random.rand(), eta=random.rand()) tmp_file = mkstemp()[1] # save", "[ [100, 100, 1e-16, 1e-16], [1e-16, 1e-16, 100, 100]] documents = model.sample(num_documents=100, length=20)", "direction self.assertGreater(model.alpha[0], model.alpha[1]) self.assertLess(model.alpha[0], 4.) self.assertLess(model.alpha[1], 4.) 
def test_empirical_bayes_eta(self): for eta, initial_eta in", "self.assertEqual(model0.num_words, model1.num_words) self.assertEqual(model0.num_topics, model1.num_topics) self.assertLess(max(abs(model0.lambdas - model1.lambdas)), 1e-20) self.assertLess(max(abs(model0.alpha - model1.alpha)), 1e-20) self.assertLess(abs(model0.eta", "open(tmp_file) as handle: model1 = load(handle)['model'] # make sure parameters haven't changed self.assertEqual(model0.num_words,", "with the given eta model.lambdas = zeros_like(model.lambdas) + eta documents = model.sample(500, 10)", "trlda.models import BatchLDA from trlda.utils import sample_dirichlet class Tests(unittest.TestCase): def test_basics(self): W =", "from trlda.models import BatchLDA from trlda.utils import sample_dirichlet class Tests(unittest.TestCase): def test_basics(self): W", "corrcoef, random, abs, max, asarray, round, zeros_like from trlda.models import BatchLDA from trlda.utils", "parameters haven't changed self.assertEqual(model0.num_words, model1.num_words) self.assertEqual(model0.num_topics, model1.num_topics) self.assertLess(max(abs(model0.lambdas - model1.lambdas)), 1e-20) self.assertLess(max(abs(model0.alpha -", "self.assertLess(model.alpha[0], 4.) self.assertLess(model.alpha[1], 4.) def test_empirical_bayes_eta(self): for eta, initial_eta in [(.045, .2), (.41,", "num_words=100, num_topics=10, alpha=[.1, .1], eta=initial_eta) # this will sample a beta with the", "- eta), abs(model.eta - initial_eta)) def test_pickle(self): model0 = BatchLDA( num_words=300, num_topics=50, alpha=random.rand(),", "= [4., 4.] model.update_parameters(documents, max_epochs=10, max_iter_inference=200, update_lambda=False, update_alpha=True, emp_bayes_threshold=0.) # make sure empirical", "eta documents = model.sample(500, 10) model.update_parameters(documents, max_epochs=10, update_eta=True, emp_bayes_threshold=0.) 
# optimization should at", "asarray, round, zeros_like from trlda.models import BatchLDA from trlda.utils import sample_dirichlet class Tests(unittest.TestCase):", "self.assertEqual(K, model.num_topics) self.assertEqual(K, model.alpha.size) self.assertEqual(W, model.num_words) self.assertEqual(alpha, model.alpha.ravel()[randint(0, K - 1)]) self.assertEqual(eta, model.eta)", "self.assertLess(max(abs(model0.alpha - model1.alpha)), 1e-20) self.assertLess(abs(model0.eta - model1.eta), 1e-20) if __name__ == '__main__': unittest.main()", "time import time from pickle import load, dump from tempfile import mkstemp from", "self.assertLess(abs(model.eta - eta), abs(model.eta - initial_eta)) def test_pickle(self): model0 = BatchLDA( num_words=300, num_topics=50,", "empirical Bayes went in the right direction self.assertGreater(model.alpha[0], model.alpha[1]) self.assertLess(model.alpha[0], 4.) self.assertLess(model.alpha[1], 4.)", "1e-20) def test_empirical_bayes_alpha(self): model = BatchLDA( num_words=4, num_topics=2, alpha=[.2, .05], eta=.2) model.lambdas =", "eta = 3.1 model = BatchLDA(num_words=W, num_topics=K, alpha=alpha, eta=eta) self.assertEqual(K, model.num_topics) self.assertEqual(K, model.alpha.size)", "set alpha to wrong values model.alpha = [4., 4.] model.update_parameters(documents, max_epochs=10, max_iter_inference=200, update_lambda=False,", "1e-20) self.assertLess(max(abs(model0.alpha - model1.alpha)), 1e-20) self.assertLess(abs(model0.eta - model1.eta), 1e-20) if __name__ == '__main__':", "+ eta documents = model.sample(500, 10) model.update_parameters(documents, max_epochs=10, update_eta=True, emp_bayes_threshold=0.) 
# optimization should", "in [(.045, .2), (.41, .2)]: model = BatchLDA( num_words=100, num_topics=10, alpha=[.1, .1], eta=initial_eta)", "class Tests(unittest.TestCase): def test_basics(self): W = 102 D = 1010 K = 11", ".2)]: model = BatchLDA( num_words=100, num_topics=10, alpha=[.1, .1], eta=initial_eta) # this will sample", "# load model with open(tmp_file) as handle: model1 = load(handle)['model'] # make sure", "model.alpha = alpha self.assertLess(max(abs(model.alpha.ravel() - alpha.ravel())), 1e-20) def test_empirical_bayes_alpha(self): model = BatchLDA( num_words=4,", "[4., 4.] model.update_parameters(documents, max_epochs=10, max_iter_inference=200, update_lambda=False, update_alpha=True, emp_bayes_threshold=0.) # make sure empirical Bayes", "max_epochs=10, max_iter_inference=200, update_lambda=False, update_alpha=True, emp_bayes_threshold=0.) # make sure empirical Bayes went in the", "this will sample a beta with the given eta model.lambdas = zeros_like(model.lambdas) +", ".1], eta=initial_eta) # this will sample a beta with the given eta model.lambdas", "a beta with the given eta model.lambdas = zeros_like(model.lambdas) + eta documents =", "import corrcoef, random, abs, max, asarray, round, zeros_like from trlda.models import BatchLDA from", "alpha=[.2, .05], eta=.2) model.lambdas = [ [100, 100, 1e-16, 1e-16], [1e-16, 1e-16, 100,", "4.] model.update_parameters(documents, max_epochs=10, max_iter_inference=200, update_lambda=False, update_alpha=True, emp_bayes_threshold=0.) # make sure empirical Bayes went", "model1.num_topics) self.assertLess(max(abs(model0.lambdas - model1.lambdas)), 1e-20) self.assertLess(max(abs(model0.alpha - model1.alpha)), 1e-20) self.assertLess(abs(model0.eta - model1.eta), 1e-20)", "alpha to wrong values model.alpha = [4., 4.] 
model.update_parameters(documents, max_epochs=10, max_iter_inference=200, update_lambda=False, update_alpha=True,", "alpha = .27 eta = 3.1 model = BatchLDA(num_words=W, num_topics=K, alpha=alpha, eta=eta) self.assertEqual(K,", "model.update_parameters(documents, max_epochs=10, update_eta=True, emp_bayes_threshold=0.) # optimization should at least walk in the right", "from string import ascii_letters from numpy import corrcoef, random, abs, max, asarray, round,", "eta=initial_eta) # this will sample a beta with the given eta model.lambdas =", "= [ [100, 100, 1e-16, 1e-16], [1e-16, 1e-16, 100, 100]] documents = model.sample(num_documents=100,", "model.alpha = random.rand(K + 1) alpha = random.rand(K, 1) model.alpha = alpha self.assertLess(max(abs(model.alpha.ravel()" ]
[ "'st1', 'value': '0.95%'}, {'place': 'st2', 'value': '1.10%'}], 'indicator': 'Supplementary Nutrition'}]]} self.assertDictEqual(expected, data) def", "'d1', 'value': '66.74%'}], 'Worst performers': [{'place': 'd1', 'value': '66.74%'}], 'indicator': 'Weighing efficiency'}], [{'Best", "provided SNP for atleast 21+ days': '1.51%', '% of children between 6 months", "between 3-6 years provided SNP for atleast 21+ days': '1.08%', '% of children", "'Number of States Covered': 3}, 'Service Delivery': { '% of children between 3-6", "years, P&LW provided THR for atleast 21+ days': '25.32%', '% of trimester three", "'Weighing efficiency'}], [{'Best performers': [{'place': 'st2', 'value': '2.89%'}, {'place': 'st1', 'value': '1.44%'}, {'place':", "for atleast 21+ days': '6.66%', '% of children between 3-6 years provided SNP", "data = get_poshan_progress_dashboard_data( 'icds-cas', 2017, 5, 2, 'month', 'comparative', { 'aggregation_level': 2, 'state_id':", "'142.40%'}], 'indicator': 'AWC Open'}, {'Best performers': [{'place': 'd1', 'value': '1.62%'}], 'Worst performers': [{'place':", "'0.95%'}, {'place': 'st2', 'value': '1.10%'}], 'indicator': 'Supplementary Nutrition'}]]} self.assertDictEqual(expected, data) def test_get_ppr_data_aggregated_month(self): self.maxDiff", "performers': [{'place': 'd1', 'value': '72.97%'}], 'indicator': 'Counselling'}], [{'Best performers': [{'place': 'd1', 'value': '28.67%'}],", "'% of children between 3-6 years provided PSE for atleast 21+ days': '6.66%',", "Efficiency'}, {'Best performers': [{'place': 'st1', 'value': '60.32%'}, {'place': 'st2', 'value': '57.97%'}, {'place': 'st7',", "[{'place': 'st2', 'value': '8.41%'}, {'place': 'st1', 'value': '2.52%'}, {'place': 'st7', 'value': '0.00%'}], 'Worst", "'0.00%'}, {'place': 'st2', 'value': '0.00%'}, {'place': 'st1', 'value': '0.66%'}], 'indicator': 'Home Visits'}]], 'Service", "'value': '8.41%'}, {'place': 'st1', 'value': '2.52%'}, {'place': 'st7', 'value': '0.00%'}], 'Worst performers': 
[{'place':", "'72.15%', 'Height Measurement Efficiency': '3.24%', 'Weighing efficiency': '70.27%'}} self.assertDictEqual(expected, data) def test_get_ppr_data_aggregated_quarter(self): self.maxDiff", "'Weighing efficiency': '70.27%'}} self.assertDictEqual(expected, data) def test_get_ppr_data_aggregated_quarter(self): self.maxDiff = None data = get_poshan_progress_dashboard_data(", "Covered': 4, 'Number of States Covered': 3}, 'Service Delivery': { '% of children", "self.maxDiff = None data = get_poshan_progress_dashboard_data( 'icds-cas', 2017, None, 2, 'quarter', 'comparative', {", "'value': '0.95%'}, {'place': 'st7', 'value': '0.00%'}], 'Worst performers': [{'place': 'st7', 'value': '0.00%'}, {'place':", "'% of trimester three women counselled on immediate and EBF': '59.09%', 'Height Measurement", "'month', 'comparative', { 'aggregation_level': 2, 'state_id': 'st1', }, False ) expected = {'ICDS", "[{'place': 'st7', 'value': '0.00%'}, {'place': 'st1', 'value': '1.44%'}, {'place': 'st2', 'value': '2.89%'}], 'indicator':", "'AWC Open'}, {'Best performers': [{'place': 'd1', 'value': '1.62%'}], 'Worst performers': [{'place': 'd1', 'value':", "counselled on immediate and EBF': '72.15%', 'Height Measurement Efficiency': '3.24%', 'Weighing efficiency': '70.27%'}}", "'aggregation_level': 1, }, False ) expected = {'ICDS CAS Coverage': [[{'Best performers': [{'place':", "{'Best performers': [{'place': 'st2', 'value': '1.10%'}, {'place': 'st1', 'value': '0.95%'}, {'place': 'st7', 'value':", "[{'Best performers': [{'place': 'st2', 'value': '34.75%'}, {'place': 'st1', 'value': '14.60%'}, {'place': 'st7', 'value':", "'70.40%'}, {'place': 'st1', 'value': '67.39%'}, {'place': 'st7', 'value': '0.00%'}], 'Worst performers': [{'place': 'st7',", "Home Visits': '0.32%', 'Number of AWCs Launched': 22, 'Number of Blocks Covered': 5,", "2017, None, 2, 'quarter', 'aggregated', { 'aggregation_level': 1, }, False ) expected =", "'d1', 'value': '28.67%'}], 'indicator': 'Take Home Ration'}, 
{'Best performers': [{'place': 'd1', 'value': '0.83%'}],", "performers': [{'place': 'st2', 'value': '8.41%'}, {'place': 'st1', 'value': '2.52%'}, {'place': 'st7', 'value': '0.00%'}],", "of children between 3-6 years provided PSE for atleast 21+ days': '5.54%', '%", "of Blocks Covered': 5, 'Number of Districts Covered': 4, 'Number of States Covered':", "[{'place': 'd1', 'value': '66.74%'}], 'indicator': 'Weighing efficiency'}], [{'Best performers': [{'place': 'd1', 'value': '1.47%'}],", "'value': '142.40%'}], 'indicator': 'AWC Open'}, {'Best performers': [{'place': 'd1', 'value': '1.62%'}], 'Worst performers':", "'Weighing efficiency'}], [{'Best performers': [{'place': 'd1', 'value': '1.47%'}], 'Worst performers': [{'place': 'd1', 'value':", "'118.18%', '% of Home Visits': '0.79%', 'Number of AWCs Launched': 22, 'Number of", "children between 3-6 years provided SNP for atleast 21+ days': '1.08%', '% of", "'st2', 'value': '1.10%'}, {'place': 'st1', 'value': '0.95%'}, {'place': 'st7', 'value': '0.00%'}], 'Worst performers':", "CAS Coverage': {'% Number of Days AWC Were opened': '53.27%', '% of Home", "'0.00%'}], 'Worst performers': [{'place': 'st7', 'value': '0.00%'}, {'place': 'st1', 'value': '0.95%'}, {'place': 'st2',", "children between 3-6 years provided PSE for atleast 21+ days': '6.66%', '% of", "children between 6 months -3 years, P&LW provided THR for atleast 21+ days':", "Were opened': '118.18%', '% of Home Visits': '0.79%', 'Number of AWCs Launched': 22,", "days': '1.51%', '% of children between 6 months -3 years, P&LW provided THR", "performers': [{'place': 'st1', 'value': '60.32%'}, {'place': 'st2', 'value': '57.97%'}, {'place': 'st7', 'value': '0.00%'}],", "TestCase from custom.icds_reports.reports.poshan_progress_dashboard_data import get_poshan_progress_dashboard_data class TestPPDData(TestCase): def test_get_ppr_data_comparative_month(self): self.maxDiff = None data", "'57.97%'}, {'place': 'st7', 'value': '0.00%'}], 'Worst performers': [{'place': 
'st7', 'value': '0.00%'}, {'place': 'st2',", "performers': [{'place': 'st1', 'value': '0.66%'}, {'place': 'st2', 'value': '0.00%'}, {'place': 'st7', 'value': '0.00%'}],", "Nutrition'}]]} self.assertDictEqual(expected, data) def test_get_ppr_data_comparative_quarter(self): self.maxDiff = None data = get_poshan_progress_dashboard_data( 'icds-cas', 2017,", "'1.62%'}], 'indicator': 'Home Visits'}]], 'Service Delivery': [ [{'Best performers': [{'place': 'd1', 'value': '1.45%'}],", "Measurement Efficiency': '3.24%', 'Weighing efficiency': '70.27%'}} self.assertDictEqual(expected, data) def test_get_ppr_data_aggregated_quarter(self): self.maxDiff = None", "of trimester three women counselled on immediate and EBF': '59.09%', 'Height Measurement Efficiency':", "'st1', 'value': '1.44%'}, {'place': 'st2', 'value': '2.89%'}], 'indicator': 'Height Measurement Efficiency'}, {'Best performers':", "performers': [{'place': 'd1', 'value': '142.40%'}], 'Worst performers': [{'place': 'd1', 'value': '142.40%'}], 'indicator': 'AWC", "'aggregation_level': 2, 'state_id': 'st1', }, False ) expected = {'ICDS CAS Coverage': [[{'Best", "'% of Home Visits': '0.79%', 'Number of AWCs Launched': 22, 'Number of Blocks", "import get_poshan_progress_dashboard_data class TestPPDData(TestCase): def test_get_ppr_data_comparative_month(self): self.maxDiff = None data = get_poshan_progress_dashboard_data( 'icds-cas',", "data = get_poshan_progress_dashboard_data( 'icds-cas', 2017, None, 2, 'quarter', 'comparative', { 'aggregation_level': 1, },", "'0.95%'}, {'place': 'st7', 'value': '0.00%'}], 'Worst performers': [{'place': 'st7', 'value': '0.00%'}, {'place': 'st1',", "'Worst performers': [{'place': 'd1', 'value': '1.45%'}], 'indicator': 'Pre-school Education'}, {'Best performers': [{'place': 'd1',", "self.assertDictEqual(expected, data) def test_get_ppr_data_aggregated_quarter(self): self.maxDiff = None data = get_poshan_progress_dashboard_data( 'icds-cas', 2017, None,", "P&LW provided THR for atleast 21+ 
days': '25.32%', '% of trimester three women", "'value': '72.97%'}], 'indicator': 'Counselling'}], [{'Best performers': [{'place': 'd1', 'value': '28.67%'}], 'Worst performers': [{'place':", "'0.66%'}, {'place': 'st2', 'value': '0.00%'}, {'place': 'st7', 'value': '0.00%'}], 'Worst performers': [{'place': 'st7',", "{ 'aggregation_level': 1, }, False ) expected = {'ICDS CAS Coverage': {'% Number", "years provided SNP for atleast 21+ days': '1.08%', '% of children between 6", "'Pre-school Education'}, {'Best performers': [{'place': 'd1', 'value': '66.74%'}], 'Worst performers': [{'place': 'd1', 'value':", "of Days AWC Were opened': '53.27%', '% of Home Visits': '0.32%', 'Number of", "'d1', 'value': '1.62%'}], 'Worst performers': [{'place': 'd1', 'value': '1.62%'}], 'indicator': 'Home Visits'}]], 'Service", "21+ days': '25.32%', '% of trimester three women counselled on immediate and EBF':", "expected = {'ICDS CAS Coverage': {'% Number of Days AWC Were opened': '118.18%',", "Home Visits': '0.79%', 'Number of AWCs Launched': 22, 'Number of Blocks Covered': 5,", "= {'ICDS CAS Coverage': {'% Number of Days AWC Were opened': '53.27%', '%", "[{'place': 'd1', 'value': '1.45%'}], 'indicator': 'Pre-school Education'}, {'Best performers': [{'place': 'd1', 'value': '66.74%'}],", "'d1', 'value': '0.83%'}], 'indicator': 'Supplementary Nutrition'}]]} self.assertDictEqual(expected, data) def test_get_ppr_data_comparative_quarter(self): self.maxDiff = None", "Delivery': [[{'Best performers': [{'place': 'st2', 'value': '8.41%'}, {'place': 'st1', 'value': '2.52%'}, {'place': 'st7',", "immediate and EBF': '72.15%', 'Height Measurement Efficiency': '3.24%', 'Weighing efficiency': '70.27%'}} self.assertDictEqual(expected, data)", "Visits'}]], 'Service Delivery': [[{'Best performers': [{'place': 'st2', 'value': '8.41%'}, {'place': 'st1', 'value': '2.52%'},", "'14.60%'}, {'place': 'st2', 'value': '34.75%'}], 'indicator': 'Take Home Ration'}, {'Best performers': [{'place': 'st2',", "'Worst 
performers': [{'place': 'd1', 'value': '28.67%'}], 'indicator': 'Take Home Ration'}, {'Best performers': [{'place':", "'0.00%'}], 'Worst performers': [{'place': 'st7', 'value': '0.00%'}, {'place': 'st1', 'value': '1.44%'}, {'place': 'st2',", "'value': '0.00%'}], 'Worst performers': [{'place': 'st7', 'value': '0.00%'}, {'place': 'st1', 'value': '0.95%'}, {'place':", "performers': [{'place': 'd1', 'value': '66.74%'}], 'Worst performers': [{'place': 'd1', 'value': '66.74%'}], 'indicator': 'Weighing", "test_get_ppr_data_comparative_month(self): self.maxDiff = None data = get_poshan_progress_dashboard_data( 'icds-cas', 2017, 5, 2, 'month', 'comparative',", "[{'place': 'd1', 'value': '142.40%'}], 'indicator': 'AWC Open'}, {'Best performers': [{'place': 'd1', 'value': '1.62%'}],", "2, 'month', 'aggregated', { 'aggregation_level': 1, }, False ) expected = {'ICDS CAS", "'28.67%'}], 'indicator': 'Take Home Ration'}, {'Best performers': [{'place': 'd1', 'value': '0.83%'}], 'Worst performers':", "'st2', 'value': '0.00%'}, {'place': 'st1', 'value': '0.66%'}], 'indicator': 'Home Visits'}]], 'Service Delivery': [[{'Best", "efficiency'}], [{'Best performers': [{'place': 'st2', 'value': '2.89%'}, {'place': 'st1', 'value': '1.44%'}, {'place': 'st7',", "years, P&LW provided THR for atleast 21+ days': '43.65%', '% of trimester three", "}, False ) expected = {'ICDS CAS Coverage': [[{'Best performers': [{'place': 'st1', 'value':", "def test_get_ppr_data_comparative_month(self): self.maxDiff = None data = get_poshan_progress_dashboard_data( 'icds-cas', 2017, 5, 2, 'month',", "get_poshan_progress_dashboard_data( 'icds-cas', 2017, 5, 2, 'month', 'comparative', { 'aggregation_level': 2, 'state_id': 'st1', },", "import TestCase from custom.icds_reports.reports.poshan_progress_dashboard_data import get_poshan_progress_dashboard_data class TestPPDData(TestCase): def test_get_ppr_data_comparative_month(self): self.maxDiff = None", "from django.test import TestCase from 
custom.icds_reports.reports.poshan_progress_dashboard_data import get_poshan_progress_dashboard_data class TestPPDData(TestCase): def test_get_ppr_data_comparative_month(self): self.maxDiff", "'1.47%'}], 'Worst performers': [{'place': 'd1', 'value': '1.47%'}], 'indicator': 'Height Measurement Efficiency'}, {'Best performers':", "'value': '34.75%'}], 'indicator': 'Take Home Ration'}, {'Best performers': [{'place': 'st2', 'value': '1.10%'}, {'place':", "[{'place': 'st7', 'value': '0.00%'}, {'place': 'st2', 'value': '57.97%'}, {'place': 'st1', 'value': '60.32%'}], 'indicator':", "of Days AWC Were opened': '118.18%', '% of Home Visits': '0.79%', 'Number of", "'value': '1.44%'}, {'place': 'st7', 'value': '0.00%'}], 'Worst performers': [{'place': 'st7', 'value': '0.00%'}, {'place':", "THR for atleast 21+ days': '25.32%', '% of trimester three women counselled on", "'comparative', { 'aggregation_level': 2, 'state_id': 'st1', }, False ) expected = {'ICDS CAS", "Education'}, {'Best performers': [{'place': 'd1', 'value': '66.74%'}], 'Worst performers': [{'place': 'd1', 'value': '66.74%'}],", "performers': [{'place': 'st2', 'value': '1.10%'}, {'place': 'st1', 'value': '0.95%'}, {'place': 'st7', 'value': '0.00%'}],", "[{'place': 'd1', 'value': '66.74%'}], 'Worst performers': [{'place': 'd1', 'value': '66.74%'}], 'indicator': 'Weighing efficiency'}],", "between 6 months -3 years, P&LW provided THR for atleast 21+ days': '43.65%',", "[{'place': 'd1', 'value': '72.97%'}], 'Worst performers': [{'place': 'd1', 'value': '72.97%'}], 'indicator': 'Counselling'}], [{'Best", "[{'Best performers': [{'place': 'st2', 'value': '2.89%'}, {'place': 'st1', 'value': '1.44%'}, {'place': 'st7', 'value':", "'0.00%'}], 'Worst performers': [{'place': 'st7', 'value': '0.00%'}, {'place': 'st2', 'value': '57.97%'}, {'place': 'st1',", "Visits': '0.32%', 'Number of AWCs Launched': 22, 'Number of Blocks Covered': 5, 'Number", "'st1', 'value': '60.32%'}], 'indicator': 'Counselling'}], [{'Best performers': 
[{'place': 'st2', 'value': '34.75%'}, {'place': 'st1',", "days': '25.32%', '% of trimester three women counselled on immediate and EBF': '59.09%',", "'value': '1.47%'}], 'indicator': 'Height Measurement Efficiency'}, {'Best performers': [{'place': 'd1', 'value': '72.97%'}], 'Worst", "{'ICDS CAS Coverage': {'% Number of Days AWC Were opened': '118.18%', '% of", "'Take Home Ration'}, {'Best performers': [{'place': 'd1', 'value': '0.83%'}], 'Worst performers': [{'place': 'd1',", "of Home Visits': '0.32%', 'Number of AWCs Launched': 22, 'Number of Blocks Covered':", "get_poshan_progress_dashboard_data( 'icds-cas', 2017, None, 2, 'quarter', 'aggregated', { 'aggregation_level': 1, }, False )", "'34.75%'}, {'place': 'st1', 'value': '14.60%'}, {'place': 'st7', 'value': '0.00%'}], 'Worst performers': [{'place': 'st7',", "'Worst performers': [{'place': 'st7', 'value': '0.00%'}, {'place': 'st2', 'value': '57.97%'}, {'place': 'st1', 'value':", "3-6 years provided PSE for atleast 21+ days': '5.54%', '% of children between", "PSE for atleast 21+ days': '6.66%', '% of children between 3-6 years provided", "'value': '1.45%'}], 'Worst performers': [{'place': 'd1', 'value': '1.45%'}], 'indicator': 'Pre-school Education'}, {'Best performers':", "'st1', }, False ) expected = {'ICDS CAS Coverage': [[{'Best performers': [{'place': 'd1',", "3-6 years provided SNP for atleast 21+ days': '1.08%', '% of children between", "'Worst performers': [{'place': 'd1', 'value': '72.97%'}], 'indicator': 'Counselling'}], [{'Best performers': [{'place': 'd1', 'value':", "'Pre-school Education'}, {'Best performers': [{'place': 'st2', 'value': '70.40%'}, {'place': 'st1', 'value': '67.39%'}, {'place':", "'indicator': 'Pre-school Education'}, {'Best performers': [{'place': 'd1', 'value': '66.74%'}], 'Worst performers': [{'place': 'd1',", "{'place': 'st2', 'value': '47.76%'}, {'place': 'st1', 'value': '64.80%'}], 'indicator': 'AWC Open'}, {'Best performers':", "Efficiency'}, {'Best performers': [{'place': 
'd1', 'value': '72.97%'}], 'Worst performers': [{'place': 'd1', 'value': '72.97%'}],", "'value': '70.40%'}, {'place': 'st1', 'value': '67.39%'}, {'place': 'st7', 'value': '0.00%'}], 'Worst performers': [{'place':", "[{'place': 'd1', 'value': '28.67%'}], 'Worst performers': [{'place': 'd1', 'value': '28.67%'}], 'indicator': 'Take Home", "performers': [{'place': 'd1', 'value': '66.74%'}], 'indicator': 'Weighing efficiency'}], [{'Best performers': [{'place': 'd1', 'value':", "= get_poshan_progress_dashboard_data( 'icds-cas', 2017, None, 2, 'quarter', 'comparative', { 'aggregation_level': 1, }, False", "'0.79%', 'Number of AWCs Launched': 22, 'Number of Blocks Covered': 5, 'Number of", "trimester three women counselled on immediate and EBF': '72.15%', 'Height Measurement Efficiency': '3.24%',", "Coverage': [[{'Best performers': [{'place': 'd1', 'value': '142.40%'}], 'Worst performers': [{'place': 'd1', 'value': '142.40%'}],", "performers': [{'place': 'd1', 'value': '28.67%'}], 'indicator': 'Take Home Ration'}, {'Best performers': [{'place': 'd1',", "'value': '34.75%'}, {'place': 'st1', 'value': '14.60%'}, {'place': 'st7', 'value': '0.00%'}], 'Worst performers': [{'place':", "'value': '72.97%'}], 'Worst performers': [{'place': 'd1', 'value': '72.97%'}], 'indicator': 'Counselling'}], [{'Best performers': [{'place':", "'value': '47.76%'}, {'place': 'st7', 'value': '0.00%'}], 'Worst performers': [{'place': 'st7', 'value': '0.00%'}, {'place':", "Measurement Efficiency'}, {'Best performers': [{'place': 'd1', 'value': '72.97%'}], 'Worst performers': [{'place': 'd1', 'value':", "'0.83%'}], 'indicator': 'Supplementary Nutrition'}]]} self.assertDictEqual(expected, data) def test_get_ppr_data_comparative_quarter(self): self.maxDiff = None data =", "'st7', 'value': '0.00%'}, {'place': 'st1', 'value': '67.39%'}, {'place': 'st2', 'value': '70.40%'}], 'indicator': 'Weighing", "'indicator': 'Weighing efficiency'}], [{'Best performers': [{'place': 'st2', 'value': '2.89%'}, {'place': 
'st1', 'value': '1.44%'},", "2017, 5, 2, 'month', 'aggregated', { 'aggregation_level': 1, }, False ) expected =", "years provided PSE for atleast 21+ days': '6.66%', '% of children between 3-6", "SNP for atleast 21+ days': '1.51%', '% of children between 6 months -3", "'1.44%'}, {'place': 'st7', 'value': '0.00%'}], 'Worst performers': [{'place': 'st7', 'value': '0.00%'}, {'place': 'st1',", ") expected = {'ICDS CAS Coverage': {'% Number of Days AWC Were opened':", "'st7', 'value': '0.00%'}], 'Worst performers': [{'place': 'st7', 'value': '0.00%'}, {'place': 'st1', 'value': '14.60%'},", "-3 years, P&LW provided THR for atleast 21+ days': '43.65%', '% of trimester", "21+ days': '43.65%', '% of trimester three women counselled on immediate and EBF':", "= None data = get_poshan_progress_dashboard_data( 'icds-cas', 2017, None, 2, 'quarter', 'aggregated', { 'aggregation_level':", "'value': '1.47%'}], 'Worst performers': [{'place': 'd1', 'value': '1.47%'}], 'indicator': 'Height Measurement Efficiency'}, {'Best", "self.assertDictEqual(expected, data) def test_get_ppr_data_comparative_quarter(self): self.maxDiff = None data = get_poshan_progress_dashboard_data( 'icds-cas', 2017, None,", "Blocks Covered': 5, 'Number of Districts Covered': 4, 'Number of States Covered': 3},", "'state_id': 'st1', }, False ) expected = {'ICDS CAS Coverage': [[{'Best performers': [{'place':", "class TestPPDData(TestCase): def test_get_ppr_data_comparative_month(self): self.maxDiff = None data = get_poshan_progress_dashboard_data( 'icds-cas', 2017, 5,", "data = get_poshan_progress_dashboard_data( 'icds-cas', 2017, 5, 2, 'month', 'aggregated', { 'aggregation_level': 1, },", "'value': '1.44%'}, {'place': 'st2', 'value': '2.89%'}], 'indicator': 'Height Measurement Efficiency'}, {'Best performers': [{'place':", "of Home Visits': '0.79%', 'Number of AWCs Launched': 22, 'Number of Blocks Covered':", "'1.62%'}], 'Worst performers': [{'place': 'd1', 'value': '1.62%'}], 'indicator': 'Home Visits'}]], 
'Service Delivery': [", "'2.89%'}, {'place': 'st1', 'value': '1.44%'}, {'place': 'st7', 'value': '0.00%'}], 'Worst performers': [{'place': 'st7',", "'value': '67.39%'}, {'place': 'st7', 'value': '0.00%'}], 'Worst performers': [{'place': 'st7', 'value': '0.00%'}, {'place':", "None data = get_poshan_progress_dashboard_data( 'icds-cas', 2017, None, 2, 'quarter', 'aggregated', { 'aggregation_level': 1,", "'st2', 'value': '47.76%'}, {'place': 'st1', 'value': '64.80%'}], 'indicator': 'AWC Open'}, {'Best performers': [{'place':", "5, 2, 'month', 'aggregated', { 'aggregation_level': 1, }, False ) expected = {'ICDS", "'64.80%'}], 'indicator': 'AWC Open'}, {'Best performers': [{'place': 'st1', 'value': '0.66%'}, {'place': 'st2', 'value':", "PSE for atleast 21+ days': '5.54%', '% of children between 3-6 years provided", "'st7', 'value': '0.00%'}, {'place': 'st1', 'value': '14.60%'}, {'place': 'st2', 'value': '34.75%'}], 'indicator': 'Take", "'8.41%'}], 'indicator': 'Pre-school Education'}, {'Best performers': [{'place': 'st2', 'value': '70.40%'}, {'place': 'st1', 'value':", "[{'place': 'st7', 'value': '0.00%'}, {'place': 'st1', 'value': '0.95%'}, {'place': 'st2', 'value': '1.10%'}], 'indicator':", "def test_get_ppr_data_aggregated_month(self): self.maxDiff = None data = get_poshan_progress_dashboard_data( 'icds-cas', 2017, 5, 2, 'month',", "Districts Covered': 4, 'Number of States Covered': 3}, 'Service Delivery': { '% of", "performers': [{'place': 'st2', 'value': '2.89%'}, {'place': 'st1', 'value': '1.44%'}, {'place': 'st7', 'value': '0.00%'}],", "'0.00%'}], 'Worst performers': [{'place': 'st7', 'value': '0.00%'}, {'place': 'st2', 'value': '0.00%'}, {'place': 'st1',", "= {'ICDS CAS Coverage': [[{'Best performers': [{'place': 'st1', 'value': '64.80%'}, {'place': 'st2', 'value':", "False ) expected = {'ICDS CAS Coverage': {'% Number of Days AWC Were", "self.assertDictEqual(expected, data) def test_get_ppr_data_aggregated_month(self): self.maxDiff = None data = 
get_poshan_progress_dashboard_data( 'icds-cas', 2017, 5,", "custom.icds_reports.reports.poshan_progress_dashboard_data import get_poshan_progress_dashboard_data class TestPPDData(TestCase): def test_get_ppr_data_comparative_month(self): self.maxDiff = None data = get_poshan_progress_dashboard_data(", "years provided SNP for atleast 21+ days': '1.51%', '% of children between 6", "'indicator': 'Take Home Ration'}, {'Best performers': [{'place': 'st2', 'value': '1.10%'}, {'place': 'st1', 'value':", "'value': '0.00%'}, {'place': 'st1', 'value': '2.52%'}, {'place': 'st2', 'value': '8.41%'}], 'indicator': 'Pre-school Education'},", "performers': [{'place': 'd1', 'value': '0.83%'}], 'Worst performers': [{'place': 'd1', 'value': '0.83%'}], 'indicator': 'Supplementary", "'indicator': 'Pre-school Education'}, {'Best performers': [{'place': 'st2', 'value': '70.40%'}, {'place': 'st1', 'value': '67.39%'},", "'value': '64.80%'}, {'place': 'st2', 'value': '47.76%'}, {'place': 'st7', 'value': '0.00%'}], 'Worst performers': [{'place':", "'value': '2.52%'}, {'place': 'st2', 'value': '8.41%'}], 'indicator': 'Pre-school Education'}, {'Best performers': [{'place': 'st2',", "{'place': 'st1', 'value': '0.95%'}, {'place': 'st7', 'value': '0.00%'}], 'Worst performers': [{'place': 'st7', 'value':", "performers': [{'place': 'd1', 'value': '1.45%'}], 'indicator': 'Pre-school Education'}, {'Best performers': [{'place': 'd1', 'value':", "'34.75%'}], 'indicator': 'Take Home Ration'}, {'Best performers': [{'place': 'st2', 'value': '1.10%'}, {'place': 'st1',", "'st2', 'value': '8.41%'}], 'indicator': 'Pre-school Education'}, {'Best performers': [{'place': 'st2', 'value': '70.40%'}, {'place':", "Coverage': {'% Number of Days AWC Were opened': '118.18%', '% of Home Visits':", "and EBF': '72.15%', 'Height Measurement Efficiency': '3.24%', 'Weighing efficiency': '70.27%'}} self.assertDictEqual(expected, data) def", "'Worst performers': [{'place': 'st7', 'value': '0.00%'}, {'place': 'st1', 'value': 
'2.52%'}, {'place': 'st2', 'value':", "provided PSE for atleast 21+ days': '5.54%', '% of children between 3-6 years", "'5.54%', '% of children between 3-6 years provided SNP for atleast 21+ days':", "days': '1.08%', '% of children between 6 months -3 years, P&LW provided THR", "{'place': 'st7', 'value': '0.00%'}], 'Worst performers': [{'place': 'st7', 'value': '0.00%'}, {'place': 'st1', 'value':", "'% of Home Visits': '0.32%', 'Number of AWCs Launched': 22, 'Number of Blocks", "'value': '28.67%'}], 'Worst performers': [{'place': 'd1', 'value': '28.67%'}], 'indicator': 'Take Home Ration'}, {'Best", "'Home Visits'}]], 'Service Delivery': [ [{'Best performers': [{'place': 'd1', 'value': '1.45%'}], 'Worst performers':", "2, 'quarter', 'comparative', { 'aggregation_level': 1, }, False ) expected = {'ICDS CAS", "performers': [{'place': 'd1', 'value': '72.97%'}], 'Worst performers': [{'place': 'd1', 'value': '72.97%'}], 'indicator': 'Counselling'}],", "performers': [{'place': 'st7', 'value': '0.00%'}, {'place': 'st2', 'value': '57.97%'}, {'place': 'st1', 'value': '60.32%'}],", "'value': '57.97%'}, {'place': 'st1', 'value': '60.32%'}], 'indicator': 'Counselling'}], [{'Best performers': [{'place': 'st2', 'value':", "'indicator': 'Weighing efficiency'}], [{'Best performers': [{'place': 'd1', 'value': '1.47%'}], 'Worst performers': [{'place': 'd1',", "2, 'state_id': 'st1', }, False ) expected = {'ICDS CAS Coverage': [[{'Best performers':", "{'Best performers': [{'place': 'st1', 'value': '0.66%'}, {'place': 'st2', 'value': '0.00%'}, {'place': 'st7', 'value':", "performers': [{'place': 'st7', 'value': '0.00%'}, {'place': 'st1', 'value': '2.52%'}, {'place': 'st2', 'value': '8.41%'}],", "provided PSE for atleast 21+ days': '6.66%', '% of children between 3-6 years", "'% of children between 3-6 years provided SNP for atleast 21+ days': '1.51%',", "{'place': 'st2', 'value': '34.75%'}], 'indicator': 'Take Home Ration'}, {'Best performers': [{'place': 'st2', 'value':", "Number of 
Days AWC Were opened': '53.27%', '% of Home Visits': '0.32%', 'Number", "'Number of Districts Covered': 4, 'Number of States Covered': 3}, 'Service Delivery': {", "get_poshan_progress_dashboard_data( 'icds-cas', 2017, None, 2, 'quarter', 'comparative', { 'aggregation_level': 1, }, False )", "[{'place': 'd1', 'value': '0.83%'}], 'indicator': 'Supplementary Nutrition'}]]} self.assertDictEqual(expected, data) def test_get_ppr_data_comparative_quarter(self): self.maxDiff =", "SNP for atleast 21+ days': '1.08%', '% of children between 6 months -3", "'0.00%'}, {'place': 'st2', 'value': '57.97%'}, {'place': 'st1', 'value': '60.32%'}], 'indicator': 'Counselling'}], [{'Best performers':", "data) def test_get_ppr_data_comparative_quarter(self): self.maxDiff = None data = get_poshan_progress_dashboard_data( 'icds-cas', 2017, None, 2,", "'d1', 'value': '72.97%'}], 'indicator': 'Counselling'}], [{'Best performers': [{'place': 'd1', 'value': '28.67%'}], 'Worst performers':", "Were opened': '53.27%', '% of Home Visits': '0.32%', 'Number of AWCs Launched': 22,", "trimester three women counselled on immediate and EBF': '59.09%', 'Height Measurement Efficiency': '2.24%',", "provided THR for atleast 21+ days': '25.32%', '% of trimester three women counselled", "'% of children between 6 months -3 years, P&LW provided THR for atleast", "= None data = get_poshan_progress_dashboard_data( 'icds-cas', 2017, 5, 2, 'month', 'aggregated', { 'aggregation_level':", "of children between 3-6 years provided SNP for atleast 21+ days': '1.51%', '%", "[{'place': 'd1', 'value': '142.40%'}], 'Worst performers': [{'place': 'd1', 'value': '142.40%'}], 'indicator': 'AWC Open'},", "Open'}, {'Best performers': [{'place': 'd1', 'value': '1.62%'}], 'Worst performers': [{'place': 'd1', 'value': '1.62%'}],", "'47.76%'}, {'place': 'st1', 'value': '64.80%'}], 'indicator': 'AWC Open'}, {'Best performers': [{'place': 'st1', 'value':", "'value': '66.74%'}], 'Worst performers': [{'place': 'd1', 'value': '66.74%'}], 
'indicator': 'Weighing efficiency'}], [{'Best performers':", "efficiency'}], [{'Best performers': [{'place': 'd1', 'value': '1.47%'}], 'Worst performers': [{'place': 'd1', 'value': '1.47%'}],", "'d1', 'value': '28.67%'}], 'Worst performers': [{'place': 'd1', 'value': '28.67%'}], 'indicator': 'Take Home Ration'},", "Open'}, {'Best performers': [{'place': 'st1', 'value': '0.66%'}, {'place': 'st2', 'value': '0.00%'}, {'place': 'st7',", "'d1', 'value': '0.83%'}], 'Worst performers': [{'place': 'd1', 'value': '0.83%'}], 'indicator': 'Supplementary Nutrition'}]]} self.assertDictEqual(expected,", "data) def test_get_ppr_data_aggregated_month(self): self.maxDiff = None data = get_poshan_progress_dashboard_data( 'icds-cas', 2017, 5, 2,", "States Covered': 3}, 'Service Delivery': { '% of children between 3-6 years provided", "'1.44%'}, {'place': 'st2', 'value': '2.89%'}], 'indicator': 'Height Measurement Efficiency'}, {'Best performers': [{'place': 'st1',", "Measurement Efficiency'}, {'Best performers': [{'place': 'st1', 'value': '60.32%'}, {'place': 'st2', 'value': '57.97%'}, {'place':", "def test_get_ppr_data_comparative_quarter(self): self.maxDiff = None data = get_poshan_progress_dashboard_data( 'icds-cas', 2017, None, 2, 'quarter',", "'st7', 'value': '0.00%'}, {'place': 'st2', 'value': '0.00%'}, {'place': 'st1', 'value': '0.66%'}], 'indicator': 'Home", "'aggregation_level': 1, }, False ) expected = {'ICDS CAS Coverage': {'% Number of", "6 months -3 years, P&LW provided THR for atleast 21+ days': '25.32%', '%", "{'place': 'st2', 'value': '0.00%'}, {'place': 'st1', 'value': '0.66%'}], 'indicator': 'Home Visits'}]], 'Service Delivery':", "three women counselled on immediate and EBF': '59.09%', 'Height Measurement Efficiency': '2.24%', 'Weighing", "{'place': 'st2', 'value': '8.41%'}], 'indicator': 'Pre-school Education'}, {'Best performers': [{'place': 'st2', 'value': '70.40%'},", "'72.97%'}], 'indicator': 'Counselling'}], [{'Best performers': [{'place': 'd1', 'value': 
'28.67%'}], 'Worst performers': [{'place': 'd1',", "{'place': 'st2', 'value': '47.76%'}, {'place': 'st7', 'value': '0.00%'}], 'Worst performers': [{'place': 'st7', 'value':", "{'Best performers': [{'place': 'st2', 'value': '70.40%'}, {'place': 'st1', 'value': '67.39%'}, {'place': 'st7', 'value':", "}, False ) expected = {'ICDS CAS Coverage': {'% Number of Days AWC", "'AWC Open'}, {'Best performers': [{'place': 'st1', 'value': '0.66%'}, {'place': 'st2', 'value': '0.00%'}, {'place':", "'value': '0.00%'}, {'place': 'st1', 'value': '0.95%'}, {'place': 'st2', 'value': '1.10%'}], 'indicator': 'Supplementary Nutrition'}]]}", "'2.52%'}, {'place': 'st7', 'value': '0.00%'}], 'Worst performers': [{'place': 'st7', 'value': '0.00%'}, {'place': 'st1',", "'st2', 'value': '2.89%'}, {'place': 'st1', 'value': '1.44%'}, {'place': 'st7', 'value': '0.00%'}], 'Worst performers':", "performers': [{'place': 'st2', 'value': '34.75%'}, {'place': 'st1', 'value': '14.60%'}, {'place': 'st7', 'value': '0.00%'}],", "'value': '0.00%'}, {'place': 'st1', 'value': '0.66%'}], 'indicator': 'Home Visits'}]], 'Service Delivery': [[{'Best performers':", "'st7', 'value': '0.00%'}], 'Worst performers': [{'place': 'st7', 'value': '0.00%'}, {'place': 'st1', 'value': '0.95%'},", "on immediate and EBF': '59.09%', 'Height Measurement Efficiency': '2.24%', 'Weighing efficiency': '68.81%'}} self.assertDictEqual(expected,", "'value': '1.62%'}], 'Worst performers': [{'place': 'd1', 'value': '1.62%'}], 'indicator': 'Home Visits'}]], 'Service Delivery':", "'0.00%'}], 'Worst performers': [{'place': 'st7', 'value': '0.00%'}, {'place': 'st2', 'value': '47.76%'}, {'place': 'st1',", "'value': '57.97%'}, {'place': 'st7', 'value': '0.00%'}], 'Worst performers': [{'place': 'st7', 'value': '0.00%'}, {'place':", "[{'place': 'd1', 'value': '1.62%'}], 'indicator': 'Home Visits'}]], 'Service Delivery': [ [{'Best performers': [{'place':", "atleast 21+ days': '1.08%', '% of children between 6 months -3 years, P&LW", "'indicator': 
'Home Visits'}]], 'Service Delivery': [ [{'Best performers': [{'place': 'd1', 'value': '1.45%'}], 'Worst", "'icds-cas', 2017, None, 2, 'quarter', 'aggregated', { 'aggregation_level': 1, }, False ) expected", "'value': '14.60%'}, {'place': 'st2', 'value': '34.75%'}], 'indicator': 'Take Home Ration'}, {'Best performers': [{'place':", "'70.27%'}} self.assertDictEqual(expected, data) def test_get_ppr_data_aggregated_quarter(self): self.maxDiff = None data = get_poshan_progress_dashboard_data( 'icds-cas', 2017,", "'d1', 'value': '1.47%'}], 'Worst performers': [{'place': 'd1', 'value': '1.47%'}], 'indicator': 'Height Measurement Efficiency'},", "5, 2, 'month', 'comparative', { 'aggregation_level': 2, 'state_id': 'st1', }, False ) expected", "for atleast 21+ days': '25.32%', '% of trimester three women counselled on immediate", "'0.00%'}, {'place': 'st7', 'value': '0.00%'}], 'Worst performers': [{'place': 'st7', 'value': '0.00%'}, {'place': 'st2',", "'icds-cas', 2017, None, 2, 'quarter', 'comparative', { 'aggregation_level': 1, }, False ) expected", "{'ICDS CAS Coverage': [[{'Best performers': [{'place': 'st1', 'value': '64.80%'}, {'place': 'st2', 'value': '47.76%'},", "AWC Were opened': '53.27%', '% of Home Visits': '0.32%', 'Number of AWCs Launched':", "'st7', 'value': '0.00%'}], 'Worst performers': [{'place': 'st7', 'value': '0.00%'}, {'place': 'st2', 'value': '0.00%'},", "self.maxDiff = None data = get_poshan_progress_dashboard_data( 'icds-cas', 2017, None, 2, 'quarter', 'aggregated', {", "'value': '60.32%'}, {'place': 'st2', 'value': '57.97%'}, {'place': 'st7', 'value': '0.00%'}], 'Worst performers': [{'place':", "Covered': 5, 'Number of Districts Covered': 4, 'Number of States Covered': 3}, 'Service", "[{'place': 'st1', 'value': '0.66%'}, {'place': 'st2', 'value': '0.00%'}, {'place': 'st7', 'value': '0.00%'}], 'Worst", "'indicator': 'Home Visits'}]], 'Service Delivery': [[{'Best performers': [{'place': 'st2', 'value': '8.41%'}, {'place': 'st1',", "women counselled 
on immediate and EBF': '59.09%', 'Height Measurement Efficiency': '2.24%', 'Weighing efficiency':", "Delivery': { '% of children between 3-6 years provided PSE for atleast 21+", "years provided PSE for atleast 21+ days': '5.54%', '% of children between 3-6", "'st7', 'value': '0.00%'}], 'Worst performers': [{'place': 'st7', 'value': '0.00%'}, {'place': 'st2', 'value': '47.76%'},", "'value': '0.00%'}], 'Worst performers': [{'place': 'st7', 'value': '0.00%'}, {'place': 'st1', 'value': '14.60%'}, {'place':", "'value': '14.60%'}, {'place': 'st7', 'value': '0.00%'}], 'Worst performers': [{'place': 'st7', 'value': '0.00%'}, {'place':", "test_get_ppr_data_aggregated_quarter(self): self.maxDiff = None data = get_poshan_progress_dashboard_data( 'icds-cas', 2017, None, 2, 'quarter', 'aggregated',", "performers': [{'place': 'st1', 'value': '64.80%'}, {'place': 'st2', 'value': '47.76%'}, {'place': 'st7', 'value': '0.00%'}],", "'d1', 'value': '142.40%'}], 'indicator': 'AWC Open'}, {'Best performers': [{'place': 'd1', 'value': '1.62%'}], 'Worst", "'1.45%'}], 'indicator': 'Pre-school Education'}, {'Best performers': [{'place': 'd1', 'value': '66.74%'}], 'Worst performers': [{'place':", "{'place': 'st1', 'value': '0.95%'}, {'place': 'st2', 'value': '1.10%'}], 'indicator': 'Supplementary Nutrition'}]]} self.assertDictEqual(expected, data)", "'8.41%'}, {'place': 'st1', 'value': '2.52%'}, {'place': 'st7', 'value': '0.00%'}], 'Worst performers': [{'place': 'st7',", "for atleast 21+ days': '5.54%', '% of children between 3-6 years provided SNP", "'Worst performers': [{'place': 'd1', 'value': '66.74%'}], 'indicator': 'Weighing efficiency'}], [{'Best performers': [{'place': 'd1',", "atleast 21+ days': '1.51%', '% of children between 6 months -3 years, P&LW", "months -3 years, P&LW provided THR for atleast 21+ days': '25.32%', '% of", "{ 'aggregation_level': 1, }, False ) expected = {'ICDS CAS Coverage': [[{'Best performers':", "'d1', 'value': '1.62%'}], 'indicator': 'Home Visits'}]], 
'Service Delivery': [ [{'Best performers': [{'place': 'd1',", "'value': '0.95%'}, {'place': 'st2', 'value': '1.10%'}], 'indicator': 'Supplementary Nutrition'}]]} self.assertDictEqual(expected, data) def test_get_ppr_data_aggregated_month(self):", "'value': '1.62%'}], 'indicator': 'Home Visits'}]], 'Service Delivery': [ [{'Best performers': [{'place': 'd1', 'value':", "'indicator': 'AWC Open'}, {'Best performers': [{'place': 'st1', 'value': '0.66%'}, {'place': 'st2', 'value': '0.00%'},", "performers': [{'place': 'st7', 'value': '0.00%'}, {'place': 'st1', 'value': '67.39%'}, {'place': 'st2', 'value': '70.40%'}],", "[{'place': 'st7', 'value': '0.00%'}, {'place': 'st2', 'value': '47.76%'}, {'place': 'st1', 'value': '64.80%'}], 'indicator':", "'value': '8.41%'}], 'indicator': 'Pre-school Education'}, {'Best performers': [{'place': 'st2', 'value': '70.40%'}, {'place': 'st1',", "21+ days': '5.54%', '% of children between 3-6 years provided SNP for atleast", "'0.83%'}], 'Worst performers': [{'place': 'd1', 'value': '0.83%'}], 'indicator': 'Supplementary Nutrition'}]]} self.assertDictEqual(expected, data) def", "provided THR for atleast 21+ days': '43.65%', '% of trimester three women counselled", "'Worst performers': [{'place': 'st7', 'value': '0.00%'}, {'place': 'st1', 'value': '1.44%'}, {'place': 'st2', 'value':", "{'place': 'st1', 'value': '60.32%'}], 'indicator': 'Counselling'}], [{'Best performers': [{'place': 'st2', 'value': '34.75%'}, {'place':", "'Worst performers': [{'place': 'st7', 'value': '0.00%'}, {'place': 'st2', 'value': '0.00%'}, {'place': 'st1', 'value':", "= None data = get_poshan_progress_dashboard_data( 'icds-cas', 2017, None, 2, 'quarter', 'comparative', { 'aggregation_level':", "children between 3-6 years provided SNP for atleast 21+ days': '1.51%', '% of", "[{'place': 'd1', 'value': '1.45%'}], 'Worst performers': [{'place': 'd1', 'value': '1.45%'}], 'indicator': 'Pre-school Education'},", "'Number of Blocks Covered': 5, 'Number of Districts Covered': 
4, 'Number of States", "atleast 21+ days': '6.66%', '% of children between 3-6 years provided SNP for", "None, 2, 'quarter', 'aggregated', { 'aggregation_level': 1, }, False ) expected = {'ICDS", "'value': '60.32%'}], 'indicator': 'Counselling'}], [{'Best performers': [{'place': 'st2', 'value': '34.75%'}, {'place': 'st1', 'value':", "'60.32%'}], 'indicator': 'Counselling'}], [{'Best performers': [{'place': 'st2', 'value': '34.75%'}, {'place': 'st1', 'value': '14.60%'},", "'indicator': 'Take Home Ration'}, {'Best performers': [{'place': 'd1', 'value': '0.83%'}], 'Worst performers': [{'place':", "Delivery': [ [{'Best performers': [{'place': 'd1', 'value': '1.45%'}], 'Worst performers': [{'place': 'd1', 'value':", "'1.45%'}], 'Worst performers': [{'place': 'd1', 'value': '1.45%'}], 'indicator': 'Pre-school Education'}, {'Best performers': [{'place':", "{'ICDS CAS Coverage': {'% Number of Days AWC Were opened': '53.27%', '% of", "[{'Best performers': [{'place': 'd1', 'value': '1.45%'}], 'Worst performers': [{'place': 'd1', 'value': '1.45%'}], 'indicator':", "'value': '0.00%'}, {'place': 'st2', 'value': '47.76%'}, {'place': 'st1', 'value': '64.80%'}], 'indicator': 'AWC Open'},", "'value': '66.74%'}], 'indicator': 'Weighing efficiency'}], [{'Best performers': [{'place': 'd1', 'value': '1.47%'}], 'Worst performers':", "'st2', 'value': '2.89%'}], 'indicator': 'Height Measurement Efficiency'}, {'Best performers': [{'place': 'st1', 'value': '60.32%'},", "get_poshan_progress_dashboard_data( 'icds-cas', 2017, 5, 2, 'month', 'aggregated', { 'aggregation_level': 1, }, False )", "False ) expected = {'ICDS CAS Coverage': [[{'Best performers': [{'place': 'd1', 'value': '142.40%'}],", "'53.27%', '% of Home Visits': '0.32%', 'Number of AWCs Launched': 22, 'Number of", "'value': '0.00%'}, {'place': 'st7', 'value': '0.00%'}], 'Worst performers': [{'place': 'st7', 'value': '0.00%'}, {'place':", "'0.00%'}], 'Worst performers': [{'place': 'st7', 'value': '0.00%'}, {'place': 'st1', 
'value': '67.39%'}, {'place': 'st2',", "[{'place': 'st7', 'value': '0.00%'}, {'place': 'st1', 'value': '2.52%'}, {'place': 'st2', 'value': '8.41%'}], 'indicator':", "'value': '0.00%'}], 'Worst performers': [{'place': 'st7', 'value': '0.00%'}, {'place': 'st1', 'value': '2.52%'}, {'place':", "'3.24%', 'Weighing efficiency': '70.27%'}} self.assertDictEqual(expected, data) def test_get_ppr_data_aggregated_quarter(self): self.maxDiff = None data =", "'st1', 'value': '60.32%'}, {'place': 'st2', 'value': '57.97%'}, {'place': 'st7', 'value': '0.00%'}], 'Worst performers':", "AWCs Launched': 22, 'Number of Blocks Covered': 5, 'Number of Districts Covered': 4,", "three women counselled on immediate and EBF': '72.15%', 'Height Measurement Efficiency': '3.24%', 'Weighing", "performers': [{'place': 'd1', 'value': '1.62%'}], 'Worst performers': [{'place': 'd1', 'value': '1.62%'}], 'indicator': 'Home", "None data = get_poshan_progress_dashboard_data( 'icds-cas', 2017, 5, 2, 'month', 'aggregated', { 'aggregation_level': 1,", "'1.47%'}], 'indicator': 'Height Measurement Efficiency'}, {'Best performers': [{'place': 'd1', 'value': '72.97%'}], 'Worst performers':", "'25.32%', '% of trimester three women counselled on immediate and EBF': '59.09%', 'Height", "[{'place': 'st7', 'value': '0.00%'}, {'place': 'st1', 'value': '14.60%'}, {'place': 'st2', 'value': '34.75%'}], 'indicator':", "'st7', 'value': '0.00%'}, {'place': 'st2', 'value': '57.97%'}, {'place': 'st1', 'value': '60.32%'}], 'indicator': 'Counselling'}],", "'value': '0.00%'}], 'Worst performers': [{'place': 'st7', 'value': '0.00%'}, {'place': 'st1', 'value': '67.39%'}, {'place':", "between 3-6 years provided PSE for atleast 21+ days': '5.54%', '% of children", "'st1', 'value': '0.66%'}, {'place': 'st2', 'value': '0.00%'}, {'place': 'st7', 'value': '0.00%'}], 'Worst performers':", "'Height Measurement Efficiency': '3.24%', 'Weighing efficiency': '70.27%'}} self.assertDictEqual(expected, data) def 
test_get_ppr_data_aggregated_quarter(self): self.maxDiff =", "'0.00%'}, {'place': 'st1', 'value': '1.44%'}, {'place': 'st2', 'value': '2.89%'}], 'indicator': 'Height Measurement Efficiency'},", "Launched': 22, 'Number of Blocks Covered': 5, 'Number of Districts Covered': 4, 'Number", "'0.32%', 'Number of AWCs Launched': 22, 'Number of Blocks Covered': 5, 'Number of", "expected = {'ICDS CAS Coverage': [[{'Best performers': [{'place': 'st1', 'value': '64.80%'}, {'place': 'st2',", "[{'Best performers': [{'place': 'd1', 'value': '1.47%'}], 'Worst performers': [{'place': 'd1', 'value': '1.47%'}], 'indicator':", "{'Best performers': [{'place': 'd1', 'value': '0.83%'}], 'Worst performers': [{'place': 'd1', 'value': '0.83%'}], 'indicator':", "'value': '0.00%'}], 'Worst performers': [{'place': 'st7', 'value': '0.00%'}, {'place': 'st1', 'value': '1.44%'}, {'place':", "= {'ICDS CAS Coverage': {'% Number of Days AWC Were opened': '118.18%', '%", "'st7', 'value': '0.00%'}, {'place': 'st1', 'value': '1.44%'}, {'place': 'st2', 'value': '2.89%'}], 'indicator': 'Height", "'d1', 'value': '66.74%'}], 'indicator': 'Weighing efficiency'}], [{'Best performers': [{'place': 'd1', 'value': '1.47%'}], 'Worst", "'st1', 'value': '67.39%'}, {'place': 'st2', 'value': '70.40%'}], 'indicator': 'Weighing efficiency'}], [{'Best performers': [{'place':", "'28.67%'}], 'Worst performers': [{'place': 'd1', 'value': '28.67%'}], 'indicator': 'Take Home Ration'}, {'Best performers':", "'value': '0.00%'}, {'place': 'st1', 'value': '67.39%'}, {'place': 'st2', 'value': '70.40%'}], 'indicator': 'Weighing efficiency'}],", "'st7', 'value': '0.00%'}], 'Worst performers': [{'place': 'st7', 'value': '0.00%'}, {'place': 'st2', 'value': '57.97%'},", "'value': '0.00%'}, {'place': 'st1', 'value': '14.60%'}, {'place': 'st2', 'value': '34.75%'}], 'indicator': 'Take Home", "of States Covered': 3}, 'Service Delivery': { '% of children between 3-6 years", "atleast 21+ days': '25.32%', '% of trimester three women counselled 
on immediate and", "5, 'Number of Districts Covered': 4, 'Number of States Covered': 3}, 'Service Delivery':", "'Home Visits'}]], 'Service Delivery': [[{'Best performers': [{'place': 'st2', 'value': '8.41%'}, {'place': 'st1', 'value':", ") expected = {'ICDS CAS Coverage': [[{'Best performers': [{'place': 'st1', 'value': '64.80%'}, {'place':", "'66.74%'}], 'Worst performers': [{'place': 'd1', 'value': '66.74%'}], 'indicator': 'Weighing efficiency'}], [{'Best performers': [{'place':", "'0.00%'}, {'place': 'st1', 'value': '67.39%'}, {'place': 'st2', 'value': '70.40%'}], 'indicator': 'Weighing efficiency'}], [{'Best", "{'place': 'st1', 'value': '0.66%'}], 'indicator': 'Home Visits'}]], 'Service Delivery': [[{'Best performers': [{'place': 'st2',", "'value': '1.10%'}, {'place': 'st1', 'value': '0.95%'}, {'place': 'st7', 'value': '0.00%'}], 'Worst performers': [{'place':", "21+ days': '6.66%', '% of children between 3-6 years provided SNP for atleast", "[{'place': 'st2', 'value': '70.40%'}, {'place': 'st1', 'value': '67.39%'}, {'place': 'st7', 'value': '0.00%'}], 'Worst", "'st2', 'value': '8.41%'}, {'place': 'st1', 'value': '2.52%'}, {'place': 'st7', 'value': '0.00%'}], 'Worst performers':", "between 6 months -3 years, P&LW provided THR for atleast 21+ days': '25.32%',", "'47.76%'}, {'place': 'st7', 'value': '0.00%'}], 'Worst performers': [{'place': 'st7', 'value': '0.00%'}, {'place': 'st2',", "'Worst performers': [{'place': 'd1', 'value': '1.62%'}], 'indicator': 'Home Visits'}]], 'Service Delivery': [ [{'Best", "'Worst performers': [{'place': 'st7', 'value': '0.00%'}, {'place': 'st1', 'value': '67.39%'}, {'place': 'st2', 'value':", "Covered': 3}, 'Service Delivery': { '% of children between 3-6 years provided PSE", "1, }, False ) expected = {'ICDS CAS Coverage': {'% Number of Days", "'Service Delivery': [[{'Best performers': [{'place': 'st2', 'value': '8.41%'}, {'place': 'st1', 'value': '2.52%'}, {'place':", "None, 2, 'quarter', 'comparative', { 'aggregation_level': 1, 
}, False ) expected = {'ICDS", "'0.00%'}, {'place': 'st1', 'value': '2.52%'}, {'place': 'st2', 'value': '8.41%'}], 'indicator': 'Pre-school Education'}, {'Best", "Efficiency': '3.24%', 'Weighing efficiency': '70.27%'}} self.assertDictEqual(expected, data) def test_get_ppr_data_aggregated_quarter(self): self.maxDiff = None data", "1, }, False ) expected = {'ICDS CAS Coverage': [[{'Best performers': [{'place': 'st1',", "{'Best performers': [{'place': 'd1', 'value': '1.62%'}], 'Worst performers': [{'place': 'd1', 'value': '1.62%'}], 'indicator':", "{'place': 'st7', 'value': '0.00%'}], 'Worst performers': [{'place': 'st7', 'value': '0.00%'}, {'place': 'st2', 'value':", "THR for atleast 21+ days': '43.65%', '% of trimester three women counselled on", "def test_get_ppr_data_aggregated_quarter(self): self.maxDiff = None data = get_poshan_progress_dashboard_data( 'icds-cas', 2017, None, 2, 'quarter',", "'Counselling'}], [{'Best performers': [{'place': 'st2', 'value': '34.75%'}, {'place': 'st1', 'value': '14.60%'}, {'place': 'st7',", "{'place': 'st1', 'value': '67.39%'}, {'place': 'st2', 'value': '70.40%'}], 'indicator': 'Weighing efficiency'}], [{'Best performers':", "}, False ) expected = {'ICDS CAS Coverage': [[{'Best performers': [{'place': 'd1', 'value':", "'st7', 'value': '0.00%'}], 'Worst performers': [{'place': 'st7', 'value': '0.00%'}, {'place': 'st1', 'value': '1.44%'},", "CAS Coverage': [[{'Best performers': [{'place': 'st1', 'value': '64.80%'}, {'place': 'st2', 'value': '47.76%'}, {'place':", "opened': '118.18%', '% of Home Visits': '0.79%', 'Number of AWCs Launched': 22, 'Number", "'Take Home Ration'}, {'Best performers': [{'place': 'st2', 'value': '1.10%'}, {'place': 'st1', 'value': '0.95%'},", "'% of trimester three women counselled on immediate and EBF': '72.15%', 'Height Measurement", "= {'ICDS CAS Coverage': [[{'Best performers': [{'place': 'd1', 'value': '142.40%'}], 'Worst performers': [{'place':", "[[{'Best performers': [{'place': 'd1', 'value': 
'142.40%'}], 'Worst performers': [{'place': 'd1', 'value': '142.40%'}], 'indicator':", "[{'place': 'd1', 'value': '1.47%'}], 'Worst performers': [{'place': 'd1', 'value': '1.47%'}], 'indicator': 'Height Measurement", "'0.66%'}], 'indicator': 'Home Visits'}]], 'Service Delivery': [[{'Best performers': [{'place': 'st2', 'value': '8.41%'}, {'place':", "'Number of AWCs Launched': 22, 'Number of Blocks Covered': 5, 'Number of Districts", "'Worst performers': [{'place': 'st7', 'value': '0.00%'}, {'place': 'st1', 'value': '0.95%'}, {'place': 'st2', 'value':", "'0.00%'}, {'place': 'st1', 'value': '0.95%'}, {'place': 'st2', 'value': '1.10%'}], 'indicator': 'Supplementary Nutrition'}]]} self.assertDictEqual(expected,", "'d1', 'value': '1.45%'}], 'Worst performers': [{'place': 'd1', 'value': '1.45%'}], 'indicator': 'Pre-school Education'}, {'Best", "'value': '0.83%'}], 'indicator': 'Supplementary Nutrition'}]]} self.assertDictEqual(expected, data) def test_get_ppr_data_comparative_quarter(self): self.maxDiff = None data", "'st1', 'value': '14.60%'}, {'place': 'st2', 'value': '34.75%'}], 'indicator': 'Take Home Ration'}, {'Best performers':", "{'% Number of Days AWC Were opened': '53.27%', '% of Home Visits': '0.32%',", "'st7', 'value': '0.00%'}, {'place': 'st1', 'value': '0.95%'}, {'place': 'st2', 'value': '1.10%'}], 'indicator': 'Supplementary", "Coverage': [[{'Best performers': [{'place': 'st1', 'value': '64.80%'}, {'place': 'st2', 'value': '47.76%'}, {'place': 'st7',", "'st2', 'value': '47.76%'}, {'place': 'st7', 'value': '0.00%'}], 'Worst performers': [{'place': 'st7', 'value': '0.00%'},", "'6.66%', '% of children between 3-6 years provided SNP for atleast 21+ days':", "2, 'month', 'comparative', { 'aggregation_level': 2, 'state_id': 'st1', }, False ) expected =", "= None data = get_poshan_progress_dashboard_data( 'icds-cas', 2017, 5, 2, 'month', 'comparative', { 'aggregation_level':", "'0.00%'}, {'place': 'st1', 'value': '14.60%'}, {'place': 'st2', 'value': '34.75%'}], 
'indicator': 'Take Home Ration'},", "{'place': 'st1', 'value': '2.52%'}, {'place': 'st2', 'value': '8.41%'}], 'indicator': 'Pre-school Education'}, {'Best performers':", "from custom.icds_reports.reports.poshan_progress_dashboard_data import get_poshan_progress_dashboard_data class TestPPDData(TestCase): def test_get_ppr_data_comparative_month(self): self.maxDiff = None data =", "data) def test_get_ppr_data_aggregated_quarter(self): self.maxDiff = None data = get_poshan_progress_dashboard_data( 'icds-cas', 2017, None, 2,", "'st1', 'value': '2.52%'}, {'place': 'st7', 'value': '0.00%'}], 'Worst performers': [{'place': 'st7', 'value': '0.00%'},", "[[{'Best performers': [{'place': 'st1', 'value': '64.80%'}, {'place': 'st2', 'value': '47.76%'}, {'place': 'st7', 'value':", "'60.32%'}, {'place': 'st2', 'value': '57.97%'}, {'place': 'st7', 'value': '0.00%'}], 'Worst performers': [{'place': 'st7',", "2017, 5, 2, 'month', 'comparative', { 'aggregation_level': 2, 'state_id': 'st1', }, False )", "of children between 3-6 years provided PSE for atleast 21+ days': '6.66%', '%", "{'place': 'st2', 'value': '0.00%'}, {'place': 'st7', 'value': '0.00%'}], 'Worst performers': [{'place': 'st7', 'value':", "EBF': '72.15%', 'Height Measurement Efficiency': '3.24%', 'Weighing efficiency': '70.27%'}} self.assertDictEqual(expected, data) def test_get_ppr_data_aggregated_quarter(self):", "'st1', 'value': '64.80%'}], 'indicator': 'AWC Open'}, {'Best performers': [{'place': 'st1', 'value': '0.66%'}, {'place':", "'Worst performers': [{'place': 'st7', 'value': '0.00%'}, {'place': 'st2', 'value': '47.76%'}, {'place': 'st1', 'value':", "'st2', 'value': '70.40%'}], 'indicator': 'Weighing efficiency'}], [{'Best performers': [{'place': 'st2', 'value': '2.89%'}, {'place':", "{'place': 'st2', 'value': '2.89%'}], 'indicator': 'Height Measurement Efficiency'}, {'Best performers': [{'place': 'st1', 'value':", "'67.39%'}, {'place': 'st7', 'value': '0.00%'}], 'Worst performers': [{'place': 'st7', 'value': 
'0.00%'}, {'place': 'st1',", "test_get_ppr_data_aggregated_month(self): self.maxDiff = None data = get_poshan_progress_dashboard_data( 'icds-cas', 2017, 5, 2, 'month', 'aggregated',", "'icds-cas', 2017, 5, 2, 'month', 'aggregated', { 'aggregation_level': 1, }, False ) expected", "atleast 21+ days': '43.65%', '% of trimester three women counselled on immediate and", "'st1', 'value': '0.95%'}, {'place': 'st7', 'value': '0.00%'}], 'Worst performers': [{'place': 'st7', 'value': '0.00%'},", "[{'place': 'd1', 'value': '28.67%'}], 'indicator': 'Take Home Ration'}, {'Best performers': [{'place': 'd1', 'value':", "= get_poshan_progress_dashboard_data( 'icds-cas', 2017, None, 2, 'quarter', 'aggregated', { 'aggregation_level': 1, }, False", "performers': [{'place': 'd1', 'value': '0.83%'}], 'indicator': 'Supplementary Nutrition'}]]} self.assertDictEqual(expected, data) def test_get_ppr_data_comparative_quarter(self): self.maxDiff", "efficiency': '70.27%'}} self.assertDictEqual(expected, data) def test_get_ppr_data_aggregated_quarter(self): self.maxDiff = None data = get_poshan_progress_dashboard_data( 'icds-cas',", "'st1', 'value': '67.39%'}, {'place': 'st7', 'value': '0.00%'}], 'Worst performers': [{'place': 'st7', 'value': '0.00%'},", "'43.65%', '% of trimester three women counselled on immediate and EBF': '72.15%', 'Height", "'142.40%'}], 'Worst performers': [{'place': 'd1', 'value': '142.40%'}], 'indicator': 'AWC Open'}, {'Best performers': [{'place':", "expected = {'ICDS CAS Coverage': {'% Number of Days AWC Were opened': '53.27%',", "'% of children between 3-6 years provided SNP for atleast 21+ days': '1.08%',", "{'Best performers': [{'place': 'd1', 'value': '66.74%'}], 'Worst performers': [{'place': 'd1', 'value': '66.74%'}], 'indicator':", "'st1', 'value': '1.44%'}, {'place': 'st7', 'value': '0.00%'}], 'Worst performers': [{'place': 'st7', 'value': '0.00%'},", "of children between 3-6 years provided SNP for atleast 21+ days': '1.08%', '%", "'indicator': 'Supplementary 
Nutrition'}]]} self.assertDictEqual(expected, data) def test_get_ppr_data_aggregated_month(self): self.maxDiff = None data = get_poshan_progress_dashboard_data(", "{'place': 'st1', 'value': '1.44%'}, {'place': 'st7', 'value': '0.00%'}], 'Worst performers': [{'place': 'st7', 'value':", "'67.39%'}, {'place': 'st2', 'value': '70.40%'}], 'indicator': 'Weighing efficiency'}], [{'Best performers': [{'place': 'st2', 'value':", "[{'place': 'd1', 'value': '1.62%'}], 'Worst performers': [{'place': 'd1', 'value': '1.62%'}], 'indicator': 'Home Visits'}]],", "{'place': 'st2', 'value': '57.97%'}, {'place': 'st7', 'value': '0.00%'}], 'Worst performers': [{'place': 'st7', 'value':", "[{'Best performers': [{'place': 'd1', 'value': '28.67%'}], 'Worst performers': [{'place': 'd1', 'value': '28.67%'}], 'indicator':", "performers': [{'place': 'st7', 'value': '0.00%'}, {'place': 'st1', 'value': '0.95%'}, {'place': 'st2', 'value': '1.10%'}],", "on immediate and EBF': '72.15%', 'Height Measurement Efficiency': '3.24%', 'Weighing efficiency': '70.27%'}} self.assertDictEqual(expected,", "'st1', 'value': '0.66%'}], 'indicator': 'Home Visits'}]], 'Service Delivery': [[{'Best performers': [{'place': 'st2', 'value':", "opened': '53.27%', '% of Home Visits': '0.32%', 'Number of AWCs Launched': 22, 'Number", "'st7', 'value': '0.00%'}], 'Worst performers': [{'place': 'st7', 'value': '0.00%'}, {'place': 'st1', 'value': '67.39%'},", "CAS Coverage': {'% Number of Days AWC Were opened': '118.18%', '% of Home", "for atleast 21+ days': '1.51%', '% of children between 6 months -3 years,", "'value': '2.89%'}, {'place': 'st1', 'value': '1.44%'}, {'place': 'st7', 'value': '0.00%'}], 'Worst performers': [{'place':", "'value': '70.40%'}], 'indicator': 'Weighing efficiency'}], [{'Best performers': [{'place': 'st2', 'value': '2.89%'}, {'place': 'st1',", "'2.52%'}, {'place': 'st2', 'value': '8.41%'}], 'indicator': 'Pre-school Education'}, {'Best performers': [{'place': 'st2', 'value':", "of AWCs Launched': 22, 
'Number of Blocks Covered': 5, 'Number of Districts Covered':", "'st2', 'value': '34.75%'}], 'indicator': 'Take Home Ration'}, {'Best performers': [{'place': 'st2', 'value': '1.10%'},", "'st7', 'value': '0.00%'}, {'place': 'st2', 'value': '47.76%'}, {'place': 'st1', 'value': '64.80%'}], 'indicator': 'AWC", "Home Ration'}, {'Best performers': [{'place': 'st2', 'value': '1.10%'}, {'place': 'st1', 'value': '0.95%'}, {'place':", "'value': '0.00%'}], 'Worst performers': [{'place': 'st7', 'value': '0.00%'}, {'place': 'st2', 'value': '47.76%'}, {'place':", "'Service Delivery': [ [{'Best performers': [{'place': 'd1', 'value': '1.45%'}], 'Worst performers': [{'place': 'd1',", "{'place': 'st1', 'value': '1.44%'}, {'place': 'st2', 'value': '2.89%'}], 'indicator': 'Height Measurement Efficiency'}, {'Best", "[[{'Best performers': [{'place': 'st2', 'value': '8.41%'}, {'place': 'st1', 'value': '2.52%'}, {'place': 'st7', 'value':", "Days AWC Were opened': '53.27%', '% of Home Visits': '0.32%', 'Number of AWCs", "of trimester three women counselled on immediate and EBF': '72.15%', 'Height Measurement Efficiency':", "days': '43.65%', '% of trimester three women counselled on immediate and EBF': '72.15%',", "performers': [{'place': 'st7', 'value': '0.00%'}, {'place': 'st1', 'value': '1.44%'}, {'place': 'st2', 'value': '2.89%'}],", "'0.00%'}], 'Worst performers': [{'place': 'st7', 'value': '0.00%'}, {'place': 'st1', 'value': '2.52%'}, {'place': 'st2',", "'Worst performers': [{'place': 'd1', 'value': '142.40%'}], 'indicator': 'AWC Open'}, {'Best performers': [{'place': 'd1',", "22, 'Number of Blocks Covered': 5, 'Number of Districts Covered': 4, 'Number of", "Coverage': {'% Number of Days AWC Were opened': '53.27%', '% of Home Visits':", "'Supplementary Nutrition'}]]} self.assertDictEqual(expected, data) def test_get_ppr_data_aggregated_month(self): self.maxDiff = None data = get_poshan_progress_dashboard_data( 'icds-cas',", "{'Best performers': [{'place': 'd1', 'value': '72.97%'}], 
'Worst performers': [{'place': 'd1', 'value': '72.97%'}], 'indicator':", "'d1', 'value': '72.97%'}], 'Worst performers': [{'place': 'd1', 'value': '72.97%'}], 'indicator': 'Counselling'}], [{'Best performers':", "'value': '2.89%'}], 'indicator': 'Height Measurement Efficiency'}, {'Best performers': [{'place': 'st1', 'value': '60.32%'}, {'place':", "atleast 21+ days': '5.54%', '% of children between 3-6 years provided SNP for", "performers': [{'place': 'd1', 'value': '1.47%'}], 'indicator': 'Height Measurement Efficiency'}, {'Best performers': [{'place': 'd1',", "of children between 6 months -3 years, P&LW provided THR for atleast 21+", "3-6 years provided PSE for atleast 21+ days': '6.66%', '% of children between", "expected = {'ICDS CAS Coverage': [[{'Best performers': [{'place': 'd1', 'value': '142.40%'}], 'Worst performers':", "'57.97%'}, {'place': 'st1', 'value': '60.32%'}], 'indicator': 'Counselling'}], [{'Best performers': [{'place': 'st2', 'value': '34.75%'},", "children between 3-6 years provided PSE for atleast 21+ days': '5.54%', '% of", "'indicator': 'Supplementary Nutrition'}]]} self.assertDictEqual(expected, data) def test_get_ppr_data_comparative_quarter(self): self.maxDiff = None data = get_poshan_progress_dashboard_data(", "2, 'quarter', 'aggregated', { 'aggregation_level': 1, }, False ) expected = {'ICDS CAS", "{'place': 'st2', 'value': '1.10%'}], 'indicator': 'Supplementary Nutrition'}]]} self.assertDictEqual(expected, data) def test_get_ppr_data_aggregated_month(self): self.maxDiff =", "performers': [{'place': 'd1', 'value': '142.40%'}], 'indicator': 'AWC Open'}, {'Best performers': [{'place': 'd1', 'value':", "2017, None, 2, 'quarter', 'comparative', { 'aggregation_level': 1, }, False ) expected =", "'0.00%'}, {'place': 'st2', 'value': '47.76%'}, {'place': 'st1', 'value': '64.80%'}], 'indicator': 'AWC Open'}, {'Best", "between 3-6 years provided SNP for atleast 21+ days': '1.51%', '% of children", "-3 years, P&LW provided THR for atleast 21+ 
days': '25.32%', '% of trimester", "6 months -3 years, P&LW provided THR for atleast 21+ days': '43.65%', '%", "'% of children between 3-6 years provided PSE for atleast 21+ days': '5.54%',", "'indicator': 'Counselling'}], [{'Best performers': [{'place': 'st2', 'value': '34.75%'}, {'place': 'st1', 'value': '14.60%'}, {'place':", "'st7', 'value': '0.00%'}], 'Worst performers': [{'place': 'st7', 'value': '0.00%'}, {'place': 'st1', 'value': '2.52%'},", "[ [{'Best performers': [{'place': 'd1', 'value': '1.45%'}], 'Worst performers': [{'place': 'd1', 'value': '1.45%'}],", "Ration'}, {'Best performers': [{'place': 'd1', 'value': '0.83%'}], 'Worst performers': [{'place': 'd1', 'value': '0.83%'}],", "Visits'}]], 'Service Delivery': [ [{'Best performers': [{'place': 'd1', 'value': '1.45%'}], 'Worst performers': [{'place':", "'1.10%'}], 'indicator': 'Supplementary Nutrition'}]]} self.assertDictEqual(expected, data) def test_get_ppr_data_aggregated_month(self): self.maxDiff = None data =", "'month', 'aggregated', { 'aggregation_level': 1, }, False ) expected = {'ICDS CAS Coverage':", "P&LW provided THR for atleast 21+ days': '43.65%', '% of trimester three women", "[{'place': 'st2', 'value': '2.89%'}, {'place': 'st1', 'value': '1.44%'}, {'place': 'st7', 'value': '0.00%'}], 'Worst", "months -3 years, P&LW provided THR for atleast 21+ days': '43.65%', '% of", "False ) expected = {'ICDS CAS Coverage': [[{'Best performers': [{'place': 'st1', 'value': '64.80%'},", "for atleast 21+ days': '1.08%', '% of children between 6 months -3 years,", "{'Best performers': [{'place': 'st1', 'value': '60.32%'}, {'place': 'st2', 'value': '57.97%'}, {'place': 'st7', 'value':", "'value': '0.00%'}, {'place': 'st2', 'value': '57.97%'}, {'place': 'st1', 'value': '60.32%'}], 'indicator': 'Counselling'}], [{'Best", "{'place': 'st1', 'value': '2.52%'}, {'place': 'st7', 'value': '0.00%'}], 'Worst performers': [{'place': 'st7', 'value':", "performers': [{'place': 'st7', 'value': '0.00%'}, {'place': 'st1', 
'value': '14.60%'}, {'place': 'st2', 'value': '34.75%'}],", "women counselled on immediate and EBF': '72.15%', 'Height Measurement Efficiency': '3.24%', 'Weighing efficiency':", "[{'place': 'st7', 'value': '0.00%'}, {'place': 'st1', 'value': '67.39%'}, {'place': 'st2', 'value': '70.40%'}], 'indicator':", "get_poshan_progress_dashboard_data class TestPPDData(TestCase): def test_get_ppr_data_comparative_month(self): self.maxDiff = None data = get_poshan_progress_dashboard_data( 'icds-cas', 2017,", "4, 'Number of States Covered': 3}, 'Service Delivery': { '% of children between", "'66.74%'}], 'indicator': 'Weighing efficiency'}], [{'Best performers': [{'place': 'd1', 'value': '1.47%'}], 'Worst performers': [{'place':", "{'% Number of Days AWC Were opened': '118.18%', '% of Home Visits': '0.79%',", "'14.60%'}, {'place': 'st7', 'value': '0.00%'}], 'Worst performers': [{'place': 'st7', 'value': '0.00%'}, {'place': 'st1',", "[{'place': 'd1', 'value': '1.47%'}], 'indicator': 'Height Measurement Efficiency'}, {'Best performers': [{'place': 'd1', 'value':", "performers': [{'place': 'd1', 'value': '1.45%'}], 'Worst performers': [{'place': 'd1', 'value': '1.45%'}], 'indicator': 'Pre-school", "'Counselling'}], [{'Best performers': [{'place': 'd1', 'value': '28.67%'}], 'Worst performers': [{'place': 'd1', 'value': '28.67%'}],", "21+ days': '1.51%', '% of children between 6 months -3 years, P&LW provided", ") expected = {'ICDS CAS Coverage': [[{'Best performers': [{'place': 'd1', 'value': '142.40%'}], 'Worst", "'value': '28.67%'}], 'indicator': 'Take Home Ration'}, {'Best performers': [{'place': 'd1', 'value': '0.83%'}], 'Worst", "'2.89%'}], 'indicator': 'Height Measurement Efficiency'}, {'Best performers': [{'place': 'st1', 'value': '60.32%'}, {'place': 'st2',", "data = get_poshan_progress_dashboard_data( 'icds-cas', 2017, None, 2, 'quarter', 'aggregated', { 'aggregation_level': 1, },", "django.test import TestCase from custom.icds_reports.reports.poshan_progress_dashboard_data 
import get_poshan_progress_dashboard_data class TestPPDData(TestCase): def test_get_ppr_data_comparative_month(self): self.maxDiff =", "'0.00%'}], 'Worst performers': [{'place': 'st7', 'value': '0.00%'}, {'place': 'st1', 'value': '14.60%'}, {'place': 'st2',", "'value': '1.10%'}], 'indicator': 'Supplementary Nutrition'}]]} self.assertDictEqual(expected, data) def test_get_ppr_data_aggregated_month(self): self.maxDiff = None data", "AWC Were opened': '118.18%', '% of Home Visits': '0.79%', 'Number of AWCs Launched':", "of Districts Covered': 4, 'Number of States Covered': 3}, 'Service Delivery': { '%", "CAS Coverage': [[{'Best performers': [{'place': 'd1', 'value': '142.40%'}], 'Worst performers': [{'place': 'd1', 'value':", "{ 'aggregation_level': 2, 'state_id': 'st1', }, False ) expected = {'ICDS CAS Coverage':", "'quarter', 'comparative', { 'aggregation_level': 1, }, False ) expected = {'ICDS CAS Coverage':", "Education'}, {'Best performers': [{'place': 'st2', 'value': '70.40%'}, {'place': 'st1', 'value': '67.39%'}, {'place': 'st7',", "performers': [{'place': 'd1', 'value': '28.67%'}], 'Worst performers': [{'place': 'd1', 'value': '28.67%'}], 'indicator': 'Take", "[{'place': 'st1', 'value': '60.32%'}, {'place': 'st2', 'value': '57.97%'}, {'place': 'st7', 'value': '0.00%'}], 'Worst", "= get_poshan_progress_dashboard_data( 'icds-cas', 2017, 5, 2, 'month', 'aggregated', { 'aggregation_level': 1, }, False", "'value': '0.00%'}], 'Worst performers': [{'place': 'st7', 'value': '0.00%'}, {'place': 'st2', 'value': '0.00%'}, {'place':", "= get_poshan_progress_dashboard_data( 'icds-cas', 2017, 5, 2, 'month', 'comparative', { 'aggregation_level': 2, 'state_id': 'st1',", "Nutrition'}]]} self.assertDictEqual(expected, data) def test_get_ppr_data_aggregated_month(self): self.maxDiff = None data = get_poshan_progress_dashboard_data( 'icds-cas', 2017,", "3-6 years provided SNP for atleast 21+ days': '1.51%', '% of children between", "{'place': 'st1', 'value': '64.80%'}], 
'indicator': 'AWC Open'}, {'Best performers': [{'place': 'st1', 'value': '0.66%'},", "'quarter', 'aggregated', { 'aggregation_level': 1, }, False ) expected = {'ICDS CAS Coverage':", "'icds-cas', 2017, 5, 2, 'month', 'comparative', { 'aggregation_level': 2, 'state_id': 'st1', }, False", "for atleast 21+ days': '43.65%', '% of trimester three women counselled on immediate", "'1.08%', '% of children between 6 months -3 years, P&LW provided THR for", "'st2', 'value': '57.97%'}, {'place': 'st7', 'value': '0.00%'}], 'Worst performers': [{'place': 'st7', 'value': '0.00%'},", "'value': '0.00%'}, {'place': 'st1', 'value': '1.44%'}, {'place': 'st2', 'value': '2.89%'}], 'indicator': 'Height Measurement", "'Supplementary Nutrition'}]]} self.assertDictEqual(expected, data) def test_get_ppr_data_comparative_quarter(self): self.maxDiff = None data = get_poshan_progress_dashboard_data( 'icds-cas',", "days': '5.54%', '% of children between 3-6 years provided SNP for atleast 21+", "'0.00%'}, {'place': 'st1', 'value': '0.66%'}], 'indicator': 'Home Visits'}]], 'Service Delivery': [[{'Best performers': [{'place':", "'1.10%'}, {'place': 'st1', 'value': '0.95%'}, {'place': 'st7', 'value': '0.00%'}], 'Worst performers': [{'place': 'st7',", "[{'place': 'd1', 'value': '72.97%'}], 'indicator': 'Counselling'}], [{'Best performers': [{'place': 'd1', 'value': '28.67%'}], 'Worst", "'st1', 'value': '2.52%'}, {'place': 'st2', 'value': '8.41%'}], 'indicator': 'Pre-school Education'}, {'Best performers': [{'place':", "{'place': 'st2', 'value': '57.97%'}, {'place': 'st1', 'value': '60.32%'}], 'indicator': 'Counselling'}], [{'Best performers': [{'place':", "'value': '142.40%'}], 'Worst performers': [{'place': 'd1', 'value': '142.40%'}], 'indicator': 'AWC Open'}, {'Best performers':", "'Height Measurement Efficiency'}, {'Best performers': [{'place': 'st1', 'value': '60.32%'}, {'place': 'st2', 'value': '57.97%'},", "self.maxDiff = None data = get_poshan_progress_dashboard_data( 'icds-cas', 2017, 5, 2, 
'month', 'aggregated', {", "'indicator': 'AWC Open'}, {'Best performers': [{'place': 'd1', 'value': '1.62%'}], 'Worst performers': [{'place': 'd1',", "'64.80%'}, {'place': 'st2', 'value': '47.76%'}, {'place': 'st7', 'value': '0.00%'}], 'Worst performers': [{'place': 'st7',", "'st1', 'value': '64.80%'}, {'place': 'st2', 'value': '47.76%'}, {'place': 'st7', 'value': '0.00%'}], 'Worst performers':", "'indicator': 'Counselling'}], [{'Best performers': [{'place': 'd1', 'value': '28.67%'}], 'Worst performers': [{'place': 'd1', 'value':", "Number of Days AWC Were opened': '118.18%', '% of Home Visits': '0.79%', 'Number", "'value': '47.76%'}, {'place': 'st1', 'value': '64.80%'}], 'indicator': 'AWC Open'}, {'Best performers': [{'place': 'st1',", "'st2', 'value': '70.40%'}, {'place': 'st1', 'value': '67.39%'}, {'place': 'st7', 'value': '0.00%'}], 'Worst performers':", "'value': '1.45%'}], 'indicator': 'Pre-school Education'}, {'Best performers': [{'place': 'd1', 'value': '66.74%'}], 'Worst performers':", "'Service Delivery': { '% of children between 3-6 years provided PSE for atleast", "{'place': 'st1', 'value': '67.39%'}, {'place': 'st7', 'value': '0.00%'}], 'Worst performers': [{'place': 'st7', 'value':", "'st2', 'value': '34.75%'}, {'place': 'st1', 'value': '14.60%'}, {'place': 'st7', 'value': '0.00%'}], 'Worst performers':", "Home Ration'}, {'Best performers': [{'place': 'd1', 'value': '0.83%'}], 'Worst performers': [{'place': 'd1', 'value':", "performers': [{'place': 'd1', 'value': '1.62%'}], 'indicator': 'Home Visits'}]], 'Service Delivery': [ [{'Best performers':", "Ration'}, {'Best performers': [{'place': 'st2', 'value': '1.10%'}, {'place': 'st1', 'value': '0.95%'}, {'place': 'st7',", "'72.97%'}], 'Worst performers': [{'place': 'd1', 'value': '72.97%'}], 'indicator': 'Counselling'}], [{'Best performers': [{'place': 'd1',", "'indicator': 'Height Measurement Efficiency'}, {'Best performers': [{'place': 'st1', 'value': '60.32%'}, {'place': 'st2', 'value':", "provided 
SNP for atleast 21+ days': '1.08%', '% of children between 6 months", "performers': [{'place': 'st7', 'value': '0.00%'}, {'place': 'st2', 'value': '0.00%'}, {'place': 'st1', 'value': '0.66%'}],", "21+ days': '1.08%', '% of children between 6 months -3 years, P&LW provided", "None data = get_poshan_progress_dashboard_data( 'icds-cas', 2017, 5, 2, 'month', 'comparative', { 'aggregation_level': 2,", "'st1', 'value': '14.60%'}, {'place': 'st7', 'value': '0.00%'}], 'Worst performers': [{'place': 'st7', 'value': '0.00%'},", "'value': '0.66%'}], 'indicator': 'Home Visits'}]], 'Service Delivery': [[{'Best performers': [{'place': 'st2', 'value': '8.41%'},", "'st2', 'value': '1.10%'}], 'indicator': 'Supplementary Nutrition'}]]} self.assertDictEqual(expected, data) def test_get_ppr_data_aggregated_month(self): self.maxDiff = None", "'st2', 'value': '0.00%'}, {'place': 'st7', 'value': '0.00%'}], 'Worst performers': [{'place': 'st7', 'value': '0.00%'},", "{'place': 'st1', 'value': '14.60%'}, {'place': 'st7', 'value': '0.00%'}], 'Worst performers': [{'place': 'st7', 'value':", "None data = get_poshan_progress_dashboard_data( 'icds-cas', 2017, None, 2, 'quarter', 'comparative', { 'aggregation_level': 1,", "'70.40%'}], 'indicator': 'Weighing efficiency'}], [{'Best performers': [{'place': 'st2', 'value': '2.89%'}, {'place': 'st1', 'value':", "[{'place': 'st2', 'value': '1.10%'}, {'place': 'st1', 'value': '0.95%'}, {'place': 'st7', 'value': '0.00%'}], 'Worst", "Days AWC Were opened': '118.18%', '% of Home Visits': '0.79%', 'Number of AWCs", "Visits': '0.79%', 'Number of AWCs Launched': 22, 'Number of Blocks Covered': 5, 'Number", "TestPPDData(TestCase): def test_get_ppr_data_comparative_month(self): self.maxDiff = None data = get_poshan_progress_dashboard_data( 'icds-cas', 2017, 5, 2,", "{'place': 'st1', 'value': '14.60%'}, {'place': 'st2', 'value': '34.75%'}], 'indicator': 'Take Home Ration'}, {'Best", "{'ICDS CAS Coverage': [[{'Best performers': [{'place': 'd1', 'value': 
'142.40%'}], 'Worst performers': [{'place': 'd1',", "performers': [{'place': 'st2', 'value': '70.40%'}, {'place': 'st1', 'value': '67.39%'}, {'place': 'st7', 'value': '0.00%'}],", "'indicator': 'Height Measurement Efficiency'}, {'Best performers': [{'place': 'd1', 'value': '72.97%'}], 'Worst performers': [{'place':", "'value': '0.66%'}, {'place': 'st2', 'value': '0.00%'}, {'place': 'st7', 'value': '0.00%'}], 'Worst performers': [{'place':", "{ '% of children between 3-6 years provided PSE for atleast 21+ days':", "3}, 'Service Delivery': { '% of children between 3-6 years provided PSE for", "[{'place': 'st1', 'value': '64.80%'}, {'place': 'st2', 'value': '47.76%'}, {'place': 'st7', 'value': '0.00%'}], 'Worst", "'st2', 'value': '57.97%'}, {'place': 'st1', 'value': '60.32%'}], 'indicator': 'Counselling'}], [{'Best performers': [{'place': 'st2',", "'value': '0.00%'}, {'place': 'st2', 'value': '0.00%'}, {'place': 'st1', 'value': '0.66%'}], 'indicator': 'Home Visits'}]],", "'st7', 'value': '0.00%'}, {'place': 'st1', 'value': '2.52%'}, {'place': 'st2', 'value': '8.41%'}], 'indicator': 'Pre-school", "[{'place': 'st7', 'value': '0.00%'}, {'place': 'st2', 'value': '0.00%'}, {'place': 'st1', 'value': '0.66%'}], 'indicator':", "immediate and EBF': '59.09%', 'Height Measurement Efficiency': '2.24%', 'Weighing efficiency': '68.81%'}} self.assertDictEqual(expected, data)", "performers': [{'place': 'd1', 'value': '1.47%'}], 'Worst performers': [{'place': 'd1', 'value': '1.47%'}], 'indicator': 'Height", "'value': '0.00%'}], 'Worst performers': [{'place': 'st7', 'value': '0.00%'}, {'place': 'st2', 'value': '57.97%'}, {'place':", "'1.51%', '% of children between 6 months -3 years, P&LW provided THR for", "'comparative', { 'aggregation_level': 1, }, False ) expected = {'ICDS CAS Coverage': [[{'Best", "'value': '2.52%'}, {'place': 'st7', 'value': '0.00%'}], 'Worst performers': [{'place': 'st7', 'value': '0.00%'}, {'place':", "'value': '67.39%'}, {'place': 'st2', 'value': '70.40%'}], 
'indicator': 'Weighing efficiency'}], [{'Best performers': [{'place': 'st2',", "days': '6.66%', '% of children between 3-6 years provided SNP for atleast 21+", "'d1', 'value': '1.47%'}], 'indicator': 'Height Measurement Efficiency'}, {'Best performers': [{'place': 'd1', 'value': '72.97%'}],", "'value': '64.80%'}], 'indicator': 'AWC Open'}, {'Best performers': [{'place': 'st1', 'value': '0.66%'}, {'place': 'st2',", "'value': '0.83%'}], 'Worst performers': [{'place': 'd1', 'value': '0.83%'}], 'indicator': 'Supplementary Nutrition'}]]} self.assertDictEqual(expected, data)", "'Worst performers': [{'place': 'st7', 'value': '0.00%'}, {'place': 'st1', 'value': '14.60%'}, {'place': 'st2', 'value':", "counselled on immediate and EBF': '59.09%', 'Height Measurement Efficiency': '2.24%', 'Weighing efficiency': '68.81%'}}", "between 3-6 years provided PSE for atleast 21+ days': '6.66%', '% of children", "{'place': 'st2', 'value': '70.40%'}], 'indicator': 'Weighing efficiency'}], [{'Best performers': [{'place': 'st2', 'value': '2.89%'},", "performers': [{'place': 'st7', 'value': '0.00%'}, {'place': 'st2', 'value': '47.76%'}, {'place': 'st1', 'value': '64.80%'}],", "test_get_ppr_data_comparative_quarter(self): self.maxDiff = None data = get_poshan_progress_dashboard_data( 'icds-cas', 2017, None, 2, 'quarter', 'comparative',", "[{'place': 'st2', 'value': '34.75%'}, {'place': 'st1', 'value': '14.60%'}, {'place': 'st7', 'value': '0.00%'}], 'Worst", "'Worst performers': [{'place': 'd1', 'value': '1.47%'}], 'indicator': 'Height Measurement Efficiency'}, {'Best performers': [{'place':", "'Worst performers': [{'place': 'd1', 'value': '0.83%'}], 'indicator': 'Supplementary Nutrition'}]]} self.assertDictEqual(expected, data) def test_get_ppr_data_comparative_quarter(self):", "self.maxDiff = None data = get_poshan_progress_dashboard_data( 'icds-cas', 2017, 5, 2, 'month', 'comparative', {", "'d1', 'value': '142.40%'}], 'Worst performers': [{'place': 'd1', 'value': '142.40%'}], 'indicator': 
'AWC Open'}, {'Best", "[{'place': 'd1', 'value': '0.83%'}], 'Worst performers': [{'place': 'd1', 'value': '0.83%'}], 'indicator': 'Supplementary Nutrition'}]]}", "'aggregated', { 'aggregation_level': 1, }, False ) expected = {'ICDS CAS Coverage': {'%", "'Height Measurement Efficiency'}, {'Best performers': [{'place': 'd1', 'value': '72.97%'}], 'Worst performers': [{'place': 'd1',", "'d1', 'value': '1.45%'}], 'indicator': 'Pre-school Education'}, {'Best performers': [{'place': 'd1', 'value': '66.74%'}], 'Worst" ]
[ "visvis way. Will run in interactive mode when used in IEP or IPython.", "run in interactive mode when used in IEP or IPython. app.Create() m =", "in IEP or IPython. app.Create() m = MainWindow() app.Run() else: # The native", "a button but = fltk.Fl_Button(10,10,70,30, 'Click me') but.callback(self._Plot) # Make figure to draw", "= vv.backends.backend_fltk.Figure(100,10,560-110,420-20, \"\") # Make box for resizing box = fltk.Fl_Box(fltk.FL_NO_BOX,100,50, 560-110,420-60,\"\") self.resizable(box)", "me') but.callback(self._Plot) # Make figure to draw stuff in self.fig = vv.backends.backend_fltk.Figure(100,10,560-110,420-20, \"\")", "resizing box = fltk.Fl_Box(fltk.FL_NO_BOX,100,50, 560-110,420-60,\"\") self.resizable(box) box.hide() # Finish self.end() self.show() self.fig._widget.show() def", "fltk import visvis as vv # Create a visvis app instance, which wraps", "the application and start the main loop if True: # The visvis way.", "figure in an FLTK application. \"\"\" import fltk import visvis as vv #", "a panel with a button but = fltk.Fl_Button(10,10,70,30, 'Click me') but.callback(self._Plot) # Make", "only one figure, this is not necessary. #vv.figure(self.fig.nr) # Clear it vv.clf() #", "__init__(self): fltk.Fl_Window.__init__(self, 560, 420, \"Embedding in FLTK\") # Make a panel with a", "embedding a visvis figure in an FLTK application. \"\"\" import fltk import visvis", "\"\"\" This example illustrates embedding a visvis figure in an FLTK application. \"\"\"", "in self.fig = vv.backends.backend_fltk.Figure(100,10,560-110,420-20, \"\") # Make box for resizing box = fltk.Fl_Box(fltk.FL_NO_BOX,100,50,", "# Clear it vv.clf() # Plot vv.plot([1,2,3,1,6]) vv.legend(['this is a line']) # Two", "Finish self.end() self.show() self.fig._widget.show() def _Plot(self, event): # Make sure our figure is", "start the main loop if True: # The visvis way. 
Will run in", "vv.use('fltk') class MainWindow(fltk.Fl_Window): def __init__(self): fltk.Fl_Window.__init__(self, 560, 420, \"Embedding in FLTK\") # Make", "application object. # This needs to be done *before* instantiating the main window.", "True: # The visvis way. Will run in interactive mode when used in", "# This needs to be done *before* instantiating the main window. app =", "560-110,420-60,\"\") self.resizable(box) box.hide() # Finish self.end() self.show() self.fig._widget.show() def _Plot(self, event): # Make", "create the application and start the main loop if True: # The visvis", "application. \"\"\" import fltk import visvis as vv # Create a visvis app", "Make a panel with a button but = fltk.Fl_Button(10,10,70,30, 'Click me') but.callback(self._Plot) #", "application and start the main loop if True: # The visvis way. Will", "is the active one # If only one figure, this is not necessary.", "a visvis figure in an FLTK application. \"\"\" import fltk import visvis as", "= fltk.Fl_Box(fltk.FL_NO_BOX,100,50, 560-110,420-60,\"\") self.resizable(box) box.hide() # Finish self.end() self.show() self.fig._widget.show() def _Plot(self, event):", "app.Create() m = MainWindow() app.Run() else: # The native way. m = MainWindow()", "def _Plot(self, event): # Make sure our figure is the active one #", "wraps an fltk application object. # This needs to be done *before* instantiating", "'Click me') but.callback(self._Plot) # Make figure to draw stuff in self.fig = vv.backends.backend_fltk.Figure(100,10,560-110,420-20,", "is a line']) # Two ways to create the application and start the", "Clear it vv.clf() # Plot vv.plot([1,2,3,1,6]) vv.legend(['this is a line']) # Two ways", "example illustrates embedding a visvis figure in an FLTK application. 
\"\"\" import fltk", "self.resizable(box) box.hide() # Finish self.end() self.show() self.fig._widget.show() def _Plot(self, event): # Make sure", "Make sure our figure is the active one # If only one figure,", "*before* instantiating the main window. app = vv.use('fltk') class MainWindow(fltk.Fl_Window): def __init__(self): fltk.Fl_Window.__init__(self,", "or IPython. app.Create() m = MainWindow() app.Run() else: # The native way. m", "one # If only one figure, this is not necessary. #vv.figure(self.fig.nr) # Clear", "the main loop if True: # The visvis way. Will run in interactive", "way. Will run in interactive mode when used in IEP or IPython. app.Create()", "fltk application object. # This needs to be done *before* instantiating the main", "but = fltk.Fl_Button(10,10,70,30, 'Click me') but.callback(self._Plot) # Make figure to draw stuff in", "\"Embedding in FLTK\") # Make a panel with a button but = fltk.Fl_Button(10,10,70,30,", "box = fltk.Fl_Box(fltk.FL_NO_BOX,100,50, 560-110,420-60,\"\") self.resizable(box) box.hide() # Finish self.end() self.show() self.fig._widget.show() def _Plot(self,", "m = MainWindow() app.Run() else: # The native way. m = MainWindow() fltk.Fl.run()", "figure is the active one # If only one figure, this is not", "a line']) # Two ways to create the application and start the main", "self.end() self.show() self.fig._widget.show() def _Plot(self, event): # Make sure our figure is the", "# Finish self.end() self.show() self.fig._widget.show() def _Plot(self, event): # Make sure our figure", "illustrates embedding a visvis figure in an FLTK application. \"\"\" import fltk import", "instance, which wraps an fltk application object. # This needs to be done", "Create a visvis app instance, which wraps an fltk application object. # This", "box for resizing box = fltk.Fl_Box(fltk.FL_NO_BOX,100,50, 560-110,420-60,\"\") self.resizable(box) box.hide() # Finish self.end() self.show()", "this is not necessary. 
#vv.figure(self.fig.nr) # Clear it vv.clf() # Plot vv.plot([1,2,3,1,6]) vv.legend(['this", "# Plot vv.plot([1,2,3,1,6]) vv.legend(['this is a line']) # Two ways to create the", "button but = fltk.Fl_Button(10,10,70,30, 'Click me') but.callback(self._Plot) # Make figure to draw stuff", "app instance, which wraps an fltk application object. # This needs to be", "the active one # If only one figure, this is not necessary. #vv.figure(self.fig.nr)", "# The visvis way. Will run in interactive mode when used in IEP", "used in IEP or IPython. app.Create() m = MainWindow() app.Run() else: # The", "it vv.clf() # Plot vv.plot([1,2,3,1,6]) vv.legend(['this is a line']) # Two ways to", "if True: # The visvis way. Will run in interactive mode when used", "_Plot(self, event): # Make sure our figure is the active one # If", "# Make sure our figure is the active one # If only one", "when used in IEP or IPython. app.Create() m = MainWindow() app.Run() else: #", "done *before* instantiating the main window. app = vv.use('fltk') class MainWindow(fltk.Fl_Window): def __init__(self):", "The visvis way. Will run in interactive mode when used in IEP or", "to draw stuff in self.fig = vv.backends.backend_fltk.Figure(100,10,560-110,420-20, \"\") # Make box for resizing", "Make box for resizing box = fltk.Fl_Box(fltk.FL_NO_BOX,100,50, 560-110,420-60,\"\") self.resizable(box) box.hide() # Finish self.end()", "# Make box for resizing box = fltk.Fl_Box(fltk.FL_NO_BOX,100,50, 560-110,420-60,\"\") self.resizable(box) box.hide() # Finish", "Plot vv.plot([1,2,3,1,6]) vv.legend(['this is a line']) # Two ways to create the application", "to be done *before* instantiating the main window. app = vv.use('fltk') class MainWindow(fltk.Fl_Window):", "window. 
app = vv.use('fltk') class MainWindow(fltk.Fl_Window): def __init__(self): fltk.Fl_Window.__init__(self, 560, 420, \"Embedding in", "line']) # Two ways to create the application and start the main loop", "python \"\"\" This example illustrates embedding a visvis figure in an FLTK application.", "visvis as vv # Create a visvis app instance, which wraps an fltk", "self.show() self.fig._widget.show() def _Plot(self, event): # Make sure our figure is the active", "Make figure to draw stuff in self.fig = vv.backends.backend_fltk.Figure(100,10,560-110,420-20, \"\") # Make box", "not necessary. #vv.figure(self.fig.nr) # Clear it vv.clf() # Plot vv.plot([1,2,3,1,6]) vv.legend(['this is a", "IPython. app.Create() m = MainWindow() app.Run() else: # The native way. m =", "# Make a panel with a button but = fltk.Fl_Button(10,10,70,30, 'Click me') but.callback(self._Plot)", "the main window. app = vv.use('fltk') class MainWindow(fltk.Fl_Window): def __init__(self): fltk.Fl_Window.__init__(self, 560, 420,", "ways to create the application and start the main loop if True: #", "This needs to be done *before* instantiating the main window. app = vv.use('fltk')", "# Create a visvis app instance, which wraps an fltk application object. #", "instantiating the main window. app = vv.use('fltk') class MainWindow(fltk.Fl_Window): def __init__(self): fltk.Fl_Window.__init__(self, 560,", "with a button but = fltk.Fl_Button(10,10,70,30, 'Click me') but.callback(self._Plot) # Make figure to", "for resizing box = fltk.Fl_Box(fltk.FL_NO_BOX,100,50, 560-110,420-60,\"\") self.resizable(box) box.hide() # Finish self.end() self.show() self.fig._widget.show()", "fltk.Fl_Box(fltk.FL_NO_BOX,100,50, 560-110,420-60,\"\") self.resizable(box) box.hide() # Finish self.end() self.show() self.fig._widget.show() def _Plot(self, event): #", "figure, this is not necessary. 
#vv.figure(self.fig.nr) # Clear it vv.clf() # Plot vv.plot([1,2,3,1,6])", "self.fig._widget.show() def _Plot(self, event): # Make sure our figure is the active one", "be done *before* instantiating the main window. app = vv.use('fltk') class MainWindow(fltk.Fl_Window): def", "visvis figure in an FLTK application. \"\"\" import fltk import visvis as vv", "our figure is the active one # If only one figure, this is", "560, 420, \"Embedding in FLTK\") # Make a panel with a button but", "IEP or IPython. app.Create() m = MainWindow() app.Run() else: # The native way.", "def __init__(self): fltk.Fl_Window.__init__(self, 560, 420, \"Embedding in FLTK\") # Make a panel with", "stuff in self.fig = vv.backends.backend_fltk.Figure(100,10,560-110,420-20, \"\") # Make box for resizing box =", "vv # Create a visvis app instance, which wraps an fltk application object.", "object. # This needs to be done *before* instantiating the main window. app", "and start the main loop if True: # The visvis way. Will run", "in an FLTK application. \"\"\" import fltk import visvis as vv # Create", "import visvis as vv # Create a visvis app instance, which wraps an", "a visvis app instance, which wraps an fltk application object. # This needs", "but.callback(self._Plot) # Make figure to draw stuff in self.fig = vv.backends.backend_fltk.Figure(100,10,560-110,420-20, \"\") #", "main window. app = vv.use('fltk') class MainWindow(fltk.Fl_Window): def __init__(self): fltk.Fl_Window.__init__(self, 560, 420, \"Embedding", "as vv # Create a visvis app instance, which wraps an fltk application", "FLTK application. \"\"\" import fltk import visvis as vv # Create a visvis", "fltk.Fl_Window.__init__(self, 560, 420, \"Embedding in FLTK\") # Make a panel with a button", "needs to be done *before* instantiating the main window. 
app = vv.use('fltk') class", "# Two ways to create the application and start the main loop if", "sure our figure is the active one # If only one figure, this", "fltk.Fl_Button(10,10,70,30, 'Click me') but.callback(self._Plot) # Make figure to draw stuff in self.fig =", "import fltk import visvis as vv # Create a visvis app instance, which", "active one # If only one figure, this is not necessary. #vv.figure(self.fig.nr) #", "\"\"\" import fltk import visvis as vv # Create a visvis app instance,", "is not necessary. #vv.figure(self.fig.nr) # Clear it vv.clf() # Plot vv.plot([1,2,3,1,6]) vv.legend(['this is", "an FLTK application. \"\"\" import fltk import visvis as vv # Create a", "necessary. #vv.figure(self.fig.nr) # Clear it vv.clf() # Plot vv.plot([1,2,3,1,6]) vv.legend(['this is a line'])", "mode when used in IEP or IPython. app.Create() m = MainWindow() app.Run() else:", "in FLTK\") # Make a panel with a button but = fltk.Fl_Button(10,10,70,30, 'Click", "event): # Make sure our figure is the active one # If only", "which wraps an fltk application object. # This needs to be done *before*", "# If only one figure, this is not necessary. #vv.figure(self.fig.nr) # Clear it", "app = vv.use('fltk') class MainWindow(fltk.Fl_Window): def __init__(self): fltk.Fl_Window.__init__(self, 560, 420, \"Embedding in FLTK\")", "Two ways to create the application and start the main loop if True:", "\"\") # Make box for resizing box = fltk.Fl_Box(fltk.FL_NO_BOX,100,50, 560-110,420-60,\"\") self.resizable(box) box.hide() #", "an fltk application object. 
# This needs to be done *before* instantiating the", "# Make figure to draw stuff in self.fig = vv.backends.backend_fltk.Figure(100,10,560-110,420-20, \"\") # Make", "= fltk.Fl_Button(10,10,70,30, 'Click me') but.callback(self._Plot) # Make figure to draw stuff in self.fig", "#!/usr/bin/env python \"\"\" This example illustrates embedding a visvis figure in an FLTK", "class MainWindow(fltk.Fl_Window): def __init__(self): fltk.Fl_Window.__init__(self, 560, 420, \"Embedding in FLTK\") # Make a", "420, \"Embedding in FLTK\") # Make a panel with a button but =", "to create the application and start the main loop if True: # The", "in interactive mode when used in IEP or IPython. app.Create() m = MainWindow()", "#vv.figure(self.fig.nr) # Clear it vv.clf() # Plot vv.plot([1,2,3,1,6]) vv.legend(['this is a line']) #", "figure to draw stuff in self.fig = vv.backends.backend_fltk.Figure(100,10,560-110,420-20, \"\") # Make box for", "vv.backends.backend_fltk.Figure(100,10,560-110,420-20, \"\") # Make box for resizing box = fltk.Fl_Box(fltk.FL_NO_BOX,100,50, 560-110,420-60,\"\") self.resizable(box) box.hide()", "vv.legend(['this is a line']) # Two ways to create the application and start", "Will run in interactive mode when used in IEP or IPython. app.Create() m", "= vv.use('fltk') class MainWindow(fltk.Fl_Window): def __init__(self): fltk.Fl_Window.__init__(self, 560, 420, \"Embedding in FLTK\") #", "If only one figure, this is not necessary. #vv.figure(self.fig.nr) # Clear it vv.clf()", "loop if True: # The visvis way. Will run in interactive mode when", "vv.plot([1,2,3,1,6]) vv.legend(['this is a line']) # Two ways to create the application and", "vv.clf() # Plot vv.plot([1,2,3,1,6]) vv.legend(['this is a line']) # Two ways to create", "interactive mode when used in IEP or IPython. app.Create() m = MainWindow() app.Run()", "This example illustrates embedding a visvis figure in an FLTK application. \"\"\" import", "one figure, this is not necessary. 
#vv.figure(self.fig.nr) # Clear it vv.clf() # Plot", "draw stuff in self.fig = vv.backends.backend_fltk.Figure(100,10,560-110,420-20, \"\") # Make box for resizing box", "box.hide() # Finish self.end() self.show() self.fig._widget.show() def _Plot(self, event): # Make sure our", "panel with a button but = fltk.Fl_Button(10,10,70,30, 'Click me') but.callback(self._Plot) # Make figure", "self.fig = vv.backends.backend_fltk.Figure(100,10,560-110,420-20, \"\") # Make box for resizing box = fltk.Fl_Box(fltk.FL_NO_BOX,100,50, 560-110,420-60,\"\")", "FLTK\") # Make a panel with a button but = fltk.Fl_Button(10,10,70,30, 'Click me')", "main loop if True: # The visvis way. Will run in interactive mode", "visvis app instance, which wraps an fltk application object. # This needs to", "MainWindow(fltk.Fl_Window): def __init__(self): fltk.Fl_Window.__init__(self, 560, 420, \"Embedding in FLTK\") # Make a panel" ]
[ "raise NotImplementedError def quat_to_axis_angle(q): angle = 2*math.acos(q[0]) sin = math.sqrt(1.0-q[0]**2) if angle >", "axis[1]*axis[1]*icos + cos R[1,2] = axis[1]*axis[2]*icos - axis[0]*sin R[2,0] = axis[2]*axis[0]*icos - axis[1]*sin", "translate(v, delta): ''' Translates vectors inplace by delta. ''' n = v.shape[0] for", "u2]), angle, normalize=True) def shift_vector_axis_angle(v, axis, angle, forward=False): shiftmat = get_shiftmat_axis_angle(axis, angle, forward=forward)", "quat_to_axis_angle(q): angle = 2*math.acos(q[0]) sin = math.sqrt(1.0-q[0]**2) if angle > 0.0: if angle", "surface of a unit sphere. Algorithm from Allen & Tildesley p. 349. '''", "= np.empty((3,3)) q0sq = q[0]**2 q1sq = q[1]**2 q2sq = q[2]**2 q3sq =", "shiftmat = get_shiftmat_euler(euler, seq=seq, world=world, forward=forward) return np.dot(v, shiftmat.T) def shift_tensor2_euler(a, euler, forward=False):", "Conjugates a quaternion and returns a copy. ''' p = np.copy(q) p[1:4] =", "cosine matrix of axes(i.e. frame) B w.r.t. axes(i.e. frame) A. Parameters ---------- A", "of frame B. Returns ------- (3,3) ndarray The dcm of frame B w.r.t.", "euler = dcm_to_euler(orientation['dcm'], seq=to_seq, world=to_world) else: raise ValueError( 'Unrecognized orientation repr {0}'.format(ori_repr)) return", "np.random.random((4,)) return normalize_quat(q) def get_identity_quat(): return np.array([1.0, 0.0, 0.0, 0.0]) def get_rand_quat(): axis,", "p2, p3 = tuple(p) prod_mat = np.array([[p0, -p1, -p2, -p3], [p1, p0, -p3,", "== 'axis_angle': axis = np.array(orientation['axis']) angle = orientation['angle'] euler = axis_angle_to_euler(axis, angle, seq=to_seq,", "2*math.pi) if angle < 0.0: angle = -angle axis = -axis if angle", "euler_to_axis_angle(euler, seq=seq, world=world) return axis_angle_to_quat(axis, angle) def euler_to_dcm(euler, seq='XYZ', world=True): dcm = get_shiftmat_euler(euler,", "the third #rotation about X-axis of the body frame. 
#Axis_angle------------------------------------------------------------ def fix_axis_angle(axis, angle,", "not math.isclose(norm, 1.0, abs_tol=1e-14, rel_tol=1e-14): axis /= norm angle = math.fmod(angle, 2*math.pi) if", "= math.sin(angle/2)*axis q = np.array([w, v[0], v[1], v[2]]) return normalize_quat(q) def axis_angle_to_euler(axis, angle,", "-p3, p2], [p2, p3, p0, -p1], [p3, -p2, p1, p0]]) pq = normalize_quat(np.dot(prod_mat,", "dcm of frame B w.r.t. frame A. ''' return np.dot(B, A.T) def dcm_to_quat(dcm):", "quat = axis_angle_to_quat(axis, angle) elif ori_repr == 'dcm': quat = dcm_to_quat(orientation['dcm']) else: raise", "= s*rotmat[0,1] u2 = s*rotmat[0,2] elif k == 1: u1 = math.sqrt(rotmat[1,1]-rotmat[0,0]-rotmat[2,2]+1)/2 s", "if not math.isclose(norm, 1.0, abs_tol=1e-14, rel_tol=1e-14): axis /= norm angle = math.fmod(angle, 2*math.pi)", "w.r.t. axes(i.e. frame) A. Parameters ---------- A : (3,3) ndarray The rows of", "= q[1]*q[3] q2q3 = q[2]*q[3] rotmat[0,0] = 2*(q0sq + q1sq) - 1.0 rotmat[0,1]", "ori_repr = orientation['repr'] if ori_repr == 'quat': quat = np.array(orientation['quat']) elif ori_repr ==", "= 0.0 u2 = 0.0 return fix_axis_angle(np.array([u0, u1, u2]), angle, normalize=True) def shift_vector_axis_angle(v,", "= get_rotmat_dcm(dcm) return np.dot(v, rotmat.T) def get_rotmat_dcm(dcm): return dcm.T def shift_vector_dcm(v, dcm, forward=False):", "np.array(orientation['quat']) dcm = quat_to_dcm(quat) elif ori_repr == 'euler': euler = np.array(orientation['euler']) seq =", "= get_rotmat_quat(get_conjugated_quat(q)) else: shiftmat = get_rotmat_quat(q) return shiftmat def conjugate_quat(q): ''' Conjugates a", "#Find the largest entry in the diagonal of rotmat k = np.argmax(np.diag(rotmat)) if", "q0q2) rotmat[1,0] = 2*(q1q2 + q0q3) rotmat[1,1] = 2*(q0sq + q2sq) - 1.0", "ang_vel_to_quat_deriv_mat(q): q0, q1, q2, q3 = tuple(q) return 0.5*np.array([[-q1, -q2, -q3], [ q0,", "eulang.factor_rotmat(rotmat, seq=seq, world=world) def euler_to_euler(euler, seq, 
world, to_seq, to_world): rotmat = get_rotmat_euler(euler, seq=seq,", "used in Blender. #In contrast, the XYZ sequence is understood in the Aerospace", "get_rand_axis_angle(): ''' Generates a random pair of axis-angle. The axis is a random", "q0, q1, q2, q3 = tuple(q) return 0.5*np.array([[-q1, -q2, -q3], [ q0, q3,", "normalize=True) def quat_to_euler(q, seq='XYZ', world=True): rotmat = get_rotmat_quat(q) return factorize_rotmat(rotmat, seq=seq, world=world) def", "world=world) return factorize_rotmat(rotmat, seq=to_seq, world=to_world) def euler_to_quat(euler, seq='XYZ', world=True): axis, angle = euler_to_axis_angle(euler,", "= 0.0 return fix_axis_angle(np.array([u0, u1, u2]), angle, normalize=True) def shift_vector_axis_angle(v, axis, angle, forward=False):", ". import eulang #Euler angle sequence: XYZ (world). First rotation about X, second", "= dcm_from_axes(old, new) return rotate_vector_dcm(v, dcm) def mat_is_dcm(mat): return mat_is_rotmat(mat) def mat_is_rotmat(mat): det_is_one", "shiftmat.T) def shift_tensor2_dcm(a, dcm, forward=False): shiftmat = get_shiftmat_dcm(dcm, forward=forward) return np.einsum('ip,jq,pq', shiftmat, shiftmat,", "shiftmat, a) def get_shiftmat_dcm(dcm, forward=False): shiftmat = dcm if not forward: shiftmat =", "world=to_world) elif ori_repr == 'dcm': euler = dcm_to_euler(orientation['dcm'], seq=to_seq, world=to_world) else: raise ValueError(", "about Z axis of the world(i.e. fixed) frame. 
#This is the same as", "= np.array(orientation['axis']) angle = orientation['angle'] dcm = axis_angle_to_dcm(axis, angle) elif ori_repr == 'dcm':", "of rotmat k = np.argmax(np.diag(rotmat)) if k == 0: u0 = math.sqrt(rotmat[0,0]-rotmat[1,1]-rotmat[2,2]+1)/2 s", "np.dot(v, rotmat.T) def get_rotmat_quat(q): rotmat = np.empty((3,3)) q0sq = q[0]**2 q1sq = q[1]**2", "elif k == 1: u1 = math.sqrt(rotmat[1,1]-rotmat[0,0]-rotmat[2,2]+1)/2 s = 1.0/(2*u1) u0 = s*rotmat[0,1]", "angle, forward=False): shiftmat = get_shiftmat_axis_angle(axis, angle, forward=forward) return np.dot(v, shiftmat.T) def shift_tensor2_axis_angle(a, axis,", "np.dot(v, shiftmat.T) def shift_tensor2_dcm(a, dcm, forward=False): shiftmat = get_shiftmat_dcm(dcm, forward=forward) return np.einsum('ip,jq,pq', shiftmat,", "dcm_to_quat(orientation['dcm']) else: raise ValueError( 'Unrecognized orientation repr {0}'.format(ori_repr)) return quat def rotate_vector_quat(v, q):", "about X-axis of the body frame. #Axis_angle------------------------------------------------------------ def fix_axis_angle(axis, angle, normalize=True): if normalize:", "s*rotmat[1,2] else: u0 = 1.0 u1 = 0.0 u2 = 0.0 return fix_axis_angle(np.array([u0,", "1.0 rotmat[0,1] = 2*(q1q2 - q0q3) rotmat[0,2] = 2*(q1q3 + q0q2) rotmat[1,0] =", "quaternion and returns a copy. ''' p = np.copy(q) p[1:4] = -p[1:4] return", "= 1.0/(2*u0) u1 = s*rotmat[0,1] u2 = s*rotmat[0,2] elif k == 1: u1", "qdot): mat = quat_deriv_to_ang_vel_mat(q) return np.dot(mat, qdot) def quat_deriv_to_ang_vel_mat(q): q0, q1, q2, q3", "Allen & Tildesley p. 349. ''' axis = np.zeros((3,)) #Generate angle: A uniform", "Normalizes a quaternion in-place. 
''' q /= np.linalg.norm(q) return q def get_normalized_quat(q): '''", "1.0 u1 = 0.0 u2 = 0.0 return fix_axis_angle(np.array([u0, u1, u2]), angle, normalize=True)", "np.linalg.norm(axis) if not math.isclose(norm, 1.0, abs_tol=1e-14, rel_tol=1e-14): axis /= norm angle = math.fmod(angle,", "seq='XYZ', world=True, forward=False): rotmat = get_rotmat_euler(euler, seq=seq, world=world) if forward: shiftmat = rotmat.T", "sphere. Algorithm from Allen & Tildesley p. 349. ''' axis = np.zeros((3,)) #Generate", "axis = np.zeros((3,)) #Generate angle: A uniform random number from [0.0, 2*pi) angle", "the orthonormal basis vectors of frame B. Returns ------- (3,3) ndarray The dcm", "angle) def get_perturbed_quat(q): raise NotImplementedError def quat_to_axis_angle(q): angle = 2*math.acos(q[0]) sin = math.sqrt(1.0-q[0]**2)", "elif ori_repr == 'dcm': axis, angle = dcm_to_axis_angle(orientation['dcm']) else: raise ValueError( 'Unrecognized orientation", "-q2], [-q3, q0, q1], [ q2, -q1, q0]]) #Other functions------------------------------------------------------ def translate(v, delta):", "tuple(q) return 2*np.array([[-q1, q0, -q3, q2], [-q2, q3, q0, -q1], [-q3, -q2, q1,", "s*rotmat[0,2] elif k == 1: u1 = math.sqrt(rotmat[1,1]-rotmat[0,0]-rotmat[2,2]+1)/2 s = 1.0/(2*u1) u0 =", "a) def shift_tensor3_quat(a, quat, forward=False): shiftmat = get_shiftmat_quat(quat, forward=forward) return np.einsum('ip,jq,kr,pqr', shiftmat, shiftmat,", "R[1,1] = axis[1]*axis[1]*icos + cos R[1,2] = axis[1]*axis[2]*icos - axis[0]*sin R[2,0] = axis[2]*axis[0]*icos", "get_shiftmat_quat(quat, forward=forward) return np.einsum('ip,jq,kr,pqr', shiftmat, shiftmat, shiftmat, a) def get_shiftmat_quat(q, forward=False): if forward:", "return np.dot(v, rotmat.T) def get_rotmat_euler(euler, seq='XYZ', world=True): return eulang.rotmat_euler(euler, seq=seq, world=world) def shift_vector_euler(v,", "def fix_axis_angle(axis, angle, normalize=True): if normalize: norm = np.linalg.norm(axis) if not math.isclose(norm, 
1.0,", "world=world) elif ori_repr == 'axis_angle': axis = np.array(orientation['axis']) angle = orientation['angle'] elif ori_repr", "rotmat[1,0] = 2*(q1q2 + q0q3) rotmat[1,1] = 2*(q0sq + q2sq) - 1.0 rotmat[1,2]", "forward=False): shiftmat = get_shiftmat_quat(quat, forward=forward) return np.einsum('ip,jq,pq', shiftmat, shiftmat, a) def shift_tensor3_quat(a, quat,", "return dcm def rotate_vector_dcm(v, dcm): rotmat = get_rotmat_dcm(dcm) return np.dot(v, rotmat.T) def get_rotmat_dcm(dcm):", "get_rotmat_dcm(dcm) axis, angle = extract_axis_angle_from_rotmat(mat) return axis_angle_to_quat(axis, angle) def dcm_to_euler(dcm, seq='XYZ', world=True): mat", "not forward: shiftmat = shiftmat.T return shiftmat #Direction cosine matrix----------------------------------------------- def dcm_from_axes(A, B):", "= rotmat[2,1] - rotmat[1,2] u1 = rotmat[0,2] - rotmat[2,0] u2 = rotmat[1,0] -", "= get_rotmat_dcm(dcm) axis, angle = extract_axis_angle_from_rotmat(mat) return axis_angle_to_quat(axis, angle) def dcm_to_euler(dcm, seq='XYZ', world=True):", "get_rand_quat(): axis, angle = get_rand_axis_angle() return axis_angle_to_quat(axis, angle) def get_perturbed_quat(q): raise NotImplementedError def", "orientation['world'] quat = euler_to_quat(euler, seq=seq, world=world) elif ori_repr == 'axis_angle': axis = np.array(orientation['axis'])", "q0q2) rotmat[2,1] = 2*(q2q3 + q0q1) rotmat[2,2] = 2*(q0sq + q3sq) - 1.0", "+ q2*math.sin(t*theta))/math.sin(theta) return normalize_quat(q) def get_angle_between_quat(p, q): ''' Returns the angle between two", "The rows of B represent the orthonormal basis vectors of frame B. Returns", "/= norm angle = math.fmod(angle, 2*math.pi) if angle < 0.0: angle = -angle", "k == 0: u0 = math.sqrt(rotmat[0,0]-rotmat[1,1]-rotmat[2,2]+1)/2 s = 1.0/(2*u0) u1 = s*rotmat[0,1] u2", "as np from . import linalg as la from . 
import eulang #Euler", "= axis[1]*axis[1]*icos + cos R[1,2] = axis[1]*axis[2]*icos - axis[0]*sin R[2,0] = axis[2]*axis[0]*icos -", "dcm_to_quat(orientation['dcm']) else: raise ValueError( 'Unrecognized orientation repr {0}'.format(ori_repr)) return dcm def rotate_vector_dcm(v, dcm):", "elif ori_repr == 'dcm': euler = dcm_to_euler(orientation['dcm'], seq=to_seq, world=to_world) else: raise ValueError( 'Unrecognized", "normalize_quat(q) def axis_angle_to_euler(axis, angle, seq='XYZ', world=True): rotmat = get_rotmat_axis_angle(axis, angle) euler = factorize_rotmat(rotmat,", "shiftmat def conjugate_quat(q): ''' Conjugates a quaternion in-place. ''' q[1:4] = -q[1:4] return", "math.sqrt(1.0-q[0]**2) if angle > 0.0: if angle < math.pi: axis = q[1:4]/sin else:", "> 0: if angle < math.pi: u0 = rotmat[2,1] - rotmat[1,2] u1 =", "= orientation['repr'] if ori_repr == 'quat': quat = np.array(orientation['quat']) axis, angle = quat_to_axis_angle(quat)", "world=world) return euler def dcm_to_axis_angle(dcm): mat = get_rotmat_dcm(dcm) axis, angle = extract_axis_angle_from_rotmat(mat) return", "q[1]*q[2] q1q3 = q[1]*q[3] q2q3 = q[2]*q[3] rotmat[0,0] = 2*(q0sq + q1sq) -", "tuple(q) return 0.5*np.array([[-q1, -q2, -q3], [ q0, q3, -q2], [-q3, q0, q1], [", "cos R[0,1] = axis[0]*axis[1]*icos - axis[2]*sin R[0,2] = axis[0]*axis[2]*icos + axis[1]*sin R[1,0] =", "get_rotmat_euler(euler, seq=seq, world=world) return factorize_rotmat(rotmat, seq=to_seq, world=to_world) def euler_to_quat(euler, seq='XYZ', world=True): axis, angle", "rotmat k = np.argmax(np.diag(rotmat)) if k == 0: u0 = math.sqrt(rotmat[0,0]-rotmat[1,1]-rotmat[2,2]+1)/2 s =", "angle < 0.0: angle = -angle axis = -axis if angle > math.pi:", "np.argmax(np.diag(rotmat)) if k == 0: u0 = math.sqrt(rotmat[0,0]-rotmat[1,1]-rotmat[2,2]+1)/2 s = 1.0/(2*u0) u1 =", "q1, q2, q3 = tuple(q) return 0.5*np.array([[-q1, -q2, -q3], [ q0, q3, -q2],", "get_inverted_quat(q): ''' Inverts a quaternion and returns it as a new instance. 
'''", "and the third rotation about Z axis of the world(i.e. fixed) frame. #This", "= np.copy(q) return normalize_quat(p) def quat_is_normalized(q): norm = np.linalg.norm(q) if math.isclose(norm, 1.0, rel_tol=1e-14):", "1.0: break rt = np.sqrt(1.0-zetasq) axis[0] = 2.0*zeta1*rt axis[1] = 2.0*zeta2*rt axis[2] =", "else: rotmat = get_rotmat_quat(q) axis, angle = extract_axis_angle_from_rotmat(rotmat) else: axis = np.array([1.0, 0.0,", "= axis[2]*axis[0]*icos - axis[1]*sin R[2,1] = axis[1]*axis[2]*icos + axis[0]*sin R[2,2] = axis[2]*axis[2]*icos +", "to_seq, to_world): rotmat = get_rotmat_euler(euler, seq=seq, world=world) return factorize_rotmat(rotmat, seq=to_seq, world=to_world) def euler_to_quat(euler,", "B): ''' Returns the direction cosine matrix of axes(i.e. frame) B w.r.t. axes(i.e.", "shiftmat, shiftmat, a) def shift_tensor3_dcm(a, dcm, forward=False): shiftmat = get_shiftmat_dcm(dcm, forward=forward) return np.einsum('ip,jq,kr,pqr',", "''' Normalizes a quaternion and returns it as a copy. 
''' p =", "ValueError( 'Unrecognized orientation repr {0}'.format(ori_repr)) return axis, angle def rotate_vector_axis_angle(v, axis, angle): '''", "-q1, q0]]) #Other functions------------------------------------------------------ def translate(v, delta): ''' Translates vectors inplace by delta.", "rotate_vector_dcm(v, dcm) elif n == 3: dcm = dcm_from_axes(old, new) return rotate_vector_dcm(v, dcm)", "rotmat = get_rotmat_euler(euler, seq=seq, world=world) return np.dot(v, rotmat.T) def get_rotmat_euler(euler, seq='XYZ', world=True): return", "= 2*(q0sq + q1sq) - 1.0 rotmat[0,1] = 2*(q1q2 - q0q3) rotmat[0,2] =", "return True else: return False def get_quat_prod(p, q): p0, p1, p2, p3 =", "axis[2]*sin R[0,2] = axis[0]*axis[2]*icos + axis[1]*sin R[1,0] = axis[0]*axis[1]*icos + axis[2]*sin R[1,1] =", "shiftmat #Direction cosine matrix----------------------------------------------- def dcm_from_axes(A, B): ''' Returns the direction cosine matrix", "ori_repr == 'axis_angle': axis = np.array(orientation['axis']) angle = orientation['angle'] euler = axis_angle_to_euler(axis, angle,", "euler = factorize_rotmat(rotmat, seq=seq, world=world) return euler def axis_angle_to_dcm(axis, angle): dcm = get_shiftmat_axis_angle(axis,", "= 2*(q2q3 + q0q1) rotmat[2,2] = 2*(q0sq + q3sq) - 1.0 return rotmat", "return np.dot(v, shiftmat.T) def shift_tensor2_quat(a, quat, forward=False): shiftmat = get_shiftmat_quat(quat, forward=forward) return np.einsum('ip,jq,pq',", "a) def shift_tensor3_axis_angle(a, axis, angle, forward=False): shiftmat = get_shiftmat_axis_angle(axis, angle, forward=forward) return np.einsum('ip,jq,kr,pqr',", "''' assert old.shape[0] == new.shape[0] n = old.shape[0] if n == 1: angle", "a new instance. 
''' p = np.copy(q) return conjugate_quat(p) def normalize_quat(q): ''' Normalizes", "return axis_angle_to_quat(axis, angle) def dcm_to_euler(dcm, seq='XYZ', world=True): mat = get_rotmat_dcm(dcm) euler = factorize_rotmat(mat,", "rotmat[0,2] = 2*(q1q3 + q0q2) rotmat[1,0] = 2*(q1q2 + q0q3) rotmat[1,1] = 2*(q0sq", "angle between two quaternions p and q. ''' return math.acos(np.dot(p,q)) def quat_deriv_to_ang_vel(q, qdot):", "else: shiftmat = rotmat return shiftmat #Quaternion----------------------------------------------------------- def get_rand_quat(): q = np.random.random((4,)) return", "np.array([[p0, -p1, -p2, -p3], [p1, p0, -p3, p2], [p2, p3, p0, -p1], [p3,", "math.sqrt(rotmat[1,1]-rotmat[0,0]-rotmat[2,2]+1)/2 s = 1.0/(2*u1) u0 = s*rotmat[0,1] u2 = s*rotmat[1,2] elif k ==", "Tildesley p. 349. ''' axis = np.zeros((3,)) #Generate angle: A uniform random number", "Z axis of the world(i.e. fixed) frame. #This is the same as the", "== 'quat': quat = np.array(orientation['quat']) dcm = quat_to_dcm(quat) elif ori_repr == 'euler': euler", "rotmat[2,1] = 2*(q2q3 + q0q1) rotmat[2,2] = 2*(q0sq + q3sq) - 1.0 return", "shiftmat = get_rotmat_axis_angle(-axis, angle) if not forward: shiftmat = shiftmat.T return shiftmat #Direction", "= q[0]*q[1] q0q2 = q[0]*q[2] q0q3 = q[0]*q[3] q1q2 = q[1]*q[2] q1q3 =", "orientation['angle'] euler = axis_angle_to_euler(axis, angle, seq=to_seq, world=to_world) elif ori_repr == 'dcm': euler =", "def any_to_quat(orientation): ori_repr = orientation['repr'] if ori_repr == 'quat': quat = np.array(orientation['quat']) elif", "return (axis, angle) def get_rand_axis_angle(): ''' Generates a random pair of axis-angle. 
The", "def get_rand_quat(): q = np.random.random((4,)) return normalize_quat(q) def get_identity_quat(): return np.array([1.0, 0.0, 0.0,", "= quat_deriv_to_ang_vel_mat(q) return np.dot(mat, qdot) def quat_deriv_to_ang_vel_mat(q): q0, q1, q2, q3 = tuple(q)", "= -angle axis = -axis if angle > math.pi: angle = 2*math.pi -", "''' Generates a random pair of axis-angle. The axis is a random vector", "forward=False): shiftmat = get_shiftmat_dcm(dcm, forward=forward) return np.einsum('ip,jq,kr,pqr', shiftmat, shiftmat, shiftmat, a) def get_shiftmat_dcm(dcm,", "[0.0, 2*pi) angle = 2.0*math.pi*np.random.random() while True: #Generate two uniform random numbers from", "world=world) axis, angle = extract_axis_angle_from_rotmat(rotmat) return (axis, angle) def any_to_euler(orientation, to_seq, to_world): ori_repr", "vector from the surface of a unit sphere. Algorithm from Allen & Tildesley", "vectors of frame A. B : (3,3) ndarray The rows of B represent", "R[0,1] = axis[0]*axis[1]*icos - axis[2]*sin R[0,2] = axis[0]*axis[2]*icos + axis[1]*sin R[1,0] = axis[0]*axis[1]*icos", "abs_tol=1e-14, rel_tol=1e-14): axis /= norm angle = math.fmod(angle, 2*math.pi) if angle < 0.0:", "i in range(n): v[i,:] += delta return v def align(v, old, new): '''", "world=world) def euler_to_euler(euler, seq, world, to_seq, to_world): rotmat = get_rotmat_euler(euler, seq=seq, world=world) return", "q1q2 = q[1]*q[2] q1q3 = q[1]*q[3] q2q3 = q[2]*q[3] rotmat[0,0] = 2*(q0sq +", "- rotmat[1,2] u1 = rotmat[0,2] - rotmat[2,0] u2 = rotmat[1,0] - rotmat[0,1] else:", "q0, q1, q2, q3 = tuple(q) return 2*np.array([[-q1, q0, -q3, q2], [-q2, q3,", "- rotmat[0,1] else: #Find the largest entry in the diagonal of rotmat k", "rotmat = get_rotmat_quat(q) return np.dot(v, rotmat.T) def get_rotmat_quat(q): rotmat = np.empty((3,3)) q0sq =", "pq = normalize_quat(np.dot(prod_mat, q)) return pq def interpolate_quat(q1, q2, t): theta = get_angle_between_quat(q1,", "return np.dot(v, shiftmat.T) def shift_tensor2_axis_angle(a, 
axis, angle, forward=False): shiftmat = get_shiftmat_axis_angle(axis, angle, forward=forward)", "-p2, -p3], [p1, p0, -p3, p2], [p2, p3, p0, -p1], [p3, -p2, p1,", "axis_angle_to_quat(axis, angle) def euler_to_dcm(euler, seq='XYZ', world=True): dcm = get_shiftmat_euler(euler, seq=seq, world=world, forward=True) return", "q0, q3, -q2], [-q3, q0, q1], [ q2, -q1, q0]]) #Other functions------------------------------------------------------ def", "- q0q2) rotmat[2,1] = 2*(q2q3 + q0q1) rotmat[2,2] = 2*(q0sq + q3sq) -", "return R def extract_axis_angle_from_rotmat(rotmat): trace = np.trace(rotmat) angle = math.acos((trace-1)/2) if angle >", "the diagonal of rotmat k = np.argmax(np.diag(rotmat)) if k == 0: u0 =", "axis, angle = extract_axis_angle_from_rotmat(rotmat) return (axis, angle) def any_to_euler(orientation, to_seq, to_world): ori_repr =", "forward=False): shiftmat = get_shiftmat_dcm(dcm, forward=forward) return np.dot(v, shiftmat.T) def shift_tensor2_dcm(a, dcm, forward=False): shiftmat", "def quat_deriv_to_ang_vel_mat(q): q0, q1, q2, q3 = tuple(q) return 2*np.array([[-q1, q0, -q3, q2],", "quaternion and returns it as a new instance. ''' p = np.copy(q) return", "rotmat = get_rotmat_dcm(dcm) return np.dot(v, rotmat.T) def get_rotmat_dcm(dcm): return dcm.T def shift_vector_dcm(v, dcm,", "the body frame. #Axis_angle------------------------------------------------------------ def fix_axis_angle(axis, angle, normalize=True): if normalize: norm = np.linalg.norm(axis)", "Returns the direction cosine matrix of axes(i.e. frame) B w.r.t. axes(i.e. 
frame) A.", "-q2, -q3], [ q0, q3, -q2], [-q3, q0, q1], [ q2, -q1, q0]])", "cos R[1,2] = axis[1]*axis[2]*icos - axis[0]*sin R[2,0] = axis[2]*axis[0]*icos - axis[1]*sin R[2,1] =", "angle = extract_axis_angle_from_rotmat(rotmat) return (axis, angle) def any_to_euler(orientation, to_seq, to_world): ori_repr = orientation['repr']", "u0 = s*rotmat[0,2] u1 = s*rotmat[1,2] else: u0 = 1.0 u1 = 0.0", "shiftmat, shiftmat, shiftmat, a) def get_shiftmat_dcm(dcm, forward=False): shiftmat = dcm if not forward:", "shiftmat = get_rotmat_quat(q) return shiftmat def conjugate_quat(q): ''' Conjugates a quaternion in-place. '''", "q1sq) - 1.0 rotmat[0,1] = 2*(q1q2 - q0q3) rotmat[0,2] = 2*(q1q3 + q0q2)", "1.0/(2*u2) u0 = s*rotmat[0,2] u1 = s*rotmat[1,2] else: u0 = 1.0 u1 =", "forward=forward) return np.einsum('ip,jq,pq', shiftmat, shiftmat, a) def shift_tensor3_axis_angle(a, axis, angle, forward=False): shiftmat =", "unit sphere. Algorithm from Allen & Tildesley p. 349. ''' axis = np.zeros((3,))", "axis[2]*axis[0]*icos - axis[1]*sin R[2,1] = axis[1]*axis[2]*icos + axis[0]*sin R[2,2] = axis[2]*axis[2]*icos + cos", "forward=False): shiftmat = get_shiftmat_euler(euler, seq=seq, world=world, forward=forward) return np.dot(v, shiftmat.T) def shift_tensor2_euler(a, euler,", "zeta2 = 2.0*np.random.random() - 1.0 zetasq = zeta1**2 + zeta2**2 if zetasq <=", "dcm_to_euler(dcm, seq='XYZ', world=True): mat = get_rotmat_dcm(dcm) euler = factorize_rotmat(mat, seq=seq, world=world) return euler", "= np.dot(mat, ang_vel) return qdot def ang_vel_to_quat_deriv_mat(q): q0, q1, q2, q3 = tuple(q)", "axis_angle_to_quat(axis, angle): w = math.cos(angle/2) v = math.sin(angle/2)*axis q = np.array([w, v[0], v[1],", "frame B w.r.t. frame A. 
''' return np.dot(B, A.T) def dcm_to_quat(dcm): mat =", "q1sq = q[1]**2 q2sq = q[2]**2 q3sq = q[3]**2 q0q1 = q[0]*q[1] q0q2", "#Direction cosine matrix----------------------------------------------- def dcm_from_axes(A, B): ''' Returns the direction cosine matrix of", "def normalize_quat(q): ''' Normalizes a quaternion in-place. ''' q /= np.linalg.norm(q) return q", "shiftmat, shiftmat, a) def shift_tensor3_axis_angle(a, axis, angle, forward=False): shiftmat = get_shiftmat_axis_angle(axis, angle, forward=forward)", "dcm if not forward: shiftmat = shiftmat.T return shiftmat #Euler angle----------------------------------------------------------- def factorize_rotmat(rotmat,", "np.dot(v, shiftmat.T) def shift_tensor2_quat(a, quat, forward=False): shiftmat = get_shiftmat_quat(quat, forward=forward) return np.einsum('ip,jq,pq', shiftmat,", "dcm) def mat_is_dcm(mat): return mat_is_rotmat(mat) def mat_is_rotmat(mat): det_is_one = math.isclose(np.linalg.det(mat), 1.0, abs_tol=1e-12, rel_tol=1e-12)", "= quat_to_dcm(quat) elif ori_repr == 'euler': euler = np.array(orientation['euler']) seq = orientation['seq'] world", "shift_tensor3_quat(a, quat, forward=False): shiftmat = get_shiftmat_quat(quat, forward=forward) return np.einsum('ip,jq,kr,pqr', shiftmat, shiftmat, shiftmat, a)", "orientation['angle'] elif ori_repr == 'dcm': axis, angle = dcm_to_axis_angle(orientation['dcm']) else: raise ValueError( 'Unrecognized", "= orientation['seq'] world = orientation['world'] axis, angle = euler_to_axis_angle(euler, seq=seq, world=world) elif ori_repr", "np.vstack((new, z_new)) dcm = dcm_from_axes(axes_old, axes_new) return rotate_vector_dcm(v, dcm) elif n == 3:", "#Axis_angle------------------------------------------------------------ def fix_axis_angle(axis, angle, normalize=True): if normalize: norm = np.linalg.norm(axis) if not math.isclose(norm,", "a) def shift_tensor3_euler(a, euler, forward=False): shiftmat = get_shiftmat_euler(euler, forward=forward) return 
np.einsum('ip,jq,kr,pqr', shiftmat, shiftmat,", "get_rotmat_axis_angle(axis, angle) return np.dot(v, rotmat.T) def get_rotmat_axis_angle(axis, angle): R = np.zeros((3,3)) sin =", "in range(n): v[i,:] += delta return v def align(v, old, new): ''' old", "ValueError( 'Unrecognized orientation repr {0}'.format(ori_repr)) return quat def rotate_vector_quat(v, q): rotmat = get_rotmat_quat(q)", "np.sqrt(1.0-zetasq) axis[0] = 2.0*zeta1*rt axis[1] = 2.0*zeta2*rt axis[2] = 1.0 - 2.0*zetasq return", "= get_shiftmat_euler(euler, seq=seq, world=world, forward=forward) return np.dot(v, shiftmat.T) def shift_tensor2_euler(a, euler, forward=False): shiftmat", "about Y-axis, and the third #rotation about X-axis of the body frame. #Axis_angle------------------------------------------------------------", "a) def get_shiftmat_euler(euler, seq='XYZ', world=True, forward=False): rotmat = get_rotmat_euler(euler, seq=seq, world=world) if forward:", "frame B. Returns ------- (3,3) ndarray The dcm of frame B w.r.t. 
frame", "= 2.0*np.random.random() - 1.0 zeta2 = 2.0*np.random.random() - 1.0 zetasq = zeta1**2 +", "angle = extract_axis_angle_from_rotmat(mat) return axis_angle_to_quat(axis, angle) def dcm_to_euler(dcm, seq='XYZ', world=True): mat = get_rotmat_dcm(dcm)", "ori_repr == 'axis_angle': axis = np.array(orientation['axis']) angle = orientation['angle'] dcm = axis_angle_to_dcm(axis, angle)", "math.sin(angle/2)*axis q = np.array([w, v[0], v[1], v[2]]) return normalize_quat(q) def axis_angle_to_euler(axis, angle, seq='XYZ',", "def get_shiftmat_quat(q, forward=False): if forward: shiftmat = get_rotmat_quat(get_conjugated_quat(q)) else: shiftmat = get_rotmat_quat(q) return", "return euler def axis_angle_to_dcm(axis, angle): dcm = get_shiftmat_axis_angle(axis, angle, forward=True) return dcm def", "orientation['repr'] if ori_repr == 'quat': quat = np.array(orientation['quat']) dcm = quat_to_dcm(quat) elif ori_repr", "rotmat.T) def get_rotmat_quat(q): rotmat = np.empty((3,3)) q0sq = q[0]**2 q1sq = q[1]**2 q2sq", "angle, forward=forward) return np.dot(v, shiftmat.T) def shift_tensor2_axis_angle(a, axis, angle, forward=False): shiftmat = get_shiftmat_axis_angle(axis,", "- 2.0*zetasq return fix_axis_angle(axis, angle) def axis_angle_to_quat(axis, angle): w = math.cos(angle/2) v =", "dcm_from_axes(A, B): ''' Returns the direction cosine matrix of axes(i.e. frame) B w.r.t.", "#Euler angle sequence: XYZ (world). First rotation about X, second rotation #about Y,", "+= delta return v def align(v, old, new): ''' old and new represent", "#about Y, and the third rotation about Z axis of the world(i.e. 
fixed)", "forward=False): shiftmat = get_shiftmat_axis_angle(axis, angle, forward=forward) return np.dot(v, shiftmat.T) def shift_tensor2_axis_angle(a, axis, angle,", "u0 = rotmat[2,1] - rotmat[1,2] u1 = rotmat[0,2] - rotmat[2,0] u2 = rotmat[1,0]", "'axis_angle': axis = np.array(orientation['axis']) angle = orientation['angle'] dcm = axis_angle_to_dcm(axis, angle) elif ori_repr", "two quaternions p and q. ''' return math.acos(np.dot(p,q)) def quat_deriv_to_ang_vel(q, qdot): mat =", "= get_rotmat_dcm(dcm) axis, angle = extract_axis_angle_from_rotmat(mat) return (axis, angle) def any_to_dcm(orientation): ori_repr =", "= np.array(orientation['quat']) dcm = quat_to_dcm(quat) elif ori_repr == 'euler': euler = np.array(orientation['euler']) seq", "rt = np.sqrt(1.0-zetasq) axis[0] = 2.0*zeta1*rt axis[1] = 2.0*zeta2*rt axis[2] = 1.0 -", "+ cos R[1,2] = axis[1]*axis[2]*icos - axis[0]*sin R[2,0] = axis[2]*axis[0]*icos - axis[1]*sin R[2,1]", "= np.array(orientation['euler']) seq = orientation['seq'] world = orientation['world'] quat = euler_to_quat(euler, seq=seq, world=world)", "<= 1.0: break rt = np.sqrt(1.0-zetasq) axis[0] = 2.0*zeta1*rt axis[1] = 2.0*zeta2*rt axis[2]", "1: angle = math.acos(np.dot(old, new)) axis = la.unitized(np.cross(old, new)) return rotate_vector_axis_angle(v, axis, angle)", "np.array(orientation['quat']) euler = quat_to_euler(quat, seq=to_seq, world=to_world) elif ori_repr == 'euler': euler = np.array(orientation['euler'])", "trace = np.trace(rotmat) angle = math.acos((trace-1)/2) if angle > 0: if angle <", "inplace by delta. 
''' n = v.shape[0] for i in range(n): v[i,:] +=", "return np.einsum('ip,jq,kr,pqr', shiftmat, shiftmat, shiftmat, a) def get_shiftmat_quat(q, forward=False): if forward: shiftmat =", "def get_rand_quat(): axis, angle = get_rand_axis_angle() return axis_angle_to_quat(axis, angle) def get_perturbed_quat(q): raise NotImplementedError", "'axis_angle': axis = np.array(orientation['axis']) angle = orientation['angle'] elif ori_repr == 'dcm': axis, angle", "det_is_one = math.isclose(np.linalg.det(mat), 1.0, abs_tol=1e-12, rel_tol=1e-12) is_orthogonal = np.allclose(np.dot(mat, mat.T), np.identity(3)) return is_orthogonal", "a quaternion in-place. ''' q /= np.linalg.norm(q) return q def get_normalized_quat(q): ''' Normalizes", "seq=seq, world=world) if forward: shiftmat = rotmat.T else: shiftmat = rotmat return shiftmat", "== 1: u1 = math.sqrt(rotmat[1,1]-rotmat[0,0]-rotmat[2,2]+1)/2 s = 1.0/(2*u1) u0 = s*rotmat[0,1] u2 =", "angle) elif n == 2: z_old = la.unitized(np.cross(old[0,:], old[1,:])) z_new = la.unitized(np.cross(new[0,:], new[1,:]))", "sin = math.sqrt(1.0-q[0]**2) if angle > 0.0: if angle < math.pi: axis =", "fix_axis_angle(axis, angle, normalize=True) def quat_to_euler(q, seq='XYZ', world=True): rotmat = get_rotmat_quat(q) return factorize_rotmat(rotmat, seq=seq,", "p. 349. 
''' axis = np.zeros((3,)) #Generate angle: A uniform random number from", "angle = orientation['angle'] dcm = axis_angle_to_dcm(axis, angle) elif ori_repr == 'dcm': dcm =", "return eulang.factor_rotmat(rotmat, seq=seq, world=world) def euler_to_euler(euler, seq, world, to_seq, to_world): rotmat = get_rotmat_euler(euler,", "shift_tensor3_euler(a, euler, forward=False): shiftmat = get_shiftmat_euler(euler, forward=forward) return np.einsum('ip,jq,kr,pqr', shiftmat, shiftmat, shiftmat, a)", "{0}'.format(ori_repr)) return quat def rotate_vector_quat(v, q): rotmat = get_rotmat_quat(q) return np.dot(v, rotmat.T) def", "== 'axis_angle': axis = np.array(orientation['axis']) angle = orientation['angle'] dcm = axis_angle_to_dcm(axis, angle) elif", "angle) def get_rand_axis_angle(): ''' Generates a random pair of axis-angle. The axis is", "return (axis, angle) def any_to_dcm(orientation): ori_repr = orientation['repr'] if ori_repr == 'quat': quat", "= get_rotmat_dcm(dcm) euler = factorize_rotmat(mat, seq=seq, world=world) return euler def dcm_to_axis_angle(dcm): mat =", "angle, forward=True) return dcm def any_to_axis_angle(orientation): ori_repr = orientation['repr'] if ori_repr == 'quat':", "rotmat[2,1] - rotmat[1,2] u1 = rotmat[0,2] - rotmat[2,0] u2 = rotmat[1,0] - rotmat[0,1]", "factorize_rotmat(rotmat, seq=to_seq, world=to_world) def euler_to_quat(euler, seq='XYZ', world=True): axis, angle = euler_to_axis_angle(euler, seq=seq, world=world)", "align(v, old, new): ''' old and new represent coordinate axes. They must be", "normalize: norm = np.linalg.norm(axis) if not math.isclose(norm, 1.0, abs_tol=1e-14, rel_tol=1e-14): axis /= norm", "p3, p0, -p1], [p3, -p2, p1, p0]]) pq = normalize_quat(np.dot(prod_mat, q)) return pq", "quaternion in-place. ''' q /= np.linalg.norm(q) return q def get_normalized_quat(q): ''' Normalizes a", "axis, angle): ''' Rotates vectors about axis by angle. 
''' rotmat = get_rotmat_axis_angle(axis,", "'Unrecognized orientation repr {0}'.format(ori_repr)) return quat def rotate_vector_quat(v, q): rotmat = get_rotmat_quat(q) return", "dcm_to_axis_angle(orientation['dcm']) else: raise ValueError( 'Unrecognized orientation repr {0}'.format(ori_repr)) return axis, angle def rotate_vector_axis_angle(v,", "return axis, angle def rotate_vector_axis_angle(v, axis, angle): ''' Rotates vectors about axis by", "q2*math.sin(t*theta))/math.sin(theta) return normalize_quat(q) def get_angle_between_quat(p, q): ''' Returns the angle between two quaternions", "q /= np.linalg.norm(q) return q def get_normalized_quat(q): ''' Normalizes a quaternion and returns", "seq=to_seq, world=to_world) else: raise ValueError( 'Unrecognized orientation repr {0}'.format(ori_repr)) return euler def rotate_vector_euler(v,", "mat = get_rotmat_dcm(dcm) axis, angle = extract_axis_angle_from_rotmat(mat) return (axis, angle) def any_to_dcm(orientation): ori_repr", "a quaternion and returns it as a new instance. 
''' p = np.copy(q)", "'dcm': quat = dcm_to_quat(orientation['dcm']) else: raise ValueError( 'Unrecognized orientation repr {0}'.format(ori_repr)) return quat", "forward=False): shiftmat = get_shiftmat_dcm(dcm, forward=forward) return np.einsum('ip,jq,pq', shiftmat, shiftmat, a) def shift_tensor3_dcm(a, dcm,", "elif ori_repr == 'dcm': dcm = dcm_to_quat(orientation['dcm']) else: raise ValueError( 'Unrecognized orientation repr", "''' p = np.copy(q) return conjugate_quat(p) def normalize_quat(q): ''' Normalizes a quaternion in-place.", "np.dot(mat, ang_vel) return qdot def ang_vel_to_quat_deriv_mat(q): q0, q1, q2, q3 = tuple(q) return", "world=to_world) def euler_to_quat(euler, seq='XYZ', world=True): axis, angle = euler_to_axis_angle(euler, seq=seq, world=world) return axis_angle_to_quat(axis,", "= v.shape[0] for i in range(n): v[i,:] += delta return v def align(v,", "community as: #First rotation about Z-axis, second rotation about Y-axis, and the third", "+ q3sq) - 1.0 return rotmat def shift_vector_quat(v, q, forward=False): shiftmat = get_shiftmat_quat(q,", "angle = euler_to_axis_angle(euler, seq=seq, world=world) elif ori_repr == 'axis_angle': axis = np.array(orientation['axis']) angle", "= np.sin(angle) cos = np.cos(angle) icos = 1.0 - cos R[0,0] = axis[0]*axis[0]*icos", "R[1,0] = axis[0]*axis[1]*icos + axis[2]*sin R[1,1] = axis[1]*axis[1]*icos + cos R[1,2] = axis[1]*axis[2]*icos", "fixed) frame. #This is the same as the sequence used in Blender. #In", "ori_repr == 'axis_angle': axis = np.array(orientation['axis']) angle = orientation['angle'] elif ori_repr == 'dcm':", "p2], [p2, p3, p0, -p1], [p3, -p2, p1, p0]]) pq = normalize_quat(np.dot(prod_mat, q))", "of axes(i.e. frame) B w.r.t. axes(i.e. frame) A. Parameters ---------- A : (3,3)", "XYZ (world). First rotation about X, second rotation #about Y, and the third", "must be unit vectors. 
''' assert old.shape[0] == new.shape[0] n = old.shape[0] if", "= dcm_from_axes(axes_old, axes_new) return rotate_vector_dcm(v, dcm) elif n == 3: dcm = dcm_from_axes(old,", "return factorize_rotmat(rotmat, seq=to_seq, world=to_world) def euler_to_quat(euler, seq='XYZ', world=True): axis, angle = euler_to_axis_angle(euler, seq=seq,", "get_shiftmat_axis_angle(axis, angle, forward=False): shiftmat = get_rotmat_axis_angle(-axis, angle) if not forward: shiftmat = shiftmat.T", "''' return np.dot(B, A.T) def dcm_to_quat(dcm): mat = get_rotmat_dcm(dcm) axis, angle = extract_axis_angle_from_rotmat(mat)", "of a unit sphere. Algorithm from Allen & Tildesley p. 349. ''' axis", "= (q1*math.sin((1.0-t)*theta) + q2*math.sin(t*theta))/math.sin(theta) return normalize_quat(q) def get_angle_between_quat(p, q): ''' Returns the angle", "def any_to_euler(orientation, to_seq, to_world): ori_repr = orientation['repr'] if ori_repr == 'quat': quat =", "return np.dot(v, shiftmat.T) def shift_tensor2_dcm(a, dcm, forward=False): shiftmat = get_shiftmat_dcm(dcm, forward=forward) return np.einsum('ip,jq,pq',", "= tuple(q) return 0.5*np.array([[-q1, -q2, -q3], [ q0, q3, -q2], [-q3, q0, q1],", "math.cos(angle/2) v = math.sin(angle/2)*axis q = np.array([w, v[0], v[1], v[2]]) return normalize_quat(q) def", "to_seq, to_world): ori_repr = orientation['repr'] if ori_repr == 'quat': quat = np.array(orientation['quat']) euler", "angle > 0: if angle < math.pi: u0 = rotmat[2,1] - rotmat[1,2] u1", "s*rotmat[1,2] elif k == 2: u2 = math.sqrt(rotmat[2,2]-rotmat[0,0]-rotmat[1,1]+1)/2 s = 1.0/(2*u2) u0 =", "assert old.shape[0] == new.shape[0] n = old.shape[0] if n == 1: angle =", "'quat': quat = np.array(orientation['quat']) elif ori_repr == 'euler': euler = np.array(orientation['euler']) seq =", "seq='XYZ', world=True): return eulang.rotmat_euler(euler, seq=seq, world=world) def shift_vector_euler(v, euler, seq='XYZ', world=True, forward=False): shiftmat", "return quat def rotate_vector_quat(v, q): rotmat = 
get_rotmat_quat(q) return np.dot(v, rotmat.T) def get_rotmat_quat(q):", "0: u0 = math.sqrt(rotmat[0,0]-rotmat[1,1]-rotmat[2,2]+1)/2 s = 1.0/(2*u0) u1 = s*rotmat[0,1] u2 = s*rotmat[0,2]", "Parameters ---------- A : (3,3) ndarray The rows of A represent the orthonormal", "get_shiftmat_euler(euler, forward=forward) return np.einsum('ip,jq,pq', shiftmat, shiftmat, a) def shift_tensor3_euler(a, euler, forward=False): shiftmat =", "coordinate axes. They must be unit vectors. ''' assert old.shape[0] == new.shape[0] n", "return normalize_quat(q) def axis_angle_to_euler(axis, angle, seq='XYZ', world=True): rotmat = get_rotmat_axis_angle(axis, angle) euler =", "2*(q0sq + q1sq) - 1.0 rotmat[0,1] = 2*(q1q2 - q0q3) rotmat[0,2] = 2*(q1q3", "dcm_to_quat(dcm): mat = get_rotmat_dcm(dcm) axis, angle = extract_axis_angle_from_rotmat(mat) return axis_angle_to_quat(axis, angle) def dcm_to_euler(dcm,", "dcm): rotmat = get_rotmat_dcm(dcm) return np.dot(v, rotmat.T) def get_rotmat_dcm(dcm): return dcm.T def shift_vector_dcm(v,", "1.0/(2*u1) u0 = s*rotmat[0,1] u2 = s*rotmat[1,2] elif k == 2: u2 =", "raise ValueError( 'Unrecognized orientation repr {0}'.format(ori_repr)) return euler def rotate_vector_euler(v, euler, seq='XYZ', world=True):", "return eulang.rotmat_euler(euler, seq=seq, world=world) def shift_vector_euler(v, euler, seq='XYZ', world=True, forward=False): shiftmat = get_shiftmat_euler(euler,", "return normalize_quat(p) def quat_is_normalized(q): norm = np.linalg.norm(q) if math.isclose(norm, 1.0, rel_tol=1e-14): return True", "def get_shiftmat_euler(euler, seq='XYZ', world=True, forward=False): rotmat = get_rotmat_euler(euler, seq=seq, world=world) if forward: shiftmat", "shift_tensor2_axis_angle(a, axis, angle, forward=False): shiftmat = get_shiftmat_axis_angle(axis, angle, forward=forward) return np.einsum('ip,jq,pq', shiftmat, shiftmat,", "normalize_quat(p) def quat_is_normalized(q): norm = np.linalg.norm(q) if math.isclose(norm, 1.0, rel_tol=1e-14): return True else:", 
"They must be unit vectors. ''' assert old.shape[0] == new.shape[0] n = old.shape[0]", "angle----------------------------------------------------------- def factorize_rotmat(rotmat, seq='XYZ', world=True): return eulang.factor_rotmat(rotmat, seq=seq, world=world) def euler_to_euler(euler, seq, world,", "if angle > 0: if angle < math.pi: u0 = rotmat[2,1] - rotmat[1,2]", "ang_vel): mat = ang_vel_to_quat_deriv_mat(q) qdot = np.dot(mat, ang_vel) return qdot def ang_vel_to_quat_deriv_mat(q): q0,", "as: #First rotation about Z-axis, second rotation about Y-axis, and the third #rotation", "def shift_tensor2_axis_angle(a, axis, angle, forward=False): shiftmat = get_shiftmat_axis_angle(axis, angle, forward=forward) return np.einsum('ip,jq,pq', shiftmat,", "#Generate two uniform random numbers from [-1, 1) zeta1 = 2.0*np.random.random() - 1.0", "new) return rotate_vector_dcm(v, dcm) def mat_is_dcm(mat): return mat_is_rotmat(mat) def mat_is_rotmat(mat): det_is_one = math.isclose(np.linalg.det(mat),", "== 'quat': quat = np.array(orientation['quat']) euler = quat_to_euler(quat, seq=to_seq, world=to_world) elif ori_repr ==", "get_shiftmat_quat(q, forward=forward) return np.dot(v, shiftmat.T) def shift_tensor2_quat(a, quat, forward=False): shiftmat = get_shiftmat_quat(quat, forward=forward)", "1) zeta1 = 2.0*np.random.random() - 1.0 zeta2 = 2.0*np.random.random() - 1.0 zetasq =", "angle) def any_to_euler(orientation, to_seq, to_world): ori_repr = orientation['repr'] if ori_repr == 'quat': quat", "la.unitized(np.cross(old[0,:], old[1,:])) z_new = la.unitized(np.cross(new[0,:], new[1,:])) axes_old = np.vstack((old, z_old)) axes_new = np.vstack((new,", "a copy. 
''' p = np.copy(q) p[1:4] = -p[1:4] return p def invert_quat(q):", "orientation repr {0}'.format(ori_repr)) return quat def rotate_vector_quat(v, q): rotmat = get_rotmat_quat(q) return np.dot(v,", "z_new = la.unitized(np.cross(new[0,:], new[1,:])) axes_old = np.vstack((old, z_old)) axes_new = np.vstack((new, z_new)) dcm", "rotmat[1,1] = 2*(q0sq + q2sq) - 1.0 rotmat[1,2] = 2*(q2q3 - q0q1) rotmat[2,0]", "the orthonormal basis vectors of frame A. B : (3,3) ndarray The rows", "= get_rotmat_axis_angle(axis, angle) euler = factorize_rotmat(rotmat, seq=seq, world=world) return euler def axis_angle_to_dcm(axis, angle):", "np.array(orientation['axis']) angle = orientation['angle'] dcm = axis_angle_to_dcm(axis, angle) elif ori_repr == 'dcm': dcm", "= get_shiftmat_euler(euler, forward=forward) return np.einsum('ip,jq,kr,pqr', shiftmat, shiftmat, shiftmat, a) def get_shiftmat_euler(euler, seq='XYZ', world=True,", "a) def shift_tensor3_dcm(a, dcm, forward=False): shiftmat = get_shiftmat_dcm(dcm, forward=forward) return np.einsum('ip,jq,kr,pqr', shiftmat, shiftmat,", "in-place. ''' q /= np.linalg.norm(q) return q def get_normalized_quat(q): ''' Normalizes a quaternion", "def get_shiftmat_dcm(dcm, forward=False): shiftmat = dcm if not forward: shiftmat = shiftmat.T return", "def get_inverted_quat(q): ''' Inverts a quaternion and returns it as a new instance.", "understood in the Aerospace community as: #First rotation about Z-axis, second rotation about", "frame) A. 
Parameters ---------- A : (3,3) ndarray The rows of A represent", "= 1.0 u1 = 0.0 u2 = 0.0 return fix_axis_angle(np.array([u0, u1, u2]), angle,", "= math.sqrt(1.0-q[0]**2) if angle > 0.0: if angle < math.pi: axis = q[1:4]/sin", "q[0]*q[3] q1q2 = q[1]*q[2] q1q3 = q[1]*q[3] q2q3 = q[2]*q[3] rotmat[0,0] = 2*(q0sq", "0.0: if angle < math.pi: axis = q[1:4]/sin else: rotmat = get_rotmat_quat(q) axis,", "forward=forward) return np.dot(v, shiftmat.T) def shift_tensor2_axis_angle(a, axis, angle, forward=False): shiftmat = get_shiftmat_axis_angle(axis, angle,", "seq='XYZ', world=True): return eulang.factor_rotmat(rotmat, seq=seq, world=world) def euler_to_euler(euler, seq, world, to_seq, to_world): rotmat", "angle < math.pi: axis = q[1:4]/sin else: rotmat = get_rotmat_quat(q) axis, angle =", "rel_tol=1e-14): return True else: return False def get_quat_prod(p, q): p0, p1, p2, p3", "get_rotmat_euler(euler, seq=seq, world=world) if forward: shiftmat = rotmat.T else: shiftmat = rotmat return", "angle): dcm = get_shiftmat_axis_angle(axis, angle, forward=True) return dcm def any_to_axis_angle(orientation): ori_repr = orientation['repr']", "== 2: u2 = math.sqrt(rotmat[2,2]-rotmat[0,0]-rotmat[1,1]+1)/2 s = 1.0/(2*u2) u0 = s*rotmat[0,2] u1 =", "'Unrecognized orientation repr {0}'.format(ori_repr)) return dcm def rotate_vector_dcm(v, dcm): rotmat = get_rotmat_dcm(dcm) return", "world=to_world) else: raise ValueError( 'Unrecognized orientation repr {0}'.format(ori_repr)) return euler def rotate_vector_euler(v, euler,", "def get_perturbed_quat(q): raise NotImplementedError def quat_to_axis_angle(q): angle = 2*math.acos(q[0]) sin = math.sqrt(1.0-q[0]**2) if", "it as a copy. 
''' p = np.copy(q) return normalize_quat(p) def quat_is_normalized(q): norm", "rotmat[2,0] u2 = rotmat[1,0] - rotmat[0,1] else: #Find the largest entry in the", "math.isclose(norm, 1.0, abs_tol=1e-14, rel_tol=1e-14): axis /= norm angle = math.fmod(angle, 2*math.pi) if angle", "world=True): dcm = get_shiftmat_euler(euler, seq=seq, world=world, forward=True) return dcm def euler_to_axis_angle(euler, seq='XYZ', world=True):", "= np.trace(rotmat) angle = math.acos((trace-1)/2) if angle > 0: if angle < math.pi:", "= get_shiftmat_axis_angle(axis, angle, forward=forward) return np.dot(v, shiftmat.T) def shift_tensor2_axis_angle(a, axis, angle, forward=False): shiftmat", "> 0.0: if angle < math.pi: axis = q[1:4]/sin else: rotmat = get_rotmat_quat(q)", "return shiftmat #Direction cosine matrix----------------------------------------------- def dcm_from_axes(A, B): ''' Returns the direction cosine", "angle = 2.0*math.pi*np.random.random() while True: #Generate two uniform random numbers from [-1, 1)", "= 2.0*zeta1*rt axis[1] = 2.0*zeta2*rt axis[2] = 1.0 - 2.0*zetasq return fix_axis_angle(axis, angle)", "= rotmat[0,2] - rotmat[2,0] u2 = rotmat[1,0] - rotmat[0,1] else: #Find the largest", "q[0]**2 q1sq = q[1]**2 q2sq = q[2]**2 q3sq = q[3]**2 q0q1 = q[0]*q[1]", "-q2, q1, q0]]) def ang_vel_to_quat_deriv(q, ang_vel): mat = ang_vel_to_quat_deriv_mat(q) qdot = np.dot(mat, ang_vel)", "= axis[0]*axis[0]*icos + cos R[0,1] = axis[0]*axis[1]*icos - axis[2]*sin R[0,2] = axis[0]*axis[2]*icos +", "entry in the diagonal of rotmat k = np.argmax(np.diag(rotmat)) if k == 0:", "represent coordinate axes. They must be unit vectors. 
''' assert old.shape[0] == new.shape[0]", "rotmat.T) def get_rotmat_euler(euler, seq='XYZ', world=True): return eulang.rotmat_euler(euler, seq=seq, world=world) def shift_vector_euler(v, euler, seq='XYZ',", "q2, -q1, q0]]) #Other functions------------------------------------------------------ def translate(v, delta): ''' Translates vectors inplace by", "axis[1]*axis[2]*icos - axis[0]*sin R[2,0] = axis[2]*axis[0]*icos - axis[1]*sin R[2,1] = axis[1]*axis[2]*icos + axis[0]*sin", "python import math import numpy as np from . import linalg as la", "old.shape[0] == new.shape[0] n = old.shape[0] if n == 1: angle = math.acos(np.dot(old,", "of A represent the orthonormal basis vectors of frame A. B : (3,3)", "XYZ sequence is understood in the Aerospace community as: #First rotation about Z-axis,", "q0sq = q[0]**2 q1sq = q[1]**2 q2sq = q[2]**2 q3sq = q[3]**2 q0q1", "quat = np.array(orientation['quat']) dcm = quat_to_dcm(quat) elif ori_repr == 'euler': euler = np.array(orientation['euler'])", "#!/usr/bin/env python import math import numpy as np from . import linalg as", "#This is the same as the sequence used in Blender. 
#In contrast, the", "dcm = dcm_from_axes(axes_old, axes_new) return rotate_vector_dcm(v, dcm) elif n == 3: dcm =", "s*rotmat[0,1] u2 = s*rotmat[1,2] elif k == 2: u2 = math.sqrt(rotmat[2,2]-rotmat[0,0]-rotmat[1,1]+1)/2 s =", "= get_rotmat_euler(euler, seq=seq, world=world) if forward: shiftmat = rotmat.T else: shiftmat = rotmat", "any_to_axis_angle(orientation): ori_repr = orientation['repr'] if ori_repr == 'quat': quat = np.array(orientation['quat']) axis, angle", "def quat_to_euler(q, seq='XYZ', world=True): rotmat = get_rotmat_quat(q) return factorize_rotmat(rotmat, seq=seq, world=world) def quat_to_dcm(q):", "np.einsum('ip,jq,pq', shiftmat, shiftmat, a) def shift_tensor3_axis_angle(a, axis, angle, forward=False): shiftmat = get_shiftmat_axis_angle(axis, angle,", "= orientation['world'] quat = euler_to_quat(euler, seq=seq, world=world) elif ori_repr == 'axis_angle': axis =", "it as a new instance. ''' p = np.copy(q) return conjugate_quat(p) def normalize_quat(q):", "''' rotmat = get_rotmat_axis_angle(axis, angle) return np.dot(v, rotmat.T) def get_rotmat_axis_angle(axis, angle): R =", "angle = extract_axis_angle_from_rotmat(rotmat) else: axis = np.array([1.0, 0.0, 0.0]) return fix_axis_angle(axis, angle, normalize=True)", "------- (3,3) ndarray The dcm of frame B w.r.t. frame A. 
''' return", "forward=False): shiftmat = get_shiftmat_quat(quat, forward=forward) return np.einsum('ip,jq,kr,pqr', shiftmat, shiftmat, shiftmat, a) def get_shiftmat_quat(q,", "= q[0]*q[2] q0q3 = q[0]*q[3] q1q2 = q[1]*q[2] q1q3 = q[1]*q[3] q2q3 =", "= quat_to_euler(quat, seq=to_seq, world=to_world) elif ori_repr == 'euler': euler = np.array(orientation['euler']) seq =", "p0, -p3, p2], [p2, p3, p0, -p1], [p3, -p2, p1, p0]]) pq =", "forward=False): shiftmat = get_shiftmat_quat(q, forward=forward) return np.dot(v, shiftmat.T) def shift_tensor2_quat(a, quat, forward=False): shiftmat", "while True: #Generate two uniform random numbers from [-1, 1) zeta1 = 2.0*np.random.random()", "angle): R = np.zeros((3,3)) sin = np.sin(angle) cos = np.cos(angle) icos = 1.0", "'euler': euler = np.array(orientation['euler']) seq = orientation['seq'] world = orientation['world'] euler = euler_to_euler(euler,", "rotation #about Y, and the third rotation about Z axis of the world(i.e.", "q def get_conjugated_quat(q): ''' Conjugates a quaternion and returns a copy. ''' p", "== 2: z_old = la.unitized(np.cross(old[0,:], old[1,:])) z_new = la.unitized(np.cross(new[0,:], new[1,:])) axes_old = np.vstack((old,", "np.array(orientation['euler']) seq = orientation['seq'] world = orientation['world'] axis, angle = euler_to_axis_angle(euler, seq=seq, world=world)", "world=world) return axis_angle_to_quat(axis, angle) def euler_to_dcm(euler, seq='XYZ', world=True): dcm = get_shiftmat_euler(euler, seq=seq, world=world,", "forward=False): shiftmat = get_rotmat_axis_angle(-axis, angle) if not forward: shiftmat = shiftmat.T return shiftmat", "v.shape[0] for i in range(n): v[i,:] += delta return v def align(v, old,", "shift_vector_dcm(v, dcm, forward=False): shiftmat = get_shiftmat_dcm(dcm, forward=forward) return np.dot(v, shiftmat.T) def shift_tensor2_dcm(a, dcm,", "A. 
Parameters ---------- A : (3,3) ndarray The rows of A represent the", "- cos R[0,0] = axis[0]*axis[0]*icos + cos R[0,1] = axis[0]*axis[1]*icos - axis[2]*sin R[0,2]", "0.0, 0.0]) def get_rand_quat(): axis, angle = get_rand_axis_angle() return axis_angle_to_quat(axis, angle) def get_perturbed_quat(q):", "def get_angle_between_quat(p, q): ''' Returns the angle between two quaternions p and q.", "Returns the angle between two quaternions p and q. ''' return math.acos(np.dot(p,q)) def", "forward=True) return dcm def euler_to_axis_angle(euler, seq='XYZ', world=True): rotmat = get_rotmat_euler(euler, seq=seq, world=world) axis,", "2*(q2q3 + q0q1) rotmat[2,2] = 2*(q0sq + q3sq) - 1.0 return rotmat def", "zeta1 = 2.0*np.random.random() - 1.0 zeta2 = 2.0*np.random.random() - 1.0 zetasq = zeta1**2", "axes(i.e. frame) B w.r.t. axes(i.e. frame) A. Parameters ---------- A : (3,3) ndarray", "euler = np.array(orientation['euler']) seq = orientation['seq'] world = orientation['world'] quat = euler_to_quat(euler, seq=seq,", "rotation about Y-axis, and the third #rotation about X-axis of the body frame.", "np.dot(v, rotmat.T) def get_rotmat_dcm(dcm): return dcm.T def shift_vector_dcm(v, dcm, forward=False): shiftmat = get_shiftmat_dcm(dcm,", "new.shape[0] n = old.shape[0] if n == 1: angle = math.acos(np.dot(old, new)) axis", "'axis_angle': axis = np.array(orientation['axis']) angle = orientation['angle'] quat = axis_angle_to_quat(axis, angle) elif ori_repr", "interpolate_quat(q1, q2, t): theta = get_angle_between_quat(q1, q2) q = (q1*math.sin((1.0-t)*theta) + q2*math.sin(t*theta))/math.sin(theta) return", "get_rotmat_euler(euler, seq=seq, world=world) return np.dot(v, rotmat.T) def get_rotmat_euler(euler, seq='XYZ', world=True): return eulang.rotmat_euler(euler, seq=seq,", "t): theta = get_angle_between_quat(q1, q2) q = (q1*math.sin((1.0-t)*theta) + q2*math.sin(t*theta))/math.sin(theta) return normalize_quat(q) def", "= np.linalg.norm(axis) if not math.isclose(norm, 1.0, 
abs_tol=1e-14, rel_tol=1e-14): axis /= norm angle =", "def shift_vector_axis_angle(v, axis, angle, forward=False): shiftmat = get_shiftmat_axis_angle(axis, angle, forward=forward) return np.dot(v, shiftmat.T)", "axis[0]*axis[1]*icos - axis[2]*sin R[0,2] = axis[0]*axis[2]*icos + axis[1]*sin R[1,0] = axis[0]*axis[1]*icos + axis[2]*sin", "def shift_tensor2_dcm(a, dcm, forward=False): shiftmat = get_shiftmat_dcm(dcm, forward=forward) return np.einsum('ip,jq,pq', shiftmat, shiftmat, a)", "angle sequence: XYZ (world). First rotation about X, second rotation #about Y, and", "shiftmat.T return shiftmat #Euler angle----------------------------------------------------------- def factorize_rotmat(rotmat, seq='XYZ', world=True): return eulang.factor_rotmat(rotmat, seq=seq, world=world)", "Rotates vectors about axis by angle. ''' rotmat = get_rotmat_euler(euler, seq=seq, world=world) return", "'euler': euler = np.array(orientation['euler']) seq = orientation['seq'] world = orientation['world'] dcm = euler_to_dcm(euler,", "world=world) return euler def axis_angle_to_dcm(axis, angle): dcm = get_shiftmat_axis_angle(axis, angle, forward=True) return dcm", "def euler_to_dcm(euler, seq='XYZ', world=True): dcm = get_shiftmat_euler(euler, seq=seq, world=world, forward=True) return dcm def", "new represent coordinate axes. They must be unit vectors. ''' assert old.shape[0] ==", "get_rotmat_axis_angle(axis, angle) euler = factorize_rotmat(rotmat, seq=seq, world=world) return euler def axis_angle_to_dcm(axis, angle): dcm", "world=True): axis, angle = euler_to_axis_angle(euler, seq=seq, world=world) return axis_angle_to_quat(axis, angle) def euler_to_dcm(euler, seq='XYZ',", "to_world) elif ori_repr == 'axis_angle': axis = np.array(orientation['axis']) angle = orientation['angle'] euler =", "orientation['world'] axis, angle = euler_to_axis_angle(euler, seq=seq, world=world) elif ori_repr == 'axis_angle': axis =", "349. 
''' axis = np.zeros((3,)) #Generate angle: A uniform random number from [0.0,", "q[2]*q[3] rotmat[0,0] = 2*(q0sq + q1sq) - 1.0 rotmat[0,1] = 2*(q1q2 - q0q3)", "'Unrecognized orientation repr {0}'.format(ori_repr)) return axis, angle def rotate_vector_axis_angle(v, axis, angle): ''' Rotates", "def euler_to_euler(euler, seq, world, to_seq, to_world): rotmat = get_rotmat_euler(euler, seq=seq, world=world) return factorize_rotmat(rotmat,", "angle > math.pi: angle = 2*math.pi - angle axis = -axis return (axis,", "shift_tensor2_euler(a, euler, forward=False): shiftmat = get_shiftmat_euler(euler, forward=forward) return np.einsum('ip,jq,pq', shiftmat, shiftmat, a) def", "conjugate_quat(q) def get_inverted_quat(q): ''' Inverts a quaternion and returns it as a new", "np.einsum('ip,jq,pq', shiftmat, shiftmat, a) def shift_tensor3_dcm(a, dcm, forward=False): shiftmat = get_shiftmat_dcm(dcm, forward=forward) return", "- axis[2]*sin R[0,2] = axis[0]*axis[2]*icos + axis[1]*sin R[1,0] = axis[0]*axis[1]*icos + axis[2]*sin R[1,1]", "R[0,2] = axis[0]*axis[2]*icos + axis[1]*sin R[1,0] = axis[0]*axis[1]*icos + axis[2]*sin R[1,1] = axis[1]*axis[1]*icos", "= normalize_quat(np.dot(prod_mat, q)) return pq def interpolate_quat(q1, q2, t): theta = get_angle_between_quat(q1, q2)", "angle = dcm_to_axis_angle(orientation['dcm']) else: raise ValueError( 'Unrecognized orientation repr {0}'.format(ori_repr)) return axis, angle", "ori_repr == 'dcm': quat = dcm_to_quat(orientation['dcm']) else: raise ValueError( 'Unrecognized orientation repr {0}'.format(ori_repr))", "euler_to_euler(euler, seq, world, to_seq, to_world): rotmat = get_rotmat_euler(euler, seq=seq, world=world) return factorize_rotmat(rotmat, seq=to_seq,", "q2, q3 = tuple(q) return 0.5*np.array([[-q1, -q2, -q3], [ q0, q3, -q2], [-q3,", "== 'dcm': quat = dcm_to_quat(orientation['dcm']) else: raise ValueError( 'Unrecognized orientation repr {0}'.format(ori_repr)) return", "def get_rand_axis_angle(): ''' Generates a random pair of 
axis-angle. The axis is a", "{0}'.format(ori_repr)) return dcm def rotate_vector_dcm(v, dcm): rotmat = get_rotmat_dcm(dcm) return np.dot(v, rotmat.T) def", "v[0], v[1], v[2]]) return normalize_quat(q) def axis_angle_to_euler(axis, angle, seq='XYZ', world=True): rotmat = get_rotmat_axis_angle(axis,", "Rotates vectors about axis by angle. ''' rotmat = get_rotmat_axis_angle(axis, angle) return np.dot(v,", "return pq def interpolate_quat(q1, q2, t): theta = get_angle_between_quat(q1, q2) q = (q1*math.sin((1.0-t)*theta)", "la.unitized(np.cross(old, new)) return rotate_vector_axis_angle(v, axis, angle) elif n == 2: z_old = la.unitized(np.cross(old[0,:],", "zeta1**2 + zeta2**2 if zetasq <= 1.0: break rt = np.sqrt(1.0-zetasq) axis[0] =", "copy. ''' p = np.copy(q) return normalize_quat(p) def quat_is_normalized(q): norm = np.linalg.norm(q) if", "def shift_tensor3_quat(a, quat, forward=False): shiftmat = get_shiftmat_quat(quat, forward=forward) return np.einsum('ip,jq,kr,pqr', shiftmat, shiftmat, shiftmat,", "icos = 1.0 - cos R[0,0] = axis[0]*axis[0]*icos + cos R[0,1] = axis[0]*axis[1]*icos", "seq=seq, world=world) def shift_vector_euler(v, euler, seq='XYZ', world=True, forward=False): shiftmat = get_shiftmat_euler(euler, seq=seq, world=world,", "= orientation['angle'] quat = axis_angle_to_quat(axis, angle) elif ori_repr == 'dcm': quat = dcm_to_quat(orientation['dcm'])", "np.array([w, v[0], v[1], v[2]]) return normalize_quat(q) def axis_angle_to_euler(axis, angle, seq='XYZ', world=True): rotmat =", "return dcm def euler_to_axis_angle(euler, seq='XYZ', world=True): rotmat = get_rotmat_euler(euler, seq=seq, world=world) axis, angle", "as the sequence used in Blender. #In contrast, the XYZ sequence is understood", "a) def get_shiftmat_axis_angle(axis, angle, forward=False): shiftmat = get_rotmat_axis_angle(-axis, angle) if not forward: shiftmat", "B. Returns ------- (3,3) ndarray The dcm of frame B w.r.t. 
frame A.", "== 1: angle = math.acos(np.dot(old, new)) axis = la.unitized(np.cross(old, new)) return rotate_vector_axis_angle(v, axis,", "about axis by angle. ''' rotmat = get_rotmat_euler(euler, seq=seq, world=world) return np.dot(v, rotmat.T)", "angle, forward=False): shiftmat = get_rotmat_axis_angle(-axis, angle) if not forward: shiftmat = shiftmat.T return", "+ zeta2**2 if zetasq <= 1.0: break rt = np.sqrt(1.0-zetasq) axis[0] = 2.0*zeta1*rt", "Y-axis, and the third #rotation about X-axis of the body frame. #Axis_angle------------------------------------------------------------ def", "np.array(orientation['euler']) seq = orientation['seq'] world = orientation['world'] euler = euler_to_euler(euler, seq, world, to_seq,", "2.0*math.pi*np.random.random() while True: #Generate two uniform random numbers from [-1, 1) zeta1 =", "forward=forward) return np.dot(v, shiftmat.T) def shift_tensor2_euler(a, euler, forward=False): shiftmat = get_shiftmat_euler(euler, forward=forward) return", "euler, forward=False): shiftmat = get_shiftmat_euler(euler, forward=forward) return np.einsum('ip,jq,pq', shiftmat, shiftmat, a) def shift_tensor3_euler(a,", "seq = orientation['seq'] world = orientation['world'] axis, angle = euler_to_axis_angle(euler, seq=seq, world=world) elif", "= dcm_to_quat(orientation['dcm']) else: raise ValueError( 'Unrecognized orientation repr {0}'.format(ori_repr)) return quat def rotate_vector_quat(v,", "if math.isclose(norm, 1.0, rel_tol=1e-14): return True else: return False def get_quat_prod(p, q): p0,", "euler_to_dcm(euler, seq='XYZ', world=True): dcm = get_shiftmat_euler(euler, seq=seq, world=world, forward=True) return dcm def euler_to_axis_angle(euler,", "from Allen & Tildesley p. 349. ''' axis = np.zeros((3,)) #Generate angle: A", "rotation about Z axis of the world(i.e. fixed) frame. 
#This is the same", "world=True): mat = get_rotmat_dcm(dcm) euler = factorize_rotmat(mat, seq=seq, world=world) return euler def dcm_to_axis_angle(dcm):", "return np.dot(v, rotmat.T) def get_rotmat_axis_angle(axis, angle): R = np.zeros((3,3)) sin = np.sin(angle) cos", "orientation['angle'] quat = axis_angle_to_quat(axis, angle) elif ori_repr == 'dcm': quat = dcm_to_quat(orientation['dcm']) else:", "-p3], [p1, p0, -p3, p2], [p2, p3, p0, -p1], [p3, -p2, p1, p0]])", "= np.argmax(np.diag(rotmat)) if k == 0: u0 = math.sqrt(rotmat[0,0]-rotmat[1,1]-rotmat[2,2]+1)/2 s = 1.0/(2*u0) u1", "if ori_repr == 'quat': quat = np.array(orientation['quat']) dcm = quat_to_dcm(quat) elif ori_repr ==", "= 2*(q1q3 + q0q2) rotmat[1,0] = 2*(q1q2 + q0q3) rotmat[1,1] = 2*(q0sq +", "seq = orientation['seq'] world = orientation['world'] euler = euler_to_euler(euler, seq, world, to_seq, to_world)", "angle = extract_axis_angle_from_rotmat(mat) return (axis, angle) def any_to_dcm(orientation): ori_repr = orientation['repr'] if ori_repr", "''' return conjugate_quat(q) def get_inverted_quat(q): ''' Inverts a quaternion and returns it as", "shift_tensor3_dcm(a, dcm, forward=False): shiftmat = get_shiftmat_dcm(dcm, forward=forward) return np.einsum('ip,jq,kr,pqr', shiftmat, shiftmat, shiftmat, a)", "'dcm': dcm = dcm_to_quat(orientation['dcm']) else: raise ValueError( 'Unrecognized orientation repr {0}'.format(ori_repr)) return dcm", "Conjugates a quaternion in-place. 
''' q[1:4] = -q[1:4] return q def get_conjugated_quat(q): '''", "2*math.acos(q[0]) sin = math.sqrt(1.0-q[0]**2) if angle > 0.0: if angle < math.pi: axis", "shiftmat = rotmat return shiftmat #Quaternion----------------------------------------------------------- def get_rand_quat(): q = np.random.random((4,)) return normalize_quat(q)", "np.trace(rotmat) angle = math.acos((trace-1)/2) if angle > 0: if angle < math.pi: u0", "#First rotation about Z-axis, second rotation about Y-axis, and the third #rotation about", "= math.acos(np.dot(old, new)) axis = la.unitized(np.cross(old, new)) return rotate_vector_axis_angle(v, axis, angle) elif n", "-p1], [p3, -p2, p1, p0]]) pq = normalize_quat(np.dot(prod_mat, q)) return pq def interpolate_quat(q1,", "2*(q1q3 + q0q2) rotmat[1,0] = 2*(q1q2 + q0q3) rotmat[1,1] = 2*(q0sq + q2sq)", "normalize_quat(q) def get_angle_between_quat(p, q): ''' Returns the angle between two quaternions p and", "return q def get_conjugated_quat(q): ''' Conjugates a quaternion and returns a copy. '''", "get_conjugated_quat(q): ''' Conjugates a quaternion and returns a copy. 
''' p = np.copy(q)", "1.0 - 2.0*zetasq return fix_axis_angle(axis, angle) def axis_angle_to_quat(axis, angle): w = math.cos(angle/2) v", "return np.dot(mat, qdot) def quat_deriv_to_ang_vel_mat(q): q0, q1, q2, q3 = tuple(q) return 2*np.array([[-q1,", "angle > 0.0: if angle < math.pi: axis = q[1:4]/sin else: rotmat =", "axis_angle_to_dcm(axis, angle) elif ori_repr == 'dcm': dcm = dcm_to_quat(orientation['dcm']) else: raise ValueError( 'Unrecognized", "elif ori_repr == 'euler': euler = np.array(orientation['euler']) seq = orientation['seq'] world = orientation['world']", "angle axis = -axis return (axis, angle) def get_rand_axis_angle(): ''' Generates a random", "seq=to_seq, world=to_world) elif ori_repr == 'dcm': euler = dcm_to_euler(orientation['dcm'], seq=to_seq, world=to_world) else: raise", "q2sq = q[2]**2 q3sq = q[3]**2 q0q1 = q[0]*q[1] q0q2 = q[0]*q[2] q0q3", "u1 = s*rotmat[1,2] else: u0 = 1.0 u1 = 0.0 u2 = 0.0", "cosine matrix----------------------------------------------- def dcm_from_axes(A, B): ''' Returns the direction cosine matrix of axes(i.e.", "= math.acos((trace-1)/2) if angle > 0: if angle < math.pi: u0 = rotmat[2,1]", "'Unrecognized orientation repr {0}'.format(ori_repr)) return euler def rotate_vector_euler(v, euler, seq='XYZ', world=True): ''' Rotates", "NotImplementedError def quat_to_axis_angle(q): angle = 2*math.acos(q[0]) sin = math.sqrt(1.0-q[0]**2) if angle > 0.0:", "angle, forward=False): shiftmat = get_shiftmat_axis_angle(axis, angle, forward=forward) return np.einsum('ip,jq,kr,pqr', shiftmat, shiftmat, shiftmat, a)", "= get_rotmat_axis_angle(-axis, angle) if not forward: shiftmat = shiftmat.T return shiftmat #Direction cosine", "axis by angle. 
''' rotmat = get_rotmat_axis_angle(axis, angle) return np.dot(v, rotmat.T) def get_rotmat_axis_angle(axis,", "shift_vector_axis_angle(v, axis, angle, forward=False): shiftmat = get_shiftmat_axis_angle(axis, angle, forward=forward) return np.dot(v, shiftmat.T) def", "seq=seq, world=world) return axis_angle_to_quat(axis, angle) def euler_to_dcm(euler, seq='XYZ', world=True): dcm = get_shiftmat_euler(euler, seq=seq,", "axis, angle = dcm_to_axis_angle(orientation['dcm']) else: raise ValueError( 'Unrecognized orientation repr {0}'.format(ori_repr)) return axis,", "1.0 - cos R[0,0] = axis[0]*axis[0]*icos + cos R[0,1] = axis[0]*axis[1]*icos - axis[2]*sin", "world=True): rotmat = get_rotmat_euler(euler, seq=seq, world=world) axis, angle = extract_axis_angle_from_rotmat(rotmat) return (axis, angle)", "= 2*(q0sq + q2sq) - 1.0 rotmat[1,2] = 2*(q2q3 - q0q1) rotmat[2,0] =", "A : (3,3) ndarray The rows of A represent the orthonormal basis vectors", "qdot = np.dot(mat, ang_vel) return qdot def ang_vel_to_quat_deriv_mat(q): q0, q1, q2, q3 =", "get_shiftmat_axis_angle(axis, angle, forward=forward) return np.einsum('ip,jq,kr,pqr', shiftmat, shiftmat, shiftmat, a) def get_shiftmat_axis_angle(axis, angle, forward=False):", "= get_rotmat_quat(q) return factorize_rotmat(rotmat, seq=seq, world=world) def quat_to_dcm(q): return get_shiftmat_quat(q, forward=True) def any_to_quat(orientation):", "u2 = math.sqrt(rotmat[2,2]-rotmat[0,0]-rotmat[1,1]+1)/2 s = 1.0/(2*u2) u0 = s*rotmat[0,2] u1 = s*rotmat[1,2] else:", "rotate_vector_axis_angle(v, axis, angle): ''' Rotates vectors about axis by angle. ''' rotmat =", "angle. 
''' rotmat = get_rotmat_axis_angle(axis, angle) return np.dot(v, rotmat.T) def get_rotmat_axis_angle(axis, angle): R", "def interpolate_quat(q1, q2, t): theta = get_angle_between_quat(q1, q2) q = (q1*math.sin((1.0-t)*theta) + q2*math.sin(t*theta))/math.sin(theta)", "v[2]]) return normalize_quat(q) def axis_angle_to_euler(axis, angle, seq='XYZ', world=True): rotmat = get_rotmat_axis_angle(axis, angle) euler", "forward=False): shiftmat = get_shiftmat_axis_angle(axis, angle, forward=forward) return np.einsum('ip,jq,pq', shiftmat, shiftmat, a) def shift_tensor3_axis_angle(a,", "2*pi) angle = 2.0*math.pi*np.random.random() while True: #Generate two uniform random numbers from [-1,", "== 'axis_angle': axis = np.array(orientation['axis']) angle = orientation['angle'] quat = axis_angle_to_quat(axis, angle) elif", "shiftmat #Quaternion----------------------------------------------------------- def get_rand_quat(): q = np.random.random((4,)) return normalize_quat(q) def get_identity_quat(): return np.array([1.0,", "def quat_is_normalized(q): norm = np.linalg.norm(q) if math.isclose(norm, 1.0, rel_tol=1e-14): return True else: return", "eulang.rotmat_euler(euler, seq=seq, world=world) def shift_vector_euler(v, euler, seq='XYZ', world=True, forward=False): shiftmat = get_shiftmat_euler(euler, seq=seq,", "= np.copy(q) p[1:4] = -p[1:4] return p def invert_quat(q): ''' Inverts a quaternion", "get_rotmat_euler(euler, seq='XYZ', world=True): return eulang.rotmat_euler(euler, seq=seq, world=world) def shift_vector_euler(v, euler, seq='XYZ', world=True, forward=False):", "quaternion in-place. 
''' q[1:4] = -q[1:4] return q def get_conjugated_quat(q): ''' Conjugates a", "R[2,0] = axis[2]*axis[0]*icos - axis[1]*sin R[2,1] = axis[1]*axis[2]*icos + axis[0]*sin R[2,2] = axis[2]*axis[2]*icos", "uniform random numbers from [-1, 1) zeta1 = 2.0*np.random.random() - 1.0 zeta2 =", "k = np.argmax(np.diag(rotmat)) if k == 0: u0 = math.sqrt(rotmat[0,0]-rotmat[1,1]-rotmat[2,2]+1)/2 s = 1.0/(2*u0)", "- axis[1]*sin R[2,1] = axis[1]*axis[2]*icos + axis[0]*sin R[2,2] = axis[2]*axis[2]*icos + cos return", "random numbers from [-1, 1) zeta1 = 2.0*np.random.random() - 1.0 zeta2 = 2.0*np.random.random()", "np.array(orientation['axis']) angle = orientation['angle'] elif ori_repr == 'dcm': axis, angle = dcm_to_axis_angle(orientation['dcm']) else:", "A. ''' return np.dot(B, A.T) def dcm_to_quat(dcm): mat = get_rotmat_dcm(dcm) axis, angle =", "np.cos(angle) icos = 1.0 - cos R[0,0] = axis[0]*axis[0]*icos + cos R[0,1] =", "forward=True) def any_to_quat(orientation): ori_repr = orientation['repr'] if ori_repr == 'quat': quat = np.array(orientation['quat'])", "2.0*np.random.random() - 1.0 zetasq = zeta1**2 + zeta2**2 if zetasq <= 1.0: break", "== 'euler': euler = np.array(orientation['euler']) seq = orientation['seq'] world = orientation['world'] dcm =", "forward=forward) return np.einsum('ip,jq,pq', shiftmat, shiftmat, a) def shift_tensor3_dcm(a, dcm, forward=False): shiftmat = get_shiftmat_dcm(dcm,", "vectors about axis by angle. 
''' rotmat = get_rotmat_axis_angle(axis, angle) return np.dot(v, rotmat.T)", "1.0 rotmat[1,2] = 2*(q2q3 - q0q1) rotmat[2,0] = 2*(q1q3 - q0q2) rotmat[2,1] =", "---------- A : (3,3) ndarray The rows of A represent the orthonormal basis", "get_rotmat_quat(q) return factorize_rotmat(rotmat, seq=seq, world=world) def quat_to_dcm(q): return get_shiftmat_quat(q, forward=True) def any_to_quat(orientation): ori_repr", "Z-axis, second rotation about Y-axis, and the third #rotation about X-axis of the", "[-1, 1) zeta1 = 2.0*np.random.random() - 1.0 zeta2 = 2.0*np.random.random() - 1.0 zetasq", "= get_rotmat_euler(euler, seq=seq, world=world) return factorize_rotmat(rotmat, seq=to_seq, world=to_world) def euler_to_quat(euler, seq='XYZ', world=True): axis,", "ori_repr == 'quat': quat = np.array(orientation['quat']) axis, angle = quat_to_axis_angle(quat) elif ori_repr ==", "v def align(v, old, new): ''' old and new represent coordinate axes. They", "= q[2]**2 q3sq = q[3]**2 q0q1 = q[0]*q[1] q0q2 = q[0]*q[2] q0q3 =", "axis_angle_to_dcm(axis, angle): dcm = get_shiftmat_axis_angle(axis, angle, forward=True) return dcm def any_to_axis_angle(orientation): ori_repr =", "0.0, 0.0, 0.0]) def get_rand_quat(): axis, angle = get_rand_axis_angle() return axis_angle_to_quat(axis, angle) def", "'euler': euler = np.array(orientation['euler']) seq = orientation['seq'] world = orientation['world'] quat = euler_to_quat(euler,", "np.einsum('ip,jq,kr,pqr', shiftmat, shiftmat, shiftmat, a) def get_shiftmat_quat(q, forward=False): if forward: shiftmat = get_rotmat_quat(get_conjugated_quat(q))", "numpy as np from . import linalg as la from . import eulang", "= 1.0/(2*u1) u0 = s*rotmat[0,1] u2 = s*rotmat[1,2] elif k == 2: u2", "factorize_rotmat(rotmat, seq=seq, world=world) def quat_to_dcm(q): return get_shiftmat_quat(q, forward=True) def any_to_quat(orientation): ori_repr = orientation['repr']", "def get_normalized_quat(q): ''' Normalizes a quaternion and returns it as a copy. 
'''", "else: return False def get_quat_prod(p, q): p0, p1, p2, p3 = tuple(p) prod_mat", "shift_vector_quat(v, q, forward=False): shiftmat = get_shiftmat_quat(q, forward=forward) return np.dot(v, shiftmat.T) def shift_tensor2_quat(a, quat,", "n == 3: dcm = dcm_from_axes(old, new) return rotate_vector_dcm(v, dcm) def mat_is_dcm(mat): return", "seq=seq, world=world) elif ori_repr == 'axis_angle': axis = np.array(orientation['axis']) angle = orientation['angle'] elif", "u2 = s*rotmat[0,2] elif k == 1: u1 = math.sqrt(rotmat[1,1]-rotmat[0,0]-rotmat[2,2]+1)/2 s = 1.0/(2*u1)", "- q0q1) rotmat[2,0] = 2*(q1q3 - q0q2) rotmat[2,1] = 2*(q2q3 + q0q1) rotmat[2,2]", "== 'euler': euler = np.array(orientation['euler']) seq = orientation['seq'] world = orientation['world'] euler =", "rotmat = get_rotmat_euler(euler, seq=seq, world=world) return factorize_rotmat(rotmat, seq=to_seq, world=to_world) def euler_to_quat(euler, seq='XYZ', world=True):", "= extract_axis_angle_from_rotmat(rotmat) else: axis = np.array([1.0, 0.0, 0.0]) return fix_axis_angle(axis, angle, normalize=True) def", "dcm_to_axis_angle(dcm): mat = get_rotmat_dcm(dcm) axis, angle = extract_axis_angle_from_rotmat(mat) return (axis, angle) def any_to_dcm(orientation):", "= q[2]*q[3] rotmat[0,0] = 2*(q0sq + q1sq) - 1.0 rotmat[0,1] = 2*(q1q2 -", "shift_vector_euler(v, euler, seq='XYZ', world=True, forward=False): shiftmat = get_shiftmat_euler(euler, seq=seq, world=world, forward=forward) return np.dot(v,", "= q[1]*q[2] q1q3 = q[1]*q[3] q2q3 = q[2]*q[3] rotmat[0,0] = 2*(q0sq + q1sq)", "if angle < math.pi: u0 = rotmat[2,1] - rotmat[1,2] u1 = rotmat[0,2] -", "the angle between two quaternions p and q. 
''' return math.acos(np.dot(p,q)) def quat_deriv_to_ang_vel(q,", "1.0 return rotmat def shift_vector_quat(v, q, forward=False): shiftmat = get_shiftmat_quat(q, forward=forward) return np.dot(v,", "== 0: u0 = math.sqrt(rotmat[0,0]-rotmat[1,1]-rotmat[2,2]+1)/2 s = 1.0/(2*u0) u1 = s*rotmat[0,1] u2 =", "second rotation #about Y, and the third rotation about Z axis of the", "return conjugate_quat(q) def get_inverted_quat(q): ''' Inverts a quaternion and returns it as a", "fix_axis_angle(axis, angle) def axis_angle_to_quat(axis, angle): w = math.cos(angle/2) v = math.sin(angle/2)*axis q =", "np.dot(v, shiftmat.T) def shift_tensor2_euler(a, euler, forward=False): shiftmat = get_shiftmat_euler(euler, forward=forward) return np.einsum('ip,jq,pq', shiftmat,", "axis = np.array([1.0, 0.0, 0.0]) return fix_axis_angle(axis, angle, normalize=True) def quat_to_euler(q, seq='XYZ', world=True):", "vectors inplace by delta. ''' n = v.shape[0] for i in range(n): v[i,:]", "= extract_axis_angle_from_rotmat(mat) return axis_angle_to_quat(axis, angle) def dcm_to_euler(dcm, seq='XYZ', world=True): mat = get_rotmat_dcm(dcm) euler", "forward: shiftmat = rotmat.T else: shiftmat = rotmat return shiftmat #Quaternion----------------------------------------------------------- def get_rand_quat():", "by delta. 
''' n = v.shape[0] for i in range(n): v[i,:] += delta", "def rotate_vector_dcm(v, dcm): rotmat = get_rotmat_dcm(dcm) return np.dot(v, rotmat.T) def get_rotmat_dcm(dcm): return dcm.T", "q[1]**2 q2sq = q[2]**2 q3sq = q[3]**2 q0q1 = q[0]*q[1] q0q2 = q[0]*q[2]", "= q[3]**2 q0q1 = q[0]*q[1] q0q2 = q[0]*q[2] q0q3 = q[0]*q[3] q1q2 =", "seq='XYZ', world=True): mat = get_rotmat_dcm(dcm) euler = factorize_rotmat(mat, seq=seq, world=world) return euler def", "= euler_to_euler(euler, seq, world, to_seq, to_world) elif ori_repr == 'axis_angle': axis = np.array(orientation['axis'])", "def euler_to_quat(euler, seq='XYZ', world=True): axis, angle = euler_to_axis_angle(euler, seq=seq, world=world) return axis_angle_to_quat(axis, angle)", "rotate_vector_euler(v, euler, seq='XYZ', world=True): ''' Rotates vectors about axis by angle. ''' rotmat", "return fix_axis_angle(axis, angle, normalize=True) def quat_to_euler(q, seq='XYZ', world=True): rotmat = get_rotmat_quat(q) return factorize_rotmat(rotmat,", "Y, and the third rotation about Z axis of the world(i.e. 
fixed) frame.", "orientation['seq'] world = orientation['world'] axis, angle = euler_to_axis_angle(euler, seq=seq, world=world) elif ori_repr ==", "def get_shiftmat_axis_angle(axis, angle, forward=False): shiftmat = get_rotmat_axis_angle(-axis, angle) if not forward: shiftmat =", "get_shiftmat_euler(euler, seq='XYZ', world=True, forward=False): rotmat = get_rotmat_euler(euler, seq=seq, world=world) if forward: shiftmat =", "q[3]**2 q0q1 = q[0]*q[1] q0q2 = q[0]*q[2] q0q3 = q[0]*q[3] q1q2 = q[1]*q[2]", "return rotate_vector_dcm(v, dcm) def mat_is_dcm(mat): return mat_is_rotmat(mat) def mat_is_rotmat(mat): det_is_one = math.isclose(np.linalg.det(mat), 1.0,", "return shiftmat #Quaternion----------------------------------------------------------- def get_rand_quat(): q = np.random.random((4,)) return normalize_quat(q) def get_identity_quat(): return", "axis = np.array(orientation['axis']) angle = orientation['angle'] elif ori_repr == 'dcm': axis, angle =", "angle = get_rand_axis_angle() return axis_angle_to_quat(axis, angle) def get_perturbed_quat(q): raise NotImplementedError def quat_to_axis_angle(q): angle", "False def get_quat_prod(p, q): p0, p1, p2, p3 = tuple(p) prod_mat = np.array([[p0,", "-p[1:4] return p def invert_quat(q): ''' Inverts a quaternion in-place. 
''' return conjugate_quat(q)", "def mat_is_dcm(mat): return mat_is_rotmat(mat) def mat_is_rotmat(mat): det_is_one = math.isclose(np.linalg.det(mat), 1.0, abs_tol=1e-12, rel_tol=1e-12) is_orthogonal", "angle = math.acos((trace-1)/2) if angle > 0: if angle < math.pi: u0 =", "world=world) return np.dot(v, rotmat.T) def get_rotmat_euler(euler, seq='XYZ', world=True): return eulang.rotmat_euler(euler, seq=seq, world=world) def", "forward=forward) return np.einsum('ip,jq,pq', shiftmat, shiftmat, a) def shift_tensor3_quat(a, quat, forward=False): shiftmat = get_shiftmat_quat(quat,", "== 'dcm': axis, angle = dcm_to_axis_angle(orientation['dcm']) else: raise ValueError( 'Unrecognized orientation repr {0}'.format(ori_repr))", ". import linalg as la from . import eulang #Euler angle sequence: XYZ", "quat = np.array(orientation['quat']) axis, angle = quat_to_axis_angle(quat) elif ori_repr == 'euler': euler =", "cos R[0,0] = axis[0]*axis[0]*icos + cos R[0,1] = axis[0]*axis[1]*icos - axis[2]*sin R[0,2] =", "def shift_tensor2_euler(a, euler, forward=False): shiftmat = get_shiftmat_euler(euler, forward=forward) return np.einsum('ip,jq,pq', shiftmat, shiftmat, a)", "#Other functions------------------------------------------------------ def translate(v, delta): ''' Translates vectors inplace by delta. 
''' n", "ang_vel_to_quat_deriv_mat(q) qdot = np.dot(mat, ang_vel) return qdot def ang_vel_to_quat_deriv_mat(q): q0, q1, q2, q3", "uniform random number from [0.0, 2*pi) angle = 2.0*math.pi*np.random.random() while True: #Generate two", "repr {0}'.format(ori_repr)) return euler def rotate_vector_euler(v, euler, seq='XYZ', world=True): ''' Rotates vectors about", "return q def get_normalized_quat(q): ''' Normalizes a quaternion and returns it as a", "v[i,:] += delta return v def align(v, old, new): ''' old and new", "angle) def any_to_dcm(orientation): ori_repr = orientation['repr'] if ori_repr == 'quat': quat = np.array(orientation['quat'])", "2*(q1q2 - q0q3) rotmat[0,2] = 2*(q1q3 + q0q2) rotmat[1,0] = 2*(q1q2 + q0q3)", "ValueError( 'Unrecognized orientation repr {0}'.format(ori_repr)) return euler def rotate_vector_euler(v, euler, seq='XYZ', world=True): '''", "quat = dcm_to_quat(orientation['dcm']) else: raise ValueError( 'Unrecognized orientation repr {0}'.format(ori_repr)) return quat def", "'quat': quat = np.array(orientation['quat']) axis, angle = quat_to_axis_angle(quat) elif ori_repr == 'euler': euler", "0.0 return fix_axis_angle(np.array([u0, u1, u2]), angle, normalize=True) def shift_vector_axis_angle(v, axis, angle, forward=False): shiftmat", "axis_angle_to_quat(axis, angle) def get_perturbed_quat(q): raise NotImplementedError def quat_to_axis_angle(q): angle = 2*math.acos(q[0]) sin =", "return normalize_quat(q) def get_identity_quat(): return np.array([1.0, 0.0, 0.0, 0.0]) def get_rand_quat(): axis, angle", "v[1], v[2]]) return normalize_quat(q) def axis_angle_to_euler(axis, angle, seq='XYZ', world=True): rotmat = get_rotmat_axis_angle(axis, angle)", "ori_repr == 'quat': quat = np.array(orientation['quat']) elif ori_repr == 'euler': euler = np.array(orientation['euler'])", "axis, angle = euler_to_axis_angle(euler, seq=seq, world=world) elif ori_repr == 'axis_angle': axis = np.array(orientation['axis'])", "get_rotmat_axis_angle(-axis, angle) if not 
forward: shiftmat = shiftmat.T return shiftmat #Direction cosine matrix-----------------------------------------------", "'axis_angle': axis = np.array(orientation['axis']) angle = orientation['angle'] euler = axis_angle_to_euler(axis, angle, seq=to_seq, world=to_world)", "shiftmat = dcm if not forward: shiftmat = shiftmat.T return shiftmat #Euler angle-----------------------------------------------------------", "= np.array(orientation['axis']) angle = orientation['angle'] euler = axis_angle_to_euler(axis, angle, seq=to_seq, world=to_world) elif ori_repr", "shiftmat, a) def get_shiftmat_euler(euler, seq='XYZ', world=True, forward=False): rotmat = get_rotmat_euler(euler, seq=seq, world=world) if", "delta. ''' n = v.shape[0] for i in range(n): v[i,:] += delta return", "the sequence used in Blender. #In contrast, the XYZ sequence is understood in", "angle = 2*math.acos(q[0]) sin = math.sqrt(1.0-q[0]**2) if angle > 0.0: if angle <", "np.copy(q) return normalize_quat(p) def quat_is_normalized(q): norm = np.linalg.norm(q) if math.isclose(norm, 1.0, rel_tol=1e-14): return", "rotmat = get_rotmat_quat(q) axis, angle = extract_axis_angle_from_rotmat(rotmat) else: axis = np.array([1.0, 0.0, 0.0])", "extract_axis_angle_from_rotmat(mat) return axis_angle_to_quat(axis, angle) def dcm_to_euler(dcm, seq='XYZ', world=True): mat = get_rotmat_dcm(dcm) euler =", "matrix----------------------------------------------- def dcm_from_axes(A, B): ''' Returns the direction cosine matrix of axes(i.e. frame)", "seq=seq, world=world) def quat_to_dcm(q): return get_shiftmat_quat(q, forward=True) def any_to_quat(orientation): ori_repr = orientation['repr'] if", "R[1,2] = axis[1]*axis[2]*icos - axis[0]*sin R[2,0] = axis[2]*axis[0]*icos - axis[1]*sin R[2,1] = axis[1]*axis[2]*icos", "axis of the world(i.e. fixed) frame. #This is the same as the sequence", "Blender. 
#In contrast, the XYZ sequence is understood in the Aerospace community as:", "angle) euler = factorize_rotmat(rotmat, seq=seq, world=world) return euler def axis_angle_to_dcm(axis, angle): dcm =", "= orientation['angle'] elif ori_repr == 'dcm': axis, angle = dcm_to_axis_angle(orientation['dcm']) else: raise ValueError(", "quat = np.array(orientation['quat']) elif ori_repr == 'euler': euler = np.array(orientation['euler']) seq = orientation['seq']", "Inverts a quaternion and returns it as a new instance. ''' p =", "return np.dot(v, rotmat.T) def get_rotmat_quat(q): rotmat = np.empty((3,3)) q0sq = q[0]**2 q1sq =", "p and q. ''' return math.acos(np.dot(p,q)) def quat_deriv_to_ang_vel(q, qdot): mat = quat_deriv_to_ang_vel_mat(q) return", "q0q1) rotmat[2,0] = 2*(q1q3 - q0q2) rotmat[2,1] = 2*(q2q3 + q0q1) rotmat[2,2] =", "elif n == 3: dcm = dcm_from_axes(old, new) return rotate_vector_dcm(v, dcm) def mat_is_dcm(mat):", "range(n): v[i,:] += delta return v def align(v, old, new): ''' old and", "np.array(orientation['axis']) angle = orientation['angle'] euler = axis_angle_to_euler(axis, angle, seq=to_seq, world=to_world) elif ori_repr ==", "angle, normalize=True) def shift_vector_axis_angle(v, axis, angle, forward=False): shiftmat = get_shiftmat_axis_angle(axis, angle, forward=forward) return", "angle) elif ori_repr == 'dcm': quat = dcm_to_quat(orientation['dcm']) else: raise ValueError( 'Unrecognized orientation", "= axis_angle_to_quat(axis, angle) elif ori_repr == 'dcm': quat = dcm_to_quat(orientation['dcm']) else: raise ValueError(", "get_angle_between_quat(p, q): ''' Returns the angle between two quaternions p and q. '''", "axis, angle, forward=False): shiftmat = get_shiftmat_axis_angle(axis, angle, forward=forward) return np.dot(v, shiftmat.T) def shift_tensor2_axis_angle(a,", "of frame A. 
B : (3,3) ndarray The rows of B represent the", "math.pi: axis = q[1:4]/sin else: rotmat = get_rotmat_quat(q) axis, angle = extract_axis_angle_from_rotmat(rotmat) else:", "angle): ''' Rotates vectors about axis by angle. ''' rotmat = get_rotmat_axis_angle(axis, angle)", "= euler_to_axis_angle(euler, seq=seq, world=world) elif ori_repr == 'axis_angle': axis = np.array(orientation['axis']) angle =", "seq = orientation['seq'] world = orientation['world'] dcm = euler_to_dcm(euler, seq=seq, world=world) elif ori_repr", "return euler def dcm_to_axis_angle(dcm): mat = get_rotmat_dcm(dcm) axis, angle = extract_axis_angle_from_rotmat(mat) return (axis,", "axes. They must be unit vectors. ''' assert old.shape[0] == new.shape[0] n =", "angle) elif ori_repr == 'dcm': dcm = dcm_to_quat(orientation['dcm']) else: raise ValueError( 'Unrecognized orientation", "return rotate_vector_axis_angle(v, axis, angle) elif n == 2: z_old = la.unitized(np.cross(old[0,:], old[1,:])) z_new", "= np.linalg.norm(q) if math.isclose(norm, 1.0, rel_tol=1e-14): return True else: return False def get_quat_prod(p,", "return normalize_quat(q) def get_angle_between_quat(p, q): ''' Returns the angle between two quaternions p", "break rt = np.sqrt(1.0-zetasq) axis[0] = 2.0*zeta1*rt axis[1] = 2.0*zeta2*rt axis[2] = 1.0", "third #rotation about X-axis of the body frame. #Axis_angle------------------------------------------------------------ def fix_axis_angle(axis, angle, normalize=True):", "rotate_vector_axis_angle(v, axis, angle) elif n == 2: z_old = la.unitized(np.cross(old[0,:], old[1,:])) z_new =", "angle = quat_to_axis_angle(quat) elif ori_repr == 'euler': euler = np.array(orientation['euler']) seq = orientation['seq']", "angle) def axis_angle_to_quat(axis, angle): w = math.cos(angle/2) v = math.sin(angle/2)*axis q = np.array([w,", "axis by angle. ''' rotmat = get_rotmat_euler(euler, seq=seq, world=world) return np.dot(v, rotmat.T) def", "by angle. 
''' rotmat = get_rotmat_euler(euler, seq=seq, world=world) return np.dot(v, rotmat.T) def get_rotmat_euler(euler,", "dcm = dcm_to_quat(orientation['dcm']) else: raise ValueError( 'Unrecognized orientation repr {0}'.format(ori_repr)) return dcm def", "else: shiftmat = get_rotmat_quat(q) return shiftmat def conjugate_quat(q): ''' Conjugates a quaternion in-place.", "math.acos((trace-1)/2) if angle > 0: if angle < math.pi: u0 = rotmat[2,1] -", "euler_to_axis_angle(euler, seq=seq, world=world) elif ori_repr == 'axis_angle': axis = np.array(orientation['axis']) angle = orientation['angle']", "seq=seq, world=world, forward=forward) return np.dot(v, shiftmat.T) def shift_tensor2_euler(a, euler, forward=False): shiftmat = get_shiftmat_euler(euler,", "axis, angle, forward=False): shiftmat = get_shiftmat_axis_angle(axis, angle, forward=forward) return np.einsum('ip,jq,pq', shiftmat, shiftmat, a)", "= math.sqrt(rotmat[0,0]-rotmat[1,1]-rotmat[2,2]+1)/2 s = 1.0/(2*u0) u1 = s*rotmat[0,1] u2 = s*rotmat[0,2] elif k", "= get_rotmat_quat(q) return np.dot(v, rotmat.T) def get_rotmat_quat(q): rotmat = np.empty((3,3)) q0sq = q[0]**2", "2*(q0sq + q3sq) - 1.0 return rotmat def shift_vector_quat(v, q, forward=False): shiftmat =", "world=world, forward=True) return dcm def euler_to_axis_angle(euler, seq='XYZ', world=True): rotmat = get_rotmat_euler(euler, seq=seq, world=world)", "new[1,:])) axes_old = np.vstack((old, z_old)) axes_new = np.vstack((new, z_new)) dcm = dcm_from_axes(axes_old, axes_new)", "def euler_to_axis_angle(euler, seq='XYZ', world=True): rotmat = get_rotmat_euler(euler, seq=seq, world=world) axis, angle = extract_axis_angle_from_rotmat(rotmat)", "= orientation['seq'] world = orientation['world'] euler = euler_to_euler(euler, seq, world, to_seq, to_world) elif", "= get_angle_between_quat(q1, q2) q = (q1*math.sin((1.0-t)*theta) + q2*math.sin(t*theta))/math.sin(theta) return normalize_quat(q) def get_angle_between_quat(p, q):", "sin = np.sin(angle) cos = np.cos(angle) icos = 
1.0 - cos R[0,0] =", "return np.array([1.0, 0.0, 0.0, 0.0]) def get_rand_quat(): axis, angle = get_rand_axis_angle() return axis_angle_to_quat(axis,", "+ q0q1) rotmat[2,2] = 2*(q0sq + q3sq) - 1.0 return rotmat def shift_vector_quat(v,", "la from . import eulang #Euler angle sequence: XYZ (world). First rotation about", "euler_to_quat(euler, seq=seq, world=world) elif ori_repr == 'axis_angle': axis = np.array(orientation['axis']) angle = orientation['angle']", "= get_rotmat_axis_angle(axis, angle) return np.dot(v, rotmat.T) def get_rotmat_axis_angle(axis, angle): R = np.zeros((3,3)) sin", "rotmat[0,2] - rotmat[2,0] u2 = rotmat[1,0] - rotmat[0,1] else: #Find the largest entry", "- q0q3) rotmat[0,2] = 2*(q1q3 + q0q2) rotmat[1,0] = 2*(q1q2 + q0q3) rotmat[1,1]", "rotmat[0,1] else: #Find the largest entry in the diagonal of rotmat k =", "shiftmat = get_shiftmat_dcm(dcm, forward=forward) return np.einsum('ip,jq,pq', shiftmat, shiftmat, a) def shift_tensor3_dcm(a, dcm, forward=False):", "old and new represent coordinate axes. They must be unit vectors. 
''' assert", "factorize_rotmat(rotmat, seq='XYZ', world=True): return eulang.factor_rotmat(rotmat, seq=seq, world=world) def euler_to_euler(euler, seq, world, to_seq, to_world):", "orientation repr {0}'.format(ori_repr)) return dcm def rotate_vector_dcm(v, dcm): rotmat = get_rotmat_dcm(dcm) return np.dot(v,", "return axis_angle_to_quat(axis, angle) def get_perturbed_quat(q): raise NotImplementedError def quat_to_axis_angle(q): angle = 2*math.acos(q[0]) sin", "angle = 2*math.pi - angle axis = -axis return (axis, angle) def get_rand_axis_angle():", "shiftmat = get_shiftmat_dcm(dcm, forward=forward) return np.dot(v, shiftmat.T) def shift_tensor2_dcm(a, dcm, forward=False): shiftmat =", "k == 1: u1 = math.sqrt(rotmat[1,1]-rotmat[0,0]-rotmat[2,2]+1)/2 s = 1.0/(2*u1) u0 = s*rotmat[0,1] u2", "return 2*np.array([[-q1, q0, -q3, q2], [-q2, q3, q0, -q1], [-q3, -q2, q1, q0]])", "= math.sqrt(rotmat[1,1]-rotmat[0,0]-rotmat[2,2]+1)/2 s = 1.0/(2*u1) u0 = s*rotmat[0,1] u2 = s*rotmat[1,2] elif k", "angle = euler_to_axis_angle(euler, seq=seq, world=world) return axis_angle_to_quat(axis, angle) def euler_to_dcm(euler, seq='XYZ', world=True): dcm", "X-axis of the body frame. #Axis_angle------------------------------------------------------------ def fix_axis_angle(axis, angle, normalize=True): if normalize: norm", "ori_repr == 'quat': quat = np.array(orientation['quat']) dcm = quat_to_dcm(quat) elif ori_repr == 'euler':", "else: raise ValueError( 'Unrecognized orientation repr {0}'.format(ori_repr)) return quat def rotate_vector_quat(v, q): rotmat", "in the Aerospace community as: #First rotation about Z-axis, second rotation about Y-axis,", "= ang_vel_to_quat_deriv_mat(q) qdot = np.dot(mat, ang_vel) return qdot def ang_vel_to_quat_deriv_mat(q): q0, q1, q2,", "#Generate angle: A uniform random number from [0.0, 2*pi) angle = 2.0*math.pi*np.random.random() while", "a random pair of axis-angle. The axis is a random vector from the", "from the surface of a unit sphere. 
Algorithm from Allen & Tildesley p.", "a unit sphere. Algorithm from Allen & Tildesley p. 349. ''' axis =", "shiftmat = get_shiftmat_euler(euler, forward=forward) return np.einsum('ip,jq,kr,pqr', shiftmat, shiftmat, shiftmat, a) def get_shiftmat_euler(euler, seq='XYZ',", "= euler_to_quat(euler, seq=seq, world=world) elif ori_repr == 'axis_angle': axis = np.array(orientation['axis']) angle =", "Aerospace community as: #First rotation about Z-axis, second rotation about Y-axis, and the", "if angle < 0.0: angle = -angle axis = -axis if angle >", "ori_repr == 'quat': quat = np.array(orientation['quat']) euler = quat_to_euler(quat, seq=to_seq, world=to_world) elif ori_repr", "the XYZ sequence is understood in the Aerospace community as: #First rotation about", "delta): ''' Translates vectors inplace by delta. ''' n = v.shape[0] for i", "np.dot(B, A.T) def dcm_to_quat(dcm): mat = get_rotmat_dcm(dcm) axis, angle = extract_axis_angle_from_rotmat(mat) return axis_angle_to_quat(axis,", "q def get_normalized_quat(q): ''' Normalizes a quaternion and returns it as a copy.", "seq='XYZ', world=True): rotmat = get_rotmat_euler(euler, seq=seq, world=world) axis, angle = extract_axis_angle_from_rotmat(rotmat) return (axis,", "axis[0]*sin R[2,2] = axis[2]*axis[2]*icos + cos return R def extract_axis_angle_from_rotmat(rotmat): trace = np.trace(rotmat)", "#Quaternion----------------------------------------------------------- def get_rand_quat(): q = np.random.random((4,)) return normalize_quat(q) def get_identity_quat(): return np.array([1.0, 0.0,", "-q[1:4] return q def get_conjugated_quat(q): ''' Conjugates a quaternion and returns a copy.", "= rotmat[1,0] - rotmat[0,1] else: #Find the largest entry in the diagonal of", "(3,3) ndarray The dcm of frame B w.r.t. frame A. ''' return np.dot(B,", "forward=False): if forward: shiftmat = get_rotmat_quat(get_conjugated_quat(q)) else: shiftmat = get_rotmat_quat(q) return shiftmat def", "''' Returns the direction cosine matrix of axes(i.e. 
frame) B w.r.t. axes(i.e. frame)", "= euler_to_dcm(euler, seq=seq, world=world) elif ori_repr == 'axis_angle': axis = np.array(orientation['axis']) angle =", "= 2.0*np.random.random() - 1.0 zetasq = zeta1**2 + zeta2**2 if zetasq <= 1.0:", "euler = quat_to_euler(quat, seq=to_seq, world=to_world) elif ori_repr == 'euler': euler = np.array(orientation['euler']) seq", "p0, -p1], [p3, -p2, p1, p0]]) pq = normalize_quat(np.dot(prod_mat, q)) return pq def", "def ang_vel_to_quat_deriv_mat(q): q0, q1, q2, q3 = tuple(q) return 0.5*np.array([[-q1, -q2, -q3], [", "orientation['world'] dcm = euler_to_dcm(euler, seq=seq, world=world) elif ori_repr == 'axis_angle': axis = np.array(orientation['axis'])", "return rotmat def shift_vector_quat(v, q, forward=False): shiftmat = get_shiftmat_quat(q, forward=forward) return np.dot(v, shiftmat.T)", "= tuple(q) return 2*np.array([[-q1, q0, -q3, q2], [-q2, q3, q0, -q1], [-q3, -q2,", "def rotate_vector_euler(v, euler, seq='XYZ', world=True): ''' Rotates vectors about axis by angle. 
'''", "+ cos R[0,1] = axis[0]*axis[1]*icos - axis[2]*sin R[0,2] = axis[0]*axis[2]*icos + axis[1]*sin R[1,0]", "q1q3 = q[1]*q[3] q2q3 = q[2]*q[3] rotmat[0,0] = 2*(q0sq + q1sq) - 1.0", "= get_shiftmat_euler(euler, forward=forward) return np.einsum('ip,jq,pq', shiftmat, shiftmat, a) def shift_tensor3_euler(a, euler, forward=False): shiftmat", "if k == 0: u0 = math.sqrt(rotmat[0,0]-rotmat[1,1]-rotmat[2,2]+1)/2 s = 1.0/(2*u0) u1 = s*rotmat[0,1]", "ori_repr == 'euler': euler = np.array(orientation['euler']) seq = orientation['seq'] world = orientation['world'] dcm", "ori_repr == 'axis_angle': axis = np.array(orientation['axis']) angle = orientation['angle'] quat = axis_angle_to_quat(axis, angle)", "'dcm': axis, angle = dcm_to_axis_angle(orientation['dcm']) else: raise ValueError( 'Unrecognized orientation repr {0}'.format(ori_repr)) return", "= la.unitized(np.cross(new[0,:], new[1,:])) axes_old = np.vstack((old, z_old)) axes_new = np.vstack((new, z_new)) dcm =", "get_rotmat_dcm(dcm): return dcm.T def shift_vector_dcm(v, dcm, forward=False): shiftmat = get_shiftmat_dcm(dcm, forward=forward) return np.dot(v,", "ValueError( 'Unrecognized orientation repr {0}'.format(ori_repr)) return dcm def rotate_vector_dcm(v, dcm): rotmat = get_rotmat_dcm(dcm)", "q3sq) - 1.0 return rotmat def shift_vector_quat(v, q, forward=False): shiftmat = get_shiftmat_quat(q, forward=forward)", "get_shiftmat_dcm(dcm, forward=False): shiftmat = dcm if not forward: shiftmat = shiftmat.T return shiftmat", "[-q3, q0, q1], [ q2, -q1, q0]]) #Other functions------------------------------------------------------ def translate(v, delta): '''", "dcm_from_axes(old, new) return rotate_vector_dcm(v, dcm) def mat_is_dcm(mat): return mat_is_rotmat(mat) def mat_is_rotmat(mat): det_is_one =", "world=True, forward=False): shiftmat = get_shiftmat_euler(euler, seq=seq, world=world, forward=forward) return np.dot(v, shiftmat.T) def shift_tensor2_euler(a,", "2*np.array([[-q1, q0, -q3, q2], [-q2, q3, q0, -q1], [-q3, -q2, 
q1, q0]]) def", "def factorize_rotmat(rotmat, seq='XYZ', world=True): return eulang.factor_rotmat(rotmat, seq=seq, world=world) def euler_to_euler(euler, seq, world, to_seq,", "q0q1) rotmat[2,2] = 2*(q0sq + q3sq) - 1.0 return rotmat def shift_vector_quat(v, q,", "any_to_euler(orientation, to_seq, to_world): ori_repr = orientation['repr'] if ori_repr == 'quat': quat = np.array(orientation['quat'])", "s = 1.0/(2*u0) u1 = s*rotmat[0,1] u2 = s*rotmat[0,2] elif k == 1:", "axis, angle = extract_axis_angle_from_rotmat(mat) return (axis, angle) def any_to_dcm(orientation): ori_repr = orientation['repr'] if", "q = np.random.random((4,)) return normalize_quat(q) def get_identity_quat(): return np.array([1.0, 0.0, 0.0, 0.0]) def", "axis[0]*axis[2]*icos + axis[1]*sin R[1,0] = axis[0]*axis[1]*icos + axis[2]*sin R[1,1] = axis[1]*axis[1]*icos + cos", "= -q[1:4] return q def get_conjugated_quat(q): ''' Conjugates a quaternion and returns a", "- 1.0 return rotmat def shift_vector_quat(v, q, forward=False): shiftmat = get_shiftmat_quat(q, forward=forward) return", "dcm = euler_to_dcm(euler, seq=seq, world=world) elif ori_repr == 'axis_angle': axis = np.array(orientation['axis']) angle", "= 1.0 - 2.0*zetasq return fix_axis_angle(axis, angle) def axis_angle_to_quat(axis, angle): w = math.cos(angle/2)", "def axis_angle_to_euler(axis, angle, seq='XYZ', world=True): rotmat = get_rotmat_axis_angle(axis, angle) euler = factorize_rotmat(rotmat, seq=seq,", "ori_repr == 'dcm': dcm = dcm_to_quat(orientation['dcm']) else: raise ValueError( 'Unrecognized orientation repr {0}'.format(ori_repr))", "''' p = np.copy(q) return normalize_quat(p) def quat_is_normalized(q): norm = np.linalg.norm(q) if math.isclose(norm,", "the surface of a unit sphere. Algorithm from Allen & Tildesley p. 
349.", "dcm = get_shiftmat_euler(euler, seq=seq, world=world, forward=True) return dcm def euler_to_axis_angle(euler, seq='XYZ', world=True): rotmat", "angle = math.acos(np.dot(old, new)) axis = la.unitized(np.cross(old, new)) return rotate_vector_axis_angle(v, axis, angle) elif", "= math.fmod(angle, 2*math.pi) if angle < 0.0: angle = -angle axis = -axis", "= 2*(q2q3 - q0q1) rotmat[2,0] = 2*(q1q3 - q0q2) rotmat[2,1] = 2*(q2q3 +", "R[0,0] = axis[0]*axis[0]*icos + cos R[0,1] = axis[0]*axis[1]*icos - axis[2]*sin R[0,2] = axis[0]*axis[2]*icos", "-axis if angle > math.pi: angle = 2*math.pi - angle axis = -axis", "angle def rotate_vector_axis_angle(v, axis, angle): ''' Rotates vectors about axis by angle. '''", "= axis[0]*axis[1]*icos + axis[2]*sin R[1,1] = axis[1]*axis[1]*icos + cos R[1,2] = axis[1]*axis[2]*icos -", "p def invert_quat(q): ''' Inverts a quaternion in-place. ''' return conjugate_quat(q) def get_inverted_quat(q):", "mat = ang_vel_to_quat_deriv_mat(q) qdot = np.dot(mat, ang_vel) return qdot def ang_vel_to_quat_deriv_mat(q): q0, q1,", "if angle > math.pi: angle = 2*math.pi - angle axis = -axis return", "q2q3 = q[2]*q[3] rotmat[0,0] = 2*(q0sq + q1sq) - 1.0 rotmat[0,1] = 2*(q1q2", "seq='XYZ', world=True): rotmat = get_rotmat_axis_angle(axis, angle) euler = factorize_rotmat(rotmat, seq=seq, world=world) return euler", "def shift_vector_euler(v, euler, seq='XYZ', world=True, forward=False): shiftmat = get_shiftmat_euler(euler, seq=seq, world=world, forward=forward) return", "angle, seq=to_seq, world=to_world) elif ori_repr == 'dcm': euler = dcm_to_euler(orientation['dcm'], seq=to_seq, world=to_world) else:", "axis = -axis return (axis, angle) def get_rand_axis_angle(): ''' Generates a random pair", "ori_repr == 'euler': euler = np.array(orientation['euler']) seq = orientation['seq'] world = orientation['world'] axis,", "q[1:4] = -q[1:4] return q def get_conjugated_quat(q): ''' Conjugates a quaternion and returns", ": (3,3) ndarray The rows of A represent the 
orthonormal basis vectors of", "forward=forward) return np.einsum('ip,jq,kr,pqr', shiftmat, shiftmat, shiftmat, a) def get_shiftmat_dcm(dcm, forward=False): shiftmat = dcm", "= -p[1:4] return p def invert_quat(q): ''' Inverts a quaternion in-place. ''' return", "frame A. B : (3,3) ndarray The rows of B represent the orthonormal", "sequence used in Blender. #In contrast, the XYZ sequence is understood in the", "if ori_repr == 'quat': quat = np.array(orientation['quat']) elif ori_repr == 'euler': euler =", "of axis-angle. The axis is a random vector from the surface of a", "(world). First rotation about X, second rotation #about Y, and the third rotation", "True else: return False def get_quat_prod(p, q): p0, p1, p2, p3 = tuple(p)", "''' Conjugates a quaternion in-place. ''' q[1:4] = -q[1:4] return q def get_conjugated_quat(q):", "return np.einsum('ip,jq,kr,pqr', shiftmat, shiftmat, shiftmat, a) def get_shiftmat_axis_angle(axis, angle, forward=False): shiftmat = get_rotmat_axis_angle(-axis,", "B w.r.t. axes(i.e. frame) A. 
Parameters ---------- A : (3,3) ndarray The rows", "world = orientation['world'] quat = euler_to_quat(euler, seq=seq, world=world) elif ori_repr == 'axis_angle': axis", "get_angle_between_quat(q1, q2) q = (q1*math.sin((1.0-t)*theta) + q2*math.sin(t*theta))/math.sin(theta) return normalize_quat(q) def get_angle_between_quat(p, q): '''", "dcm_to_euler(orientation['dcm'], seq=to_seq, world=to_world) else: raise ValueError( 'Unrecognized orientation repr {0}'.format(ori_repr)) return euler def", "get_rotmat_dcm(dcm) axis, angle = extract_axis_angle_from_rotmat(mat) return (axis, angle) def any_to_dcm(orientation): ori_repr = orientation['repr']", "elif ori_repr == 'axis_angle': axis = np.array(orientation['axis']) angle = orientation['angle'] euler = axis_angle_to_euler(axis,", "ang_vel) return qdot def ang_vel_to_quat_deriv_mat(q): q0, q1, q2, q3 = tuple(q) return 0.5*np.array([[-q1,", "q0, -q3, q2], [-q2, q3, q0, -q1], [-q3, -q2, q1, q0]]) def ang_vel_to_quat_deriv(q,", "q1, q0]]) def ang_vel_to_quat_deriv(q, ang_vel): mat = ang_vel_to_quat_deriv_mat(q) qdot = np.dot(mat, ang_vel) return", "angle, seq='XYZ', world=True): rotmat = get_rotmat_axis_angle(axis, angle) euler = factorize_rotmat(rotmat, seq=seq, world=world) return", "= la.unitized(np.cross(old, new)) return rotate_vector_axis_angle(v, axis, angle) elif n == 2: z_old =", "shiftmat = get_shiftmat_axis_angle(axis, angle, forward=forward) return np.einsum('ip,jq,pq', shiftmat, shiftmat, a) def shift_tensor3_axis_angle(a, axis,", "represent the orthonormal basis vectors of frame B. 
Returns ------- (3,3) ndarray The", "== 'quat': quat = np.array(orientation['quat']) elif ori_repr == 'euler': euler = np.array(orientation['euler']) seq", "(q1*math.sin((1.0-t)*theta) + q2*math.sin(t*theta))/math.sin(theta) return normalize_quat(q) def get_angle_between_quat(p, q): ''' Returns the angle between", "np.einsum('ip,jq,kr,pqr', shiftmat, shiftmat, shiftmat, a) def get_shiftmat_axis_angle(axis, angle, forward=False): shiftmat = get_rotmat_axis_angle(-axis, angle)", "- 1.0 rotmat[1,2] = 2*(q2q3 - q0q1) rotmat[2,0] = 2*(q1q3 - q0q2) rotmat[2,1]", "''' return math.acos(np.dot(p,q)) def quat_deriv_to_ang_vel(q, qdot): mat = quat_deriv_to_ang_vel_mat(q) return np.dot(mat, qdot) def", "''' Normalizes a quaternion in-place. ''' q /= np.linalg.norm(q) return q def get_normalized_quat(q):", "mat = get_rotmat_dcm(dcm) axis, angle = extract_axis_angle_from_rotmat(mat) return axis_angle_to_quat(axis, angle) def dcm_to_euler(dcm, seq='XYZ',", "is the same as the sequence used in Blender. #In contrast, the XYZ", "from . import linalg as la from . import eulang #Euler angle sequence:", "quaternion and returns it as a copy. 
''' p = np.copy(q) return normalize_quat(p)", "''' axis = np.zeros((3,)) #Generate angle: A uniform random number from [0.0, 2*pi)", "n = old.shape[0] if n == 1: angle = math.acos(np.dot(old, new)) axis =", "quat_to_dcm(quat) elif ori_repr == 'euler': euler = np.array(orientation['euler']) seq = orientation['seq'] world =", "get_shiftmat_dcm(dcm, forward=forward) return np.dot(v, shiftmat.T) def shift_tensor2_dcm(a, dcm, forward=False): shiftmat = get_shiftmat_dcm(dcm, forward=forward)", "get_rotmat_quat(q): rotmat = np.empty((3,3)) q0sq = q[0]**2 q1sq = q[1]**2 q2sq = q[2]**2", "quat = euler_to_quat(euler, seq=seq, world=world) elif ori_repr == 'axis_angle': axis = np.array(orientation['axis']) angle", "return fix_axis_angle(axis, angle) def axis_angle_to_quat(axis, angle): w = math.cos(angle/2) v = math.sin(angle/2)*axis q", "def axis_angle_to_quat(axis, angle): w = math.cos(angle/2) v = math.sin(angle/2)*axis q = np.array([w, v[0],", "of frame B w.r.t. frame A. ''' return np.dot(B, A.T) def dcm_to_quat(dcm): mat", "seq='XYZ', world=True): ''' Rotates vectors about axis by angle. ''' rotmat = get_rotmat_euler(euler,", "of B represent the orthonormal basis vectors of frame B. Returns ------- (3,3)", "= axis[0]*axis[2]*icos + axis[1]*sin R[1,0] = axis[0]*axis[1]*icos + axis[2]*sin R[1,1] = axis[1]*axis[1]*icos +", "and returns it as a copy. ''' p = np.copy(q) return normalize_quat(p) def", "q3, q0, -q1], [-q3, -q2, q1, q0]]) def ang_vel_to_quat_deriv(q, ang_vel): mat = ang_vel_to_quat_deriv_mat(q)", "np.einsum('ip,jq,pq', shiftmat, shiftmat, a) def shift_tensor3_euler(a, euler, forward=False): shiftmat = get_shiftmat_euler(euler, forward=forward) return", "= np.array(orientation['axis']) angle = orientation['angle'] quat = axis_angle_to_quat(axis, angle) elif ori_repr == 'dcm':", "sequence is understood in the Aerospace community as: #First rotation about Z-axis, second", "and the third #rotation about X-axis of the body frame. 
#Axis_angle------------------------------------------------------------ def fix_axis_angle(axis,", "[-q3, -q2, q1, q0]]) def ang_vel_to_quat_deriv(q, ang_vel): mat = ang_vel_to_quat_deriv_mat(q) qdot = np.dot(mat,", "instance. ''' p = np.copy(q) return conjugate_quat(p) def normalize_quat(q): ''' Normalizes a quaternion", "shiftmat, a) def shift_tensor3_axis_angle(a, axis, angle, forward=False): shiftmat = get_shiftmat_axis_angle(axis, angle, forward=forward) return", "rotmat = get_rotmat_euler(euler, seq=seq, world=world) if forward: shiftmat = rotmat.T else: shiftmat =", "0.0]) def get_rand_quat(): axis, angle = get_rand_axis_angle() return axis_angle_to_quat(axis, angle) def get_perturbed_quat(q): raise", "world, to_seq, to_world) elif ori_repr == 'axis_angle': axis = np.array(orientation['axis']) angle = orientation['angle']", "q1], [ q2, -q1, q0]]) #Other functions------------------------------------------------------ def translate(v, delta): ''' Translates vectors", "2*(q1q3 - q0q2) rotmat[2,1] = 2*(q2q3 + q0q1) rotmat[2,2] = 2*(q0sq + q3sq)", "np.copy(q) return conjugate_quat(p) def normalize_quat(q): ''' Normalizes a quaternion in-place. ''' q /=", "rotation about X, second rotation #about Y, and the third rotation about Z", "tuple(p) prod_mat = np.array([[p0, -p1, -p2, -p3], [p1, p0, -p3, p2], [p2, p3,", "''' n = v.shape[0] for i in range(n): v[i,:] += delta return v", "norm angle = math.fmod(angle, 2*math.pi) if angle < 0.0: angle = -angle axis", "forward=forward) return np.einsum('ip,jq,kr,pqr', shiftmat, shiftmat, shiftmat, a) def get_shiftmat_quat(q, forward=False): if forward: shiftmat", "Algorithm from Allen & Tildesley p. 349. ''' axis = np.zeros((3,)) #Generate angle:", "shiftmat, shiftmat, shiftmat, a) def get_shiftmat_axis_angle(axis, angle, forward=False): shiftmat = get_rotmat_axis_angle(-axis, angle) if", "of the world(i.e. fixed) frame. #This is the same as the sequence used", "sequence: XYZ (world). 
First rotation about X, second rotation #about Y, and the", "rotmat = get_rotmat_euler(euler, seq=seq, world=world) axis, angle = extract_axis_angle_from_rotmat(rotmat) return (axis, angle) def", "quat_deriv_to_ang_vel(q, qdot): mat = quat_deriv_to_ang_vel_mat(q) return np.dot(mat, qdot) def quat_deriv_to_ang_vel_mat(q): q0, q1, q2,", "2.0*np.random.random() - 1.0 zeta2 = 2.0*np.random.random() - 1.0 zetasq = zeta1**2 + zeta2**2", "angle) def dcm_to_euler(dcm, seq='XYZ', world=True): mat = get_rotmat_dcm(dcm) euler = factorize_rotmat(mat, seq=seq, world=world)", "return 0.5*np.array([[-q1, -q2, -q3], [ q0, q3, -q2], [-q3, q0, q1], [ q2,", "2*(q2q3 - q0q1) rotmat[2,0] = 2*(q1q3 - q0q2) rotmat[2,1] = 2*(q2q3 + q0q1)", "q = np.array([w, v[0], v[1], v[2]]) return normalize_quat(q) def axis_angle_to_euler(axis, angle, seq='XYZ', world=True):", "= la.unitized(np.cross(old[0,:], old[1,:])) z_new = la.unitized(np.cross(new[0,:], new[1,:])) axes_old = np.vstack((old, z_old)) axes_new =", "orientation['seq'] world = orientation['world'] quat = euler_to_quat(euler, seq=seq, world=world) elif ori_repr == 'axis_angle':", "q)) return pq def interpolate_quat(q1, q2, t): theta = get_angle_between_quat(q1, q2) q =", "axis[1]*sin R[2,1] = axis[1]*axis[2]*icos + axis[0]*sin R[2,2] = axis[2]*axis[2]*icos + cos return R", "seq=seq, world=world) axis, angle = extract_axis_angle_from_rotmat(rotmat) return (axis, angle) def any_to_euler(orientation, to_seq, to_world):", "dcm_from_axes(axes_old, axes_new) return rotate_vector_dcm(v, dcm) elif n == 3: dcm = dcm_from_axes(old, new)", "cos = np.cos(angle) icos = 1.0 - cos R[0,0] = axis[0]*axis[0]*icos + cos", "= np.zeros((3,)) #Generate angle: A uniform random number from [0.0, 2*pi) angle =", "- 1.0 zetasq = zeta1**2 + zeta2**2 if zetasq <= 1.0: break rt", "Inverts a quaternion in-place. 
''' return conjugate_quat(q) def get_inverted_quat(q): ''' Inverts a quaternion", "old[1,:])) z_new = la.unitized(np.cross(new[0,:], new[1,:])) axes_old = np.vstack((old, z_old)) axes_new = np.vstack((new, z_new))", "p3 = tuple(p) prod_mat = np.array([[p0, -p1, -p2, -p3], [p1, p0, -p3, p2],", "represent the orthonormal basis vectors of frame A. B : (3,3) ndarray The", "world=world) elif ori_repr == 'axis_angle': axis = np.array(orientation['axis']) angle = orientation['angle'] dcm =", "get_rotmat_quat(q) return shiftmat def conjugate_quat(q): ''' Conjugates a quaternion in-place. ''' q[1:4] =", "forward=forward) return np.einsum('ip,jq,kr,pqr', shiftmat, shiftmat, shiftmat, a) def get_shiftmat_euler(euler, seq='XYZ', world=True, forward=False): rotmat", "''' Inverts a quaternion in-place. ''' return conjugate_quat(q) def get_inverted_quat(q): ''' Inverts a", "rotmat[1,2] u1 = rotmat[0,2] - rotmat[2,0] u2 = rotmat[1,0] - rotmat[0,1] else: #Find", "= np.vstack((old, z_old)) axes_new = np.vstack((new, z_new)) dcm = dcm_from_axes(axes_old, axes_new) return rotate_vector_dcm(v,", "forward=False): rotmat = get_rotmat_euler(euler, seq=seq, world=world) if forward: shiftmat = rotmat.T else: shiftmat", "def any_to_dcm(orientation): ori_repr = orientation['repr'] if ori_repr == 'quat': quat = np.array(orientation['quat']) dcm", "s = 1.0/(2*u1) u0 = s*rotmat[0,1] u2 = s*rotmat[1,2] elif k == 2:", "{0}'.format(ori_repr)) return axis, angle def rotate_vector_axis_angle(v, axis, angle): ''' Rotates vectors about axis", "= axis[0]*axis[1]*icos - axis[2]*sin R[0,2] = axis[0]*axis[2]*icos + axis[1]*sin R[1,0] = axis[0]*axis[1]*icos +", "= s*rotmat[0,1] u2 = s*rotmat[1,2] elif k == 2: u2 = math.sqrt(rotmat[2,2]-rotmat[0,0]-rotmat[1,1]+1)/2 s", "q2, t): theta = get_angle_between_quat(q1, q2) q = (q1*math.sin((1.0-t)*theta) + q2*math.sin(t*theta))/math.sin(theta) return normalize_quat(q)", "k == 2: u2 = math.sqrt(rotmat[2,2]-rotmat[0,0]-rotmat[1,1]+1)/2 s = 1.0/(2*u2) u0 = 
s*rotmat[0,2] u1", "raise ValueError( 'Unrecognized orientation repr {0}'.format(ori_repr)) return dcm def rotate_vector_dcm(v, dcm): rotmat =", "axis[2] = 1.0 - 2.0*zetasq return fix_axis_angle(axis, angle) def axis_angle_to_quat(axis, angle): w =", "get_rand_axis_angle() return axis_angle_to_quat(axis, angle) def get_perturbed_quat(q): raise NotImplementedError def quat_to_axis_angle(q): angle = 2*math.acos(q[0])", "= s*rotmat[0,2] u1 = s*rotmat[1,2] else: u0 = 1.0 u1 = 0.0 u2", "seq=seq, world=world) elif ori_repr == 'axis_angle': axis = np.array(orientation['axis']) angle = orientation['angle'] dcm", "1.0 zetasq = zeta1**2 + zeta2**2 if zetasq <= 1.0: break rt =", "2.0*zeta2*rt axis[2] = 1.0 - 2.0*zetasq return fix_axis_angle(axis, angle) def axis_angle_to_quat(axis, angle): w", "else: raise ValueError( 'Unrecognized orientation repr {0}'.format(ori_repr)) return dcm def rotate_vector_dcm(v, dcm): rotmat", "= 2*(q1q2 - q0q3) rotmat[0,2] = 2*(q1q3 + q0q2) rotmat[1,0] = 2*(q1q2 +", "2.0*zeta1*rt axis[1] = 2.0*zeta2*rt axis[2] = 1.0 - 2.0*zetasq return fix_axis_angle(axis, angle) def", "axis-angle. The axis is a random vector from the surface of a unit", "shiftmat, a) def get_shiftmat_quat(q, forward=False): if forward: shiftmat = get_rotmat_quat(get_conjugated_quat(q)) else: shiftmat =", "= dcm if not forward: shiftmat = shiftmat.T return shiftmat #Euler angle----------------------------------------------------------- def", "math.sqrt(rotmat[0,0]-rotmat[1,1]-rotmat[2,2]+1)/2 s = 1.0/(2*u0) u1 = s*rotmat[0,1] u2 = s*rotmat[0,2] elif k ==", "True: #Generate two uniform random numbers from [-1, 1) zeta1 = 2.0*np.random.random() -", "a random vector from the surface of a unit sphere. Algorithm from Allen", "''' Rotates vectors about axis by angle. 
''' rotmat = get_rotmat_axis_angle(axis, angle) return", "if ori_repr == 'quat': quat = np.array(orientation['quat']) euler = quat_to_euler(quat, seq=to_seq, world=to_world) elif", "+ q0q2) rotmat[1,0] = 2*(q1q2 + q0q3) rotmat[1,1] = 2*(q0sq + q2sq) -", "world=True): ''' Rotates vectors about axis by angle. ''' rotmat = get_rotmat_euler(euler, seq=seq,", "zetasq <= 1.0: break rt = np.sqrt(1.0-zetasq) axis[0] = 2.0*zeta1*rt axis[1] = 2.0*zeta2*rt", "axis, angle def rotate_vector_axis_angle(v, axis, angle): ''' Rotates vectors about axis by angle.", "world=True): return eulang.factor_rotmat(rotmat, seq=seq, world=world) def euler_to_euler(euler, seq, world, to_seq, to_world): rotmat =", "2: z_old = la.unitized(np.cross(old[0,:], old[1,:])) z_new = la.unitized(np.cross(new[0,:], new[1,:])) axes_old = np.vstack((old, z_old))", "factorize_rotmat(mat, seq=seq, world=world) return euler def dcm_to_axis_angle(dcm): mat = get_rotmat_dcm(dcm) axis, angle =", "= np.random.random((4,)) return normalize_quat(q) def get_identity_quat(): return np.array([1.0, 0.0, 0.0, 0.0]) def get_rand_quat():", "shift_tensor2_quat(a, quat, forward=False): shiftmat = get_shiftmat_quat(quat, forward=forward) return np.einsum('ip,jq,pq', shiftmat, shiftmat, a) def", "= orientation['seq'] world = orientation['world'] dcm = euler_to_dcm(euler, seq=seq, world=world) elif ori_repr ==", "return np.dot(v, rotmat.T) def get_rotmat_dcm(dcm): return dcm.T def shift_vector_dcm(v, dcm, forward=False): shiftmat =", "if angle < math.pi: axis = q[1:4]/sin else: rotmat = get_rotmat_quat(q) axis, angle", "q2, q3 = tuple(q) return 2*np.array([[-q1, q0, -q3, q2], [-q2, q3, q0, -q1],", "euler def rotate_vector_euler(v, euler, seq='XYZ', world=True): ''' Rotates vectors about axis by angle.", "= 1.0/(2*u2) u0 = s*rotmat[0,2] u1 = s*rotmat[1,2] else: u0 = 1.0 u1", "#Euler angle----------------------------------------------------------- def factorize_rotmat(rotmat, seq='XYZ', world=True): return 
eulang.factor_rotmat(rotmat, seq=seq, world=world) def euler_to_euler(euler, seq,", "u2 = rotmat[1,0] - rotmat[0,1] else: #Find the largest entry in the diagonal", "return qdot def ang_vel_to_quat_deriv_mat(q): q0, q1, q2, q3 = tuple(q) return 0.5*np.array([[-q1, -q2,", "axis, angle = extract_axis_angle_from_rotmat(mat) return axis_angle_to_quat(axis, angle) def dcm_to_euler(dcm, seq='XYZ', world=True): mat =", "axis[1]*axis[2]*icos + axis[0]*sin R[2,2] = axis[2]*axis[2]*icos + cos return R def extract_axis_angle_from_rotmat(rotmat): trace", "world=True): rotmat = get_rotmat_axis_angle(axis, angle) euler = factorize_rotmat(rotmat, seq=seq, world=world) return euler def", "shift_tensor3_axis_angle(a, axis, angle, forward=False): shiftmat = get_shiftmat_axis_angle(axis, angle, forward=forward) return np.einsum('ip,jq,kr,pqr', shiftmat, shiftmat,", "def rotate_vector_axis_angle(v, axis, angle): ''' Rotates vectors about axis by angle. ''' rotmat", "random vector from the surface of a unit sphere. 
Algorithm from Allen &", "[p1, p0, -p3, p2], [p2, p3, p0, -p1], [p3, -p2, p1, p0]]) pq", "= dcm_to_quat(orientation['dcm']) else: raise ValueError( 'Unrecognized orientation repr {0}'.format(ori_repr)) return dcm def rotate_vector_dcm(v,", "axis_angle_to_euler(axis, angle, seq='XYZ', world=True): rotmat = get_rotmat_axis_angle(axis, angle) euler = factorize_rotmat(rotmat, seq=seq, world=world)", "extract_axis_angle_from_rotmat(rotmat): trace = np.trace(rotmat) angle = math.acos((trace-1)/2) if angle > 0: if angle", "world=True): return eulang.rotmat_euler(euler, seq=seq, world=world) def shift_vector_euler(v, euler, seq='XYZ', world=True, forward=False): shiftmat =", "new)) return rotate_vector_axis_angle(v, axis, angle) elif n == 2: z_old = la.unitized(np.cross(old[0,:], old[1,:]))", "angle = orientation['angle'] quat = axis_angle_to_quat(axis, angle) elif ori_repr == 'dcm': quat =", "from [0.0, 2*pi) angle = 2.0*math.pi*np.random.random() while True: #Generate two uniform random numbers", "elif n == 2: z_old = la.unitized(np.cross(old[0,:], old[1,:])) z_new = la.unitized(np.cross(new[0,:], new[1,:])) axes_old", "rotmat = get_rotmat_axis_angle(axis, angle) return np.dot(v, rotmat.T) def get_rotmat_axis_angle(axis, angle): R = np.zeros((3,3))", "euler, forward=False): shiftmat = get_shiftmat_euler(euler, forward=forward) return np.einsum('ip,jq,kr,pqr', shiftmat, shiftmat, shiftmat, a) def", "def dcm_to_axis_angle(dcm): mat = get_rotmat_dcm(dcm) axis, angle = extract_axis_angle_from_rotmat(mat) return (axis, angle) def", "q. 
''' return math.acos(np.dot(p,q)) def quat_deriv_to_ang_vel(q, qdot): mat = quat_deriv_to_ang_vel_mat(q) return np.dot(mat, qdot)", "q): rotmat = get_rotmat_quat(q) return np.dot(v, rotmat.T) def get_rotmat_quat(q): rotmat = np.empty((3,3)) q0sq", "u1 = rotmat[0,2] - rotmat[2,0] u2 = rotmat[1,0] - rotmat[0,1] else: #Find the", "'quat': quat = np.array(orientation['quat']) euler = quat_to_euler(quat, seq=to_seq, world=to_world) elif ori_repr == 'euler':", "0.5*np.array([[-q1, -q2, -q3], [ q0, q3, -q2], [-q3, q0, q1], [ q2, -q1,", "(3,3) ndarray The rows of A represent the orthonormal basis vectors of frame", "math.sqrt(rotmat[2,2]-rotmat[0,0]-rotmat[1,1]+1)/2 s = 1.0/(2*u2) u0 = s*rotmat[0,2] u1 = s*rotmat[1,2] else: u0 =", "pq def interpolate_quat(q1, q2, t): theta = get_angle_between_quat(q1, q2) q = (q1*math.sin((1.0-t)*theta) +", "euler = np.array(orientation['euler']) seq = orientation['seq'] world = orientation['world'] euler = euler_to_euler(euler, seq,", "rows of B represent the orthonormal basis vectors of frame B. Returns -------", "orthonormal basis vectors of frame B. Returns ------- (3,3) ndarray The dcm of", "get_identity_quat(): return np.array([1.0, 0.0, 0.0, 0.0]) def get_rand_quat(): axis, angle = get_rand_axis_angle() return", "- axis[0]*sin R[2,0] = axis[2]*axis[0]*icos - axis[1]*sin R[2,1] = axis[1]*axis[2]*icos + axis[0]*sin R[2,2]", "Normalizes a quaternion and returns it as a copy. 
''' p = np.copy(q)", "-q3], [ q0, q3, -q2], [-q3, q0, q1], [ q2, -q1, q0]]) #Other", "shiftmat, a) def shift_tensor3_euler(a, euler, forward=False): shiftmat = get_shiftmat_euler(euler, forward=forward) return np.einsum('ip,jq,kr,pqr', shiftmat,", "shiftmat = get_shiftmat_axis_angle(axis, angle, forward=forward) return np.dot(v, shiftmat.T) def shift_tensor2_axis_angle(a, axis, angle, forward=False):", "= np.array(orientation['euler']) seq = orientation['seq'] world = orientation['world'] euler = euler_to_euler(euler, seq, world,", "-p2, p1, p0]]) pq = normalize_quat(np.dot(prod_mat, q)) return pq def interpolate_quat(q1, q2, t):", "= math.sqrt(rotmat[2,2]-rotmat[0,0]-rotmat[1,1]+1)/2 s = 1.0/(2*u2) u0 = s*rotmat[0,2] u1 = s*rotmat[1,2] else: u0", "def quat_deriv_to_ang_vel(q, qdot): mat = quat_deriv_to_ang_vel_mat(q) return np.dot(mat, qdot) def quat_deriv_to_ang_vel_mat(q): q0, q1,", "contrast, the XYZ sequence is understood in the Aerospace community as: #First rotation", "forward=forward) return np.dot(v, shiftmat.T) def shift_tensor2_dcm(a, dcm, forward=False): shiftmat = get_shiftmat_dcm(dcm, forward=forward) return", "0.0: angle = -angle axis = -axis if angle > math.pi: angle =", "world=world, forward=forward) return np.dot(v, shiftmat.T) def shift_tensor2_euler(a, euler, forward=False): shiftmat = get_shiftmat_euler(euler, forward=forward)", "world=world) def quat_to_dcm(q): return get_shiftmat_quat(q, forward=True) def any_to_quat(orientation): ori_repr = orientation['repr'] if ori_repr", "''' q /= np.linalg.norm(q) return q def get_normalized_quat(q): ''' Normalizes a quaternion and", "rotate_vector_dcm(v, dcm) def mat_is_dcm(mat): return mat_is_rotmat(mat) def mat_is_rotmat(mat): det_is_one = math.isclose(np.linalg.det(mat), 1.0, abs_tol=1e-12,", "get_shiftmat_dcm(dcm, forward=forward) return np.einsum('ip,jq,pq', shiftmat, shiftmat, a) def shift_tensor3_dcm(a, dcm, forward=False): shiftmat =", "a quaternion and returns it as a copy. 
''' p = np.copy(q) return", "def shift_tensor2_quat(a, quat, forward=False): shiftmat = get_shiftmat_quat(quat, forward=forward) return np.einsum('ip,jq,pq', shiftmat, shiftmat, a)", "2*(q1q2 + q0q3) rotmat[1,1] = 2*(q0sq + q2sq) - 1.0 rotmat[1,2] = 2*(q2q3", "shiftmat = shiftmat.T return shiftmat #Euler angle----------------------------------------------------------- def factorize_rotmat(rotmat, seq='XYZ', world=True): return eulang.factor_rotmat(rotmat,", "shiftmat = get_shiftmat_dcm(dcm, forward=forward) return np.einsum('ip,jq,kr,pqr', shiftmat, shiftmat, shiftmat, a) def get_shiftmat_dcm(dcm, forward=False):", "dcm def euler_to_axis_angle(euler, seq='XYZ', world=True): rotmat = get_rotmat_euler(euler, seq=seq, world=world) axis, angle =", "shiftmat, shiftmat, a) def get_shiftmat_euler(euler, seq='XYZ', world=True, forward=False): rotmat = get_rotmat_euler(euler, seq=seq, world=world)", "angle) return np.dot(v, rotmat.T) def get_rotmat_axis_angle(axis, angle): R = np.zeros((3,3)) sin = np.sin(angle)", "The axis is a random vector from the surface of a unit sphere.", "euler_to_quat(euler, seq='XYZ', world=True): axis, angle = euler_to_axis_angle(euler, seq=seq, world=world) return axis_angle_to_quat(axis, angle) def", "q[0]*q[2] q0q3 = q[0]*q[3] q1q2 = q[1]*q[2] q1q3 = q[1]*q[3] q2q3 = q[2]*q[3]", "shiftmat = get_shiftmat_quat(quat, forward=forward) return np.einsum('ip,jq,pq', shiftmat, shiftmat, a) def shift_tensor3_quat(a, quat, forward=False):", "np.array([1.0, 0.0, 0.0]) return fix_axis_angle(axis, angle, normalize=True) def quat_to_euler(q, seq='XYZ', world=True): rotmat =", "in-place. 
''' q[1:4] = -q[1:4] return q def get_conjugated_quat(q): ''' Conjugates a quaternion", "p = np.copy(q) return normalize_quat(p) def quat_is_normalized(q): norm = np.linalg.norm(q) if math.isclose(norm, 1.0,", "'quat': quat = np.array(orientation['quat']) dcm = quat_to_dcm(quat) elif ori_repr == 'euler': euler =", "shiftmat, a) def shift_tensor3_dcm(a, dcm, forward=False): shiftmat = get_shiftmat_dcm(dcm, forward=forward) return np.einsum('ip,jq,kr,pqr', shiftmat,", "mat_is_rotmat(mat) def mat_is_rotmat(mat): det_is_one = math.isclose(np.linalg.det(mat), 1.0, abs_tol=1e-12, rel_tol=1e-12) is_orthogonal = np.allclose(np.dot(mat, mat.T),", "else: axis = np.array([1.0, 0.0, 0.0]) return fix_axis_angle(axis, angle, normalize=True) def quat_to_euler(q, seq='XYZ',", "= axis_angle_to_dcm(axis, angle) elif ori_repr == 'dcm': dcm = dcm_to_quat(orientation['dcm']) else: raise ValueError(", "theta = get_angle_between_quat(q1, q2) q = (q1*math.sin((1.0-t)*theta) + q2*math.sin(t*theta))/math.sin(theta) return normalize_quat(q) def get_angle_between_quat(p,", "rel_tol=1e-14): axis /= norm angle = math.fmod(angle, 2*math.pi) if angle < 0.0: angle", "dcm, forward=False): shiftmat = get_shiftmat_dcm(dcm, forward=forward) return np.einsum('ip,jq,pq', shiftmat, shiftmat, a) def shift_tensor3_dcm(a,", "elif ori_repr == 'axis_angle': axis = np.array(orientation['axis']) angle = orientation['angle'] elif ori_repr ==", "shiftmat = get_shiftmat_quat(q, forward=forward) return np.dot(v, shiftmat.T) def shift_tensor2_quat(a, quat, forward=False): shiftmat =", "0.0]) return fix_axis_angle(axis, angle, normalize=True) def quat_to_euler(q, seq='XYZ', world=True): rotmat = get_rotmat_quat(q) return", "= math.isclose(np.linalg.det(mat), 1.0, abs_tol=1e-12, rel_tol=1e-12) is_orthogonal = np.allclose(np.dot(mat, mat.T), np.identity(3)) return is_orthogonal and", "be unit vectors. 
''' assert old.shape[0] == new.shape[0] n = old.shape[0] if n", "axis_angle_to_euler(axis, angle, seq=to_seq, world=to_world) elif ori_repr == 'dcm': euler = dcm_to_euler(orientation['dcm'], seq=to_seq, world=to_world)", "axis = -axis if angle > math.pi: angle = 2*math.pi - angle axis", "= 2*(q0sq + q3sq) - 1.0 return rotmat def shift_vector_quat(v, q, forward=False): shiftmat", "shiftmat = shiftmat.T return shiftmat #Direction cosine matrix----------------------------------------------- def dcm_from_axes(A, B): ''' Returns", "= get_shiftmat_dcm(dcm, forward=forward) return np.einsum('ip,jq,kr,pqr', shiftmat, shiftmat, shiftmat, a) def get_shiftmat_dcm(dcm, forward=False): shiftmat", "seq=to_seq, world=to_world) elif ori_repr == 'euler': euler = np.array(orientation['euler']) seq = orientation['seq'] world", "direction cosine matrix of axes(i.e. frame) B w.r.t. axes(i.e. frame) A. Parameters ----------", "u0 = s*rotmat[0,1] u2 = s*rotmat[1,2] elif k == 2: u2 = math.sqrt(rotmat[2,2]-rotmat[0,0]-rotmat[1,1]+1)/2", "euler def dcm_to_axis_angle(dcm): mat = get_rotmat_dcm(dcm) axis, angle = extract_axis_angle_from_rotmat(mat) return (axis, angle)", "seq='XYZ', world=True): rotmat = get_rotmat_quat(q) return factorize_rotmat(rotmat, seq=seq, world=world) def quat_to_dcm(q): return get_shiftmat_quat(q,", "vectors of frame B. Returns ------- (3,3) ndarray The dcm of frame B", "= get_shiftmat_dcm(dcm, forward=forward) return np.dot(v, shiftmat.T) def shift_tensor2_dcm(a, dcm, forward=False): shiftmat = get_shiftmat_dcm(dcm,", "returns it as a new instance. ''' p = np.copy(q) return conjugate_quat(p) def", "dcm = get_shiftmat_axis_angle(axis, angle, forward=True) return dcm def any_to_axis_angle(orientation): ori_repr = orientation['repr'] if", "axis[0]*sin R[2,0] = axis[2]*axis[0]*icos - axis[1]*sin R[2,1] = axis[1]*axis[2]*icos + axis[0]*sin R[2,2] =", "returns a copy. 
''' p = np.copy(q) p[1:4] = -p[1:4] return p def", "angle, normalize=True): if normalize: norm = np.linalg.norm(axis) if not math.isclose(norm, 1.0, abs_tol=1e-14, rel_tol=1e-14):", "[-q2, q3, q0, -q1], [-q3, -q2, q1, q0]]) def ang_vel_to_quat_deriv(q, ang_vel): mat =", "axis[1]*sin R[1,0] = axis[0]*axis[1]*icos + axis[2]*sin R[1,1] = axis[1]*axis[1]*icos + cos R[1,2] =", "orientation['repr'] if ori_repr == 'quat': quat = np.array(orientation['quat']) elif ori_repr == 'euler': euler", "= get_shiftmat_quat(quat, forward=forward) return np.einsum('ip,jq,kr,pqr', shiftmat, shiftmat, shiftmat, a) def get_shiftmat_quat(q, forward=False): if", "number from [0.0, 2*pi) angle = 2.0*math.pi*np.random.random() while True: #Generate two uniform random", "orientation repr {0}'.format(ori_repr)) return euler def rotate_vector_euler(v, euler, seq='XYZ', world=True): ''' Rotates vectors", "shiftmat.T return shiftmat #Direction cosine matrix----------------------------------------------- def dcm_from_axes(A, B): ''' Returns the direction", "= axis[2]*axis[2]*icos + cos return R def extract_axis_angle_from_rotmat(rotmat): trace = np.trace(rotmat) angle =", "== 'dcm': dcm = dcm_to_quat(orientation['dcm']) else: raise ValueError( 'Unrecognized orientation repr {0}'.format(ori_repr)) return", "shiftmat, shiftmat, a) def get_shiftmat_quat(q, forward=False): if forward: shiftmat = get_rotmat_quat(get_conjugated_quat(q)) else: shiftmat", "q3 = tuple(q) return 0.5*np.array([[-q1, -q2, -q3], [ q0, q3, -q2], [-q3, q0,", "get_shiftmat_euler(euler, seq=seq, world=world, forward=True) return dcm def euler_to_axis_angle(euler, seq='XYZ', world=True): rotmat = get_rotmat_euler(euler,", "angle): w = math.cos(angle/2) v = math.sin(angle/2)*axis q = np.array([w, v[0], v[1], v[2]])", "if zetasq <= 1.0: break rt = np.sqrt(1.0-zetasq) axis[0] = 2.0*zeta1*rt axis[1] =", "import linalg as la from . 
import eulang #Euler angle sequence: XYZ (world).", "ori_repr == 'euler': euler = np.array(orientation['euler']) seq = orientation['seq'] world = orientation['world'] quat", "mat = quat_deriv_to_ang_vel_mat(q) return np.dot(mat, qdot) def quat_deriv_to_ang_vel_mat(q): q0, q1, q2, q3 =", "the direction cosine matrix of axes(i.e. frame) B w.r.t. axes(i.e. frame) A. Parameters", "dcm def rotate_vector_dcm(v, dcm): rotmat = get_rotmat_dcm(dcm) return np.dot(v, rotmat.T) def get_rotmat_dcm(dcm): return", "return dcm def any_to_axis_angle(orientation): ori_repr = orientation['repr'] if ori_repr == 'quat': quat =", "q[0]*q[1] q0q2 = q[0]*q[2] q0q3 = q[0]*q[3] q1q2 = q[1]*q[2] q1q3 = q[1]*q[3]", "= q[1:4]/sin else: rotmat = get_rotmat_quat(q) axis, angle = extract_axis_angle_from_rotmat(rotmat) else: axis =", "prod_mat = np.array([[p0, -p1, -p2, -p3], [p1, p0, -p3, p2], [p2, p3, p0,", "unit vectors. ''' assert old.shape[0] == new.shape[0] n = old.shape[0] if n ==", "+ axis[1]*sin R[1,0] = axis[0]*axis[1]*icos + axis[2]*sin R[1,1] = axis[1]*axis[1]*icos + cos R[1,2]", "conjugate_quat(p) def normalize_quat(q): ''' Normalizes a quaternion in-place. 
''' q /= np.linalg.norm(q) return", "euler = axis_angle_to_euler(axis, angle, seq=to_seq, world=to_world) elif ori_repr == 'dcm': euler = dcm_to_euler(orientation['dcm'],", "if angle > 0.0: if angle < math.pi: axis = q[1:4]/sin else: rotmat", "from [-1, 1) zeta1 = 2.0*np.random.random() - 1.0 zeta2 = 2.0*np.random.random() - 1.0", "get_shiftmat_euler(euler, forward=forward) return np.einsum('ip,jq,kr,pqr', shiftmat, shiftmat, shiftmat, a) def get_shiftmat_euler(euler, seq='XYZ', world=True, forward=False):", "return np.einsum('ip,jq,kr,pqr', shiftmat, shiftmat, shiftmat, a) def get_shiftmat_euler(euler, seq='XYZ', world=True, forward=False): rotmat =", "rotmat[1,0] - rotmat[0,1] else: #Find the largest entry in the diagonal of rotmat", "return np.dot(B, A.T) def dcm_to_quat(dcm): mat = get_rotmat_dcm(dcm) axis, angle = extract_axis_angle_from_rotmat(mat) return", "= 2*math.pi - angle axis = -axis return (axis, angle) def get_rand_axis_angle(): '''", "u0 = math.sqrt(rotmat[0,0]-rotmat[1,1]-rotmat[2,2]+1)/2 s = 1.0/(2*u0) u1 = s*rotmat[0,1] u2 = s*rotmat[0,2] elif", "orientation['seq'] world = orientation['world'] dcm = euler_to_dcm(euler, seq=seq, world=world) elif ori_repr == 'axis_angle':", "the largest entry in the diagonal of rotmat k = np.argmax(np.diag(rotmat)) if k", "return np.dot(v, shiftmat.T) def shift_tensor2_euler(a, euler, forward=False): shiftmat = get_shiftmat_euler(euler, forward=forward) return np.einsum('ip,jq,pq',", "= orientation['repr'] if ori_repr == 'quat': quat = np.array(orientation['quat']) euler = quat_to_euler(quat, seq=to_seq,", "X, second rotation #about Y, and the third rotation about Z axis of", "world = orientation['world'] dcm = euler_to_dcm(euler, seq=seq, world=world) elif ori_repr == 'axis_angle': axis", "q0]]) #Other functions------------------------------------------------------ def translate(v, delta): ''' Translates vectors inplace by delta. 
'''", "return v def align(v, old, new): ''' old and new represent coordinate axes.", "dcm, forward=False): shiftmat = get_shiftmat_dcm(dcm, forward=forward) return np.dot(v, shiftmat.T) def shift_tensor2_dcm(a, dcm, forward=False):", "angle. ''' rotmat = get_rotmat_euler(euler, seq=seq, world=world) return np.dot(v, rotmat.T) def get_rotmat_euler(euler, seq='XYZ',", "0.0, 0.0]) return fix_axis_angle(axis, angle, normalize=True) def quat_to_euler(q, seq='XYZ', world=True): rotmat = get_rotmat_quat(q)", "return mat_is_rotmat(mat) def mat_is_rotmat(mat): det_is_one = math.isclose(np.linalg.det(mat), 1.0, abs_tol=1e-12, rel_tol=1e-12) is_orthogonal = np.allclose(np.dot(mat,", "as a copy. ''' p = np.copy(q) return normalize_quat(p) def quat_is_normalized(q): norm =", "u1 = 0.0 u2 = 0.0 return fix_axis_angle(np.array([u0, u1, u2]), angle, normalize=True) def", "seq=seq, world=world, forward=True) return dcm def euler_to_axis_angle(euler, seq='XYZ', world=True): rotmat = get_rotmat_euler(euler, seq=seq,", "axis, angle) elif n == 2: z_old = la.unitized(np.cross(old[0,:], old[1,:])) z_new = la.unitized(np.cross(new[0,:],", "axis[0]*axis[1]*icos + axis[2]*sin R[1,1] = axis[1]*axis[1]*icos + cos R[1,2] = axis[1]*axis[2]*icos - axis[0]*sin", "if ori_repr == 'quat': quat = np.array(orientation['quat']) axis, angle = quat_to_axis_angle(quat) elif ori_repr", "> math.pi: angle = 2*math.pi - angle axis = -axis return (axis, angle)", "return (axis, angle) def any_to_euler(orientation, to_seq, to_world): ori_repr = orientation['repr'] if ori_repr ==", "np.array([1.0, 0.0, 0.0, 0.0]) def get_rand_quat(): axis, angle = get_rand_axis_angle() return axis_angle_to_quat(axis, angle)", "get_rotmat_dcm(dcm) return np.dot(v, rotmat.T) def get_rotmat_dcm(dcm): return dcm.T def shift_vector_dcm(v, dcm, forward=False): shiftmat", "math.acos(np.dot(old, new)) axis = la.unitized(np.cross(old, new)) return rotate_vector_axis_angle(v, axis, angle) elif n ==", "< math.pi: axis = q[1:4]/sin else: 
rotmat = get_rotmat_quat(q) axis, angle = extract_axis_angle_from_rotmat(rotmat)", "= orientation['world'] euler = euler_to_euler(euler, seq, world, to_seq, to_world) elif ori_repr == 'axis_angle':", "normalize=True) def shift_vector_axis_angle(v, axis, angle, forward=False): shiftmat = get_shiftmat_axis_angle(axis, angle, forward=forward) return np.dot(v,", "quat_deriv_to_ang_vel_mat(q) return np.dot(mat, qdot) def quat_deriv_to_ang_vel_mat(q): q0, q1, q2, q3 = tuple(q) return", "(axis, angle) def any_to_euler(orientation, to_seq, to_world): ori_repr = orientation['repr'] if ori_repr == 'quat':", "get_rotmat_axis_angle(axis, angle): R = np.zeros((3,3)) sin = np.sin(angle) cos = np.cos(angle) icos =", "forward=forward) return np.einsum('ip,jq,kr,pqr', shiftmat, shiftmat, shiftmat, a) def get_shiftmat_axis_angle(axis, angle, forward=False): shiftmat =", "= orientation['angle'] dcm = axis_angle_to_dcm(axis, angle) elif ori_repr == 'dcm': dcm = dcm_to_quat(orientation['dcm'])", "p1, p2, p3 = tuple(p) prod_mat = np.array([[p0, -p1, -p2, -p3], [p1, p0,", "A. B : (3,3) ndarray The rows of B represent the orthonormal basis", "get_quat_prod(p, q): p0, p1, p2, p3 = tuple(p) prod_mat = np.array([[p0, -p1, -p2,", "dcm.T def shift_vector_dcm(v, dcm, forward=False): shiftmat = get_shiftmat_dcm(dcm, forward=forward) return np.dot(v, shiftmat.T) def", "world(i.e. fixed) frame. #This is the same as the sequence used in Blender.", "[p3, -p2, p1, p0]]) pq = normalize_quat(np.dot(prod_mat, q)) return pq def interpolate_quat(q1, q2,", "1.0, abs_tol=1e-14, rel_tol=1e-14): axis /= norm angle = math.fmod(angle, 2*math.pi) if angle <", "The dcm of frame B w.r.t. frame A. ''' return np.dot(B, A.T) def", "rotate_vector_quat(v, q): rotmat = get_rotmat_quat(q) return np.dot(v, rotmat.T) def get_rotmat_quat(q): rotmat = np.empty((3,3))", "new): ''' old and new represent coordinate axes. 
They must be unit vectors.", "forward: shiftmat = shiftmat.T return shiftmat #Direction cosine matrix----------------------------------------------- def dcm_from_axes(A, B): '''", "random pair of axis-angle. The axis is a random vector from the surface", "def rotate_vector_quat(v, q): rotmat = get_rotmat_quat(q) return np.dot(v, rotmat.T) def get_rotmat_quat(q): rotmat =", "+ axis[0]*sin R[2,2] = axis[2]*axis[2]*icos + cos return R def extract_axis_angle_from_rotmat(rotmat): trace =", "= get_shiftmat_dcm(dcm, forward=forward) return np.einsum('ip,jq,pq', shiftmat, shiftmat, a) def shift_tensor3_dcm(a, dcm, forward=False): shiftmat", "if forward: shiftmat = rotmat.T else: shiftmat = rotmat return shiftmat #Quaternion----------------------------------------------------------- def", "def shift_tensor3_dcm(a, dcm, forward=False): shiftmat = get_shiftmat_dcm(dcm, forward=forward) return np.einsum('ip,jq,kr,pqr', shiftmat, shiftmat, shiftmat,", "np.array(orientation['axis']) angle = orientation['angle'] quat = axis_angle_to_quat(axis, angle) elif ori_repr == 'dcm': quat", "return axis_angle_to_quat(axis, angle) def euler_to_dcm(euler, seq='XYZ', world=True): dcm = get_shiftmat_euler(euler, seq=seq, world=world, forward=True)", "= euler_to_axis_angle(euler, seq=seq, world=world) return axis_angle_to_quat(axis, angle) def euler_to_dcm(euler, seq='XYZ', world=True): dcm =", "q0q2 = q[0]*q[2] q0q3 = q[0]*q[3] q1q2 = q[1]*q[2] q1q3 = q[1]*q[3] q2q3", "[p2, p3, p0, -p1], [p3, -p2, p1, p0]]) pq = normalize_quat(np.dot(prod_mat, q)) return", "- 1.0 zeta2 = 2.0*np.random.random() - 1.0 zetasq = zeta1**2 + zeta2**2 if", "axis = np.array(orientation['axis']) angle = orientation['angle'] quat = axis_angle_to_quat(axis, angle) elif ori_repr ==", "quat_is_normalized(q): norm = np.linalg.norm(q) if math.isclose(norm, 1.0, rel_tol=1e-14): return True else: return False", "math.acos(np.dot(p,q)) def quat_deriv_to_ang_vel(q, qdot): mat = quat_deriv_to_ang_vel_mat(q) return np.dot(mat, 
qdot) def quat_deriv_to_ang_vel_mat(q): q0,", "euler = euler_to_euler(euler, seq, world, to_seq, to_world) elif ori_repr == 'axis_angle': axis =", "s*rotmat[0,2] u1 = s*rotmat[1,2] else: u0 = 1.0 u1 = 0.0 u2 =", "Returns ------- (3,3) ndarray The dcm of frame B w.r.t. frame A. '''", "def shift_tensor3_axis_angle(a, axis, angle, forward=False): shiftmat = get_shiftmat_axis_angle(axis, angle, forward=forward) return np.einsum('ip,jq,kr,pqr', shiftmat,", "q, forward=False): shiftmat = get_shiftmat_quat(q, forward=forward) return np.dot(v, shiftmat.T) def shift_tensor2_quat(a, quat, forward=False):", "- rotmat[2,0] u2 = rotmat[1,0] - rotmat[0,1] else: #Find the largest entry in", "basis vectors of frame B. Returns ------- (3,3) ndarray The dcm of frame", "q = (q1*math.sin((1.0-t)*theta) + q2*math.sin(t*theta))/math.sin(theta) return normalize_quat(q) def get_angle_between_quat(p, q): ''' Returns the", "euler, seq='XYZ', world=True): ''' Rotates vectors about axis by angle. ''' rotmat =", "raise ValueError( 'Unrecognized orientation repr {0}'.format(ori_repr)) return axis, angle def rotate_vector_axis_angle(v, axis, angle):", "-q1], [-q3, -q2, q1, q0]]) def ang_vel_to_quat_deriv(q, ang_vel): mat = ang_vel_to_quat_deriv_mat(q) qdot =", "+ q2sq) - 1.0 rotmat[1,2] = 2*(q2q3 - q0q1) rotmat[2,0] = 2*(q1q3 -", "ori_repr = orientation['repr'] if ori_repr == 'quat': quat = np.array(orientation['quat']) dcm = quat_to_dcm(quat)", "= orientation['world'] dcm = euler_to_dcm(euler, seq=seq, world=world) elif ori_repr == 'axis_angle': axis =", "forward=False): shiftmat = get_shiftmat_euler(euler, forward=forward) return np.einsum('ip,jq,pq', shiftmat, shiftmat, a) def shift_tensor3_euler(a, euler,", "rotmat = get_rotmat_quat(q) return factorize_rotmat(rotmat, seq=seq, world=world) def quat_to_dcm(q): return get_shiftmat_quat(q, forward=True) def", "-q3, q2], [-q2, q3, q0, -q1], [-q3, -q2, q1, q0]]) def ang_vel_to_quat_deriv(q, ang_vel):", "np.array(orientation['euler']) seq = 
orientation['seq'] world = orientation['world'] dcm = euler_to_dcm(euler, seq=seq, world=world) elif", "from . import eulang #Euler angle sequence: XYZ (world). First rotation about X,", "#In contrast, the XYZ sequence is understood in the Aerospace community as: #First", "shiftmat, shiftmat, a) def get_shiftmat_dcm(dcm, forward=False): shiftmat = dcm if not forward: shiftmat", "def get_rotmat_axis_angle(axis, angle): R = np.zeros((3,3)) sin = np.sin(angle) cos = np.cos(angle) icos", "rotmat[0,0] = 2*(q0sq + q1sq) - 1.0 rotmat[0,1] = 2*(q1q2 - q0q3) rotmat[0,2]", "+ q1sq) - 1.0 rotmat[0,1] = 2*(q1q2 - q0q3) rotmat[0,2] = 2*(q1q3 +", "#rotation about X-axis of the body frame. #Axis_angle------------------------------------------------------------ def fix_axis_angle(axis, angle, normalize=True): if", "3: dcm = dcm_from_axes(old, new) return rotate_vector_dcm(v, dcm) def mat_is_dcm(mat): return mat_is_rotmat(mat) def", "euler = factorize_rotmat(mat, seq=seq, world=world) return euler def dcm_to_axis_angle(dcm): mat = get_rotmat_dcm(dcm) axis,", "axis is a random vector from the surface of a unit sphere. 
Algorithm", "rotmat = np.empty((3,3)) q0sq = q[0]**2 q1sq = q[1]**2 q2sq = q[2]**2 q3sq", "shiftmat = get_shiftmat_euler(euler, forward=forward) return np.einsum('ip,jq,pq', shiftmat, shiftmat, a) def shift_tensor3_euler(a, euler, forward=False):", "return np.einsum('ip,jq,kr,pqr', shiftmat, shiftmat, shiftmat, a) def get_shiftmat_dcm(dcm, forward=False): shiftmat = dcm if", "euler, seq='XYZ', world=True, forward=False): shiftmat = get_shiftmat_euler(euler, seq=seq, world=world, forward=forward) return np.dot(v, shiftmat.T)", "if n == 1: angle = math.acos(np.dot(old, new)) axis = la.unitized(np.cross(old, new)) return", "ang_vel_to_quat_deriv(q, ang_vel): mat = ang_vel_to_quat_deriv_mat(q) qdot = np.dot(mat, ang_vel) return qdot def ang_vel_to_quat_deriv_mat(q):", "def axis_angle_to_dcm(axis, angle): dcm = get_shiftmat_axis_angle(axis, angle, forward=True) return dcm def any_to_axis_angle(orientation): ori_repr", "old, new): ''' old and new represent coordinate axes. They must be unit", "np.dot(v, shiftmat.T) def shift_tensor2_axis_angle(a, axis, angle, forward=False): shiftmat = get_shiftmat_axis_angle(axis, angle, forward=forward) return", "euler = np.array(orientation['euler']) seq = orientation['seq'] world = orientation['world'] dcm = euler_to_dcm(euler, seq=seq,", "get_shiftmat_quat(q, forward=True) def any_to_quat(orientation): ori_repr = orientation['repr'] if ori_repr == 'quat': quat =", "basis vectors of frame A. 
B : (3,3) ndarray The rows of B", "any_to_dcm(orientation): ori_repr = orientation['repr'] if ori_repr == 'quat': quat = np.array(orientation['quat']) dcm =", "= q[0]*q[3] q1q2 = q[1]*q[2] q1q3 = q[1]*q[3] q2q3 = q[2]*q[3] rotmat[0,0] =", "= 1.0 - cos R[0,0] = axis[0]*axis[0]*icos + cos R[0,1] = axis[0]*axis[1]*icos -", "= factorize_rotmat(rotmat, seq=seq, world=world) return euler def axis_angle_to_dcm(axis, angle): dcm = get_shiftmat_axis_angle(axis, angle,", "np.linalg.norm(q) return q def get_normalized_quat(q): ''' Normalizes a quaternion and returns it as", "cos return R def extract_axis_angle_from_rotmat(rotmat): trace = np.trace(rotmat) angle = math.acos((trace-1)/2) if angle", "mat_is_dcm(mat): return mat_is_rotmat(mat) def mat_is_rotmat(mat): det_is_one = math.isclose(np.linalg.det(mat), 1.0, abs_tol=1e-12, rel_tol=1e-12) is_orthogonal =", "functions------------------------------------------------------ def translate(v, delta): ''' Translates vectors inplace by delta. ''' n =", "s*rotmat[0,1] u2 = s*rotmat[0,2] elif k == 1: u1 = math.sqrt(rotmat[1,1]-rotmat[0,0]-rotmat[2,2]+1)/2 s =", "world=world) elif ori_repr == 'axis_angle': axis = np.array(orientation['axis']) angle = orientation['angle'] quat =", "to_seq, to_world) elif ori_repr == 'axis_angle': axis = np.array(orientation['axis']) angle = orientation['angle'] euler", "and returns it as a new instance. ''' p = np.copy(q) return conjugate_quat(p)", "seq=seq, world=world) elif ori_repr == 'axis_angle': axis = np.array(orientation['axis']) angle = orientation['angle'] quat", "= tuple(p) prod_mat = np.array([[p0, -p1, -p2, -p3], [p1, p0, -p3, p2], [p2,", "return p def invert_quat(q): ''' Inverts a quaternion in-place. 
''' return conjugate_quat(q) def", "axes_new) return rotate_vector_dcm(v, dcm) elif n == 3: dcm = dcm_from_axes(old, new) return", "np.array(orientation['quat']) elif ori_repr == 'euler': euler = np.array(orientation['euler']) seq = orientation['seq'] world =", "= old.shape[0] if n == 1: angle = math.acos(np.dot(old, new)) axis = la.unitized(np.cross(old,", "axes(i.e. frame) A. Parameters ---------- A : (3,3) ndarray The rows of A", "B w.r.t. frame A. ''' return np.dot(B, A.T) def dcm_to_quat(dcm): mat = get_rotmat_dcm(dcm)", "fix_axis_angle(axis, angle, normalize=True): if normalize: norm = np.linalg.norm(axis) if not math.isclose(norm, 1.0, abs_tol=1e-14,", "def get_conjugated_quat(q): ''' Conjugates a quaternion and returns a copy. ''' p =", "axis, angle, forward=False): shiftmat = get_shiftmat_axis_angle(axis, angle, forward=forward) return np.einsum('ip,jq,kr,pqr', shiftmat, shiftmat, shiftmat,", "axis, angle = extract_axis_angle_from_rotmat(rotmat) else: axis = np.array([1.0, 0.0, 0.0]) return fix_axis_angle(axis, angle,", "= math.cos(angle/2) v = math.sin(angle/2)*axis q = np.array([w, v[0], v[1], v[2]]) return normalize_quat(q)", "[ q2, -q1, q0]]) #Other functions------------------------------------------------------ def translate(v, delta): ''' Translates vectors inplace", "= np.array(orientation['euler']) seq = orientation['seq'] world = orientation['world'] axis, angle = euler_to_axis_angle(euler, seq=seq,", "import math import numpy as np from . 
import linalg as la from", "'euler': euler = np.array(orientation['euler']) seq = orientation['seq'] world = orientation['world'] axis, angle =", "< 0.0: angle = -angle axis = -axis if angle > math.pi: angle", "axis /= norm angle = math.fmod(angle, 2*math.pi) if angle < 0.0: angle =", "= np.array([1.0, 0.0, 0.0]) return fix_axis_angle(axis, angle, normalize=True) def quat_to_euler(q, seq='XYZ', world=True): rotmat", "world=True): rotmat = get_rotmat_quat(q) return factorize_rotmat(rotmat, seq=seq, world=world) def quat_to_dcm(q): return get_shiftmat_quat(q, forward=True)", "= axis[1]*axis[2]*icos + axis[0]*sin R[2,2] = axis[2]*axis[2]*icos + cos return R def extract_axis_angle_from_rotmat(rotmat):", "'dcm': euler = dcm_to_euler(orientation['dcm'], seq=to_seq, world=to_world) else: raise ValueError( 'Unrecognized orientation repr {0}'.format(ori_repr))", "The rows of A represent the orthonormal basis vectors of frame A. B", "def dcm_from_axes(A, B): ''' Returns the direction cosine matrix of axes(i.e. 
frame) B", "return rotate_vector_dcm(v, dcm) elif n == 3: dcm = dcm_from_axes(old, new) return rotate_vector_dcm(v,", "is understood in the Aerospace community as: #First rotation about Z-axis, second rotation", "angle = orientation['angle'] euler = axis_angle_to_euler(axis, angle, seq=to_seq, world=to_world) elif ori_repr == 'dcm':", "rotmat.T) def get_rotmat_dcm(dcm): return dcm.T def shift_vector_dcm(v, dcm, forward=False): shiftmat = get_shiftmat_dcm(dcm, forward=forward)", "in the diagonal of rotmat k = np.argmax(np.diag(rotmat)) if k == 0: u0", "n == 2: z_old = la.unitized(np.cross(old[0,:], old[1,:])) z_new = la.unitized(np.cross(new[0,:], new[1,:])) axes_old =", "orientation['repr'] if ori_repr == 'quat': quat = np.array(orientation['quat']) euler = quat_to_euler(quat, seq=to_seq, world=to_world)", "about Z-axis, second rotation about Y-axis, and the third #rotation about X-axis of", "shiftmat.T) def shift_tensor2_quat(a, quat, forward=False): shiftmat = get_shiftmat_quat(quat, forward=forward) return np.einsum('ip,jq,pq', shiftmat, shiftmat,", "ori_repr == 'dcm': axis, angle = dcm_to_axis_angle(orientation['dcm']) else: raise ValueError( 'Unrecognized orientation repr", "to_world): rotmat = get_rotmat_euler(euler, seq=seq, world=world) return factorize_rotmat(rotmat, seq=to_seq, world=to_world) def euler_to_quat(euler, seq='XYZ',", "2.0*zetasq return fix_axis_angle(axis, angle) def axis_angle_to_quat(axis, angle): w = math.cos(angle/2) v = math.sin(angle/2)*axis", "q): ''' Returns the angle between two quaternions p and q. 
''' return", "numbers from [-1, 1) zeta1 = 2.0*np.random.random() - 1.0 zeta2 = 2.0*np.random.random() -", "get_shiftmat_quat(q, forward=False): if forward: shiftmat = get_rotmat_quat(get_conjugated_quat(q)) else: shiftmat = get_rotmat_quat(q) return shiftmat", "dcm def any_to_axis_angle(orientation): ori_repr = orientation['repr'] if ori_repr == 'quat': quat = np.array(orientation['quat'])", "= np.array(orientation['quat']) axis, angle = quat_to_axis_angle(quat) elif ori_repr == 'euler': euler = np.array(orientation['euler'])", "angle < math.pi: u0 = rotmat[2,1] - rotmat[1,2] u1 = rotmat[0,2] - rotmat[2,0]", "quat = np.array(orientation['quat']) euler = quat_to_euler(quat, seq=to_seq, world=to_world) elif ori_repr == 'euler': euler", "the same as the sequence used in Blender. #In contrast, the XYZ sequence", "rotmat = get_rotmat_axis_angle(axis, angle) euler = factorize_rotmat(rotmat, seq=seq, world=world) return euler def axis_angle_to_dcm(axis,", "second rotation about Y-axis, and the third #rotation about X-axis of the body", "== 3: dcm = dcm_from_axes(old, new) return rotate_vector_dcm(v, dcm) def mat_is_dcm(mat): return mat_is_rotmat(mat)", "angle, normalize=True) def quat_to_euler(q, seq='XYZ', world=True): rotmat = get_rotmat_quat(q) return factorize_rotmat(rotmat, seq=seq, world=world)", "return np.einsum('ip,jq,pq', shiftmat, shiftmat, a) def shift_tensor3_axis_angle(a, axis, angle, forward=False): shiftmat = get_shiftmat_axis_angle(axis,", "q0]]) def ang_vel_to_quat_deriv(q, ang_vel): mat = ang_vel_to_quat_deriv_mat(q) qdot = np.dot(mat, ang_vel) return qdot", "vectors about axis by angle. ''' rotmat = get_rotmat_euler(euler, seq=seq, world=world) return np.dot(v,", "import eulang #Euler angle sequence: XYZ (world). First rotation about X, second rotation", "quaternions p and q. 
''' return math.acos(np.dot(p,q)) def quat_deriv_to_ang_vel(q, qdot): mat = quat_deriv_to_ang_vel_mat(q)", "seq, world, to_seq, to_world) elif ori_repr == 'axis_angle': axis = np.array(orientation['axis']) angle =", "euler_to_axis_angle(euler, seq='XYZ', world=True): rotmat = get_rotmat_euler(euler, seq=seq, world=world) axis, angle = extract_axis_angle_from_rotmat(rotmat) return", "seq=seq, world=world) return np.dot(v, rotmat.T) def get_rotmat_euler(euler, seq='XYZ', world=True): return eulang.rotmat_euler(euler, seq=seq, world=world)", "seq=seq, world=world) return factorize_rotmat(rotmat, seq=to_seq, world=to_world) def euler_to_quat(euler, seq='XYZ', world=True): axis, angle =", "return euler def rotate_vector_euler(v, euler, seq='XYZ', world=True): ''' Rotates vectors about axis by", "= axis[1]*axis[2]*icos - axis[0]*sin R[2,0] = axis[2]*axis[0]*icos - axis[1]*sin R[2,1] = axis[1]*axis[2]*icos +", "Generates a random pair of axis-angle. The axis is a random vector from", "p = np.copy(q) return conjugate_quat(p) def normalize_quat(q): ''' Normalizes a quaternion in-place. 
'''", "def dcm_to_quat(dcm): mat = get_rotmat_dcm(dcm) axis, angle = extract_axis_angle_from_rotmat(mat) return axis_angle_to_quat(axis, angle) def", "== new.shape[0] n = old.shape[0] if n == 1: angle = math.acos(np.dot(old, new))", "u1, u2]), angle, normalize=True) def shift_vector_axis_angle(v, axis, angle, forward=False): shiftmat = get_shiftmat_axis_angle(axis, angle,", "world=world) def shift_vector_euler(v, euler, seq='XYZ', world=True, forward=False): shiftmat = get_shiftmat_euler(euler, seq=seq, world=world, forward=forward)", "elif ori_repr == 'axis_angle': axis = np.array(orientation['axis']) angle = orientation['angle'] quat = axis_angle_to_quat(axis,", "1.0/(2*u0) u1 = s*rotmat[0,1] u2 = s*rotmat[0,2] elif k == 1: u1 =", "= get_rotmat_euler(euler, seq=seq, world=world) axis, angle = extract_axis_angle_from_rotmat(rotmat) return (axis, angle) def any_to_euler(orientation,", "= q[0]**2 q1sq = q[1]**2 q2sq = q[2]**2 q3sq = q[3]**2 q0q1 =", "q1, q2, q3 = tuple(q) return 2*np.array([[-q1, q0, -q3, q2], [-q2, q3, q0,", "world = orientation['world'] axis, angle = euler_to_axis_angle(euler, seq=seq, world=world) elif ori_repr == 'axis_angle':", "[ q0, q3, -q2], [-q3, q0, q1], [ q2, -q1, q0]]) #Other functions------------------------------------------------------", "== 'axis_angle': axis = np.array(orientation['axis']) angle = orientation['angle'] elif ori_repr == 'dcm': axis,", "def get_rotmat_euler(euler, seq='XYZ', world=True): return eulang.rotmat_euler(euler, seq=seq, world=world) def shift_vector_euler(v, euler, seq='XYZ', world=True,", "the world(i.e. fixed) frame. 
#This is the same as the sequence used in", "q2], [-q2, q3, q0, -q1], [-q3, -q2, q1, q0]]) def ang_vel_to_quat_deriv(q, ang_vel): mat", "qdot) def quat_deriv_to_ang_vel_mat(q): q0, q1, q2, q3 = tuple(q) return 2*np.array([[-q1, q0, -q3,", "v = math.sin(angle/2)*axis q = np.array([w, v[0], v[1], v[2]]) return normalize_quat(q) def axis_angle_to_euler(axis,", "np.dot(mat, qdot) def quat_deriv_to_ang_vel_mat(q): q0, q1, q2, q3 = tuple(q) return 2*np.array([[-q1, q0,", "= np.sqrt(1.0-zetasq) axis[0] = 2.0*zeta1*rt axis[1] = 2.0*zeta2*rt axis[2] = 1.0 - 2.0*zetasq", "get_normalized_quat(q): ''' Normalizes a quaternion and returns it as a copy. ''' p", "return shiftmat def conjugate_quat(q): ''' Conjugates a quaternion in-place. ''' q[1:4] = -q[1:4]", "-angle axis = -axis if angle > math.pi: angle = 2*math.pi - angle", "of the body frame. #Axis_angle------------------------------------------------------------ def fix_axis_angle(axis, angle, normalize=True): if normalize: norm =", "get_rotmat_euler(euler, seq=seq, world=world) axis, angle = extract_axis_angle_from_rotmat(rotmat) return (axis, angle) def any_to_euler(orientation, to_seq,", "p0]]) pq = normalize_quat(np.dot(prod_mat, q)) return pq def interpolate_quat(q1, q2, t): theta =", "= get_shiftmat_axis_angle(axis, angle, forward=forward) return np.einsum('ip,jq,pq', shiftmat, shiftmat, a) def shift_tensor3_axis_angle(a, axis, angle,", "np.dot(v, rotmat.T) def get_rotmat_euler(euler, seq='XYZ', world=True): return eulang.rotmat_euler(euler, seq=seq, world=world) def shift_vector_euler(v, euler,", "z_new)) dcm = dcm_from_axes(axes_old, axes_new) return rotate_vector_dcm(v, dcm) elif n == 3: dcm", "np.sin(angle) cos = np.cos(angle) icos = 1.0 - cos R[0,0] = axis[0]*axis[0]*icos +", "orthonormal basis vectors of frame A. 
B : (3,3) ndarray The rows of", "def extract_axis_angle_from_rotmat(rotmat): trace = np.trace(rotmat) angle = math.acos((trace-1)/2) if angle > 0: if", "= np.array(orientation['quat']) elif ori_repr == 'euler': euler = np.array(orientation['euler']) seq = orientation['seq'] world", "def get_identity_quat(): return np.array([1.0, 0.0, 0.0, 0.0]) def get_rand_quat(): axis, angle = get_rand_axis_angle()", "angle) def euler_to_dcm(euler, seq='XYZ', world=True): dcm = get_shiftmat_euler(euler, seq=seq, world=world, forward=True) return dcm", "q0q3 = q[0]*q[3] q1q2 = q[1]*q[2] q1q3 = q[1]*q[3] q2q3 = q[2]*q[3] rotmat[0,0]", "get_shiftmat_quat(quat, forward=forward) return np.einsum('ip,jq,pq', shiftmat, shiftmat, a) def shift_tensor3_quat(a, quat, forward=False): shiftmat =", "quat, forward=False): shiftmat = get_shiftmat_quat(quat, forward=forward) return np.einsum('ip,jq,kr,pqr', shiftmat, shiftmat, shiftmat, a) def", "np.zeros((3,3)) sin = np.sin(angle) cos = np.cos(angle) icos = 1.0 - cos R[0,0]", "world=to_world) elif ori_repr == 'euler': euler = np.array(orientation['euler']) seq = orientation['seq'] world =", "extract_axis_angle_from_rotmat(rotmat) else: axis = np.array([1.0, 0.0, 0.0]) return fix_axis_angle(axis, angle, normalize=True) def quat_to_euler(q,", "and q. 
''' return math.acos(np.dot(p,q)) def quat_deriv_to_ang_vel(q, qdot): mat = quat_deriv_to_ang_vel_mat(q) return np.dot(mat,", "zetasq = zeta1**2 + zeta2**2 if zetasq <= 1.0: break rt = np.sqrt(1.0-zetasq)", "to_world): ori_repr = orientation['repr'] if ori_repr == 'quat': quat = np.array(orientation['quat']) euler =", "= s*rotmat[1,2] else: u0 = 1.0 u1 = 0.0 u2 = 0.0 return", "shiftmat = get_shiftmat_axis_angle(axis, angle, forward=forward) return np.einsum('ip,jq,kr,pqr', shiftmat, shiftmat, shiftmat, a) def get_shiftmat_axis_angle(axis,", "= orientation['repr'] if ori_repr == 'quat': quat = np.array(orientation['quat']) dcm = quat_to_dcm(quat) elif", "def align(v, old, new): ''' old and new represent coordinate axes. They must", "rotate_vector_dcm(v, dcm): rotmat = get_rotmat_dcm(dcm) return np.dot(v, rotmat.T) def get_rotmat_dcm(dcm): return dcm.T def", "axis[2]*sin R[1,1] = axis[1]*axis[1]*icos + cos R[1,2] = axis[1]*axis[2]*icos - axis[0]*sin R[2,0] =", "= orientation['repr'] if ori_repr == 'quat': quat = np.array(orientation['quat']) elif ori_repr == 'euler':", "import numpy as np from . import linalg as la from . import", "= np.cos(angle) icos = 1.0 - cos R[0,0] = axis[0]*axis[0]*icos + cos R[0,1]", "/= np.linalg.norm(q) return q def get_normalized_quat(q): ''' Normalizes a quaternion and returns it", "norm = np.linalg.norm(q) if math.isclose(norm, 1.0, rel_tol=1e-14): return True else: return False def", "def mat_is_rotmat(mat): det_is_one = math.isclose(np.linalg.det(mat), 1.0, abs_tol=1e-12, rel_tol=1e-12) is_orthogonal = np.allclose(np.dot(mat, mat.T), np.identity(3))", "about X, second rotation #about Y, and the third rotation about Z axis", "First rotation about X, second rotation #about Y, and the third rotation about", "math import numpy as np from . 
import linalg as la from .", "= 2*math.acos(q[0]) sin = math.sqrt(1.0-q[0]**2) if angle > 0.0: if angle < math.pi:", "- angle axis = -axis return (axis, angle) def get_rand_axis_angle(): ''' Generates a", "two uniform random numbers from [-1, 1) zeta1 = 2.0*np.random.random() - 1.0 zeta2", "get_rotmat_quat(get_conjugated_quat(q)) else: shiftmat = get_rotmat_quat(q) return shiftmat def conjugate_quat(q): ''' Conjugates a quaternion", "= zeta1**2 + zeta2**2 if zetasq <= 1.0: break rt = np.sqrt(1.0-zetasq) axis[0]", "forward=True) return dcm def any_to_axis_angle(orientation): ori_repr = orientation['repr'] if ori_repr == 'quat': quat", "return np.einsum('ip,jq,pq', shiftmat, shiftmat, a) def shift_tensor3_dcm(a, dcm, forward=False): shiftmat = get_shiftmat_dcm(dcm, forward=forward)", "u1 = s*rotmat[0,1] u2 = s*rotmat[0,2] elif k == 1: u1 = math.sqrt(rotmat[1,1]-rotmat[0,0]-rotmat[2,2]+1)/2", "eulang #Euler angle sequence: XYZ (world). First rotation about X, second rotation #about", "def translate(v, delta): ''' Translates vectors inplace by delta. 
''' n = v.shape[0]", "q3 = tuple(q) return 2*np.array([[-q1, q0, -q3, q2], [-q2, q3, q0, -q1], [-q3,", "rotmat return shiftmat #Quaternion----------------------------------------------------------- def get_rand_quat(): q = np.random.random((4,)) return normalize_quat(q) def get_identity_quat():", "shiftmat, shiftmat, a) def shift_tensor3_euler(a, euler, forward=False): shiftmat = get_shiftmat_euler(euler, forward=forward) return np.einsum('ip,jq,kr,pqr',", "orientation['seq'] world = orientation['world'] euler = euler_to_euler(euler, seq, world, to_seq, to_world) elif ori_repr", "= orientation['seq'] world = orientation['world'] quat = euler_to_quat(euler, seq=seq, world=world) elif ori_repr ==", "quat def rotate_vector_quat(v, q): rotmat = get_rotmat_quat(q) return np.dot(v, rotmat.T) def get_rotmat_quat(q): rotmat", "return np.einsum('ip,jq,pq', shiftmat, shiftmat, a) def shift_tensor3_euler(a, euler, forward=False): shiftmat = get_shiftmat_euler(euler, forward=forward)", "random number from [0.0, 2*pi) angle = 2.0*math.pi*np.random.random() while True: #Generate two uniform", "= get_rotmat_quat(q) axis, angle = extract_axis_angle_from_rotmat(rotmat) else: axis = np.array([1.0, 0.0, 0.0]) return", "shiftmat = get_shiftmat_quat(quat, forward=forward) return np.einsum('ip,jq,kr,pqr', shiftmat, shiftmat, shiftmat, a) def get_shiftmat_quat(q, forward=False):", "angle, forward=forward) return np.einsum('ip,jq,pq', shiftmat, shiftmat, a) def shift_tensor3_axis_angle(a, axis, angle, forward=False): shiftmat", "= 2.0*math.pi*np.random.random() while True: #Generate two uniform random numbers from [-1, 1) zeta1", "A uniform random number from [0.0, 2*pi) angle = 2.0*math.pi*np.random.random() while True: #Generate", "+ axis[2]*sin R[1,1] = axis[1]*axis[1]*icos + cos R[1,2] = axis[1]*axis[2]*icos - axis[0]*sin R[2,0]", "math.isclose(np.linalg.det(mat), 1.0, abs_tol=1e-12, rel_tol=1e-12) is_orthogonal = np.allclose(np.dot(mat, mat.T), np.identity(3)) return is_orthogonal 
and det_is_one", "a) def get_shiftmat_dcm(dcm, forward=False): shiftmat = dcm if not forward: shiftmat = shiftmat.T", "else: u0 = 1.0 u1 = 0.0 u2 = 0.0 return fix_axis_angle(np.array([u0, u1,", "shiftmat, shiftmat, a) def shift_tensor3_quat(a, quat, forward=False): shiftmat = get_shiftmat_quat(quat, forward=forward) return np.einsum('ip,jq,kr,pqr',", "as a new instance. ''' p = np.copy(q) return conjugate_quat(p) def normalize_quat(q): '''", "copy. ''' p = np.copy(q) p[1:4] = -p[1:4] return p def invert_quat(q): '''", "quat_to_axis_angle(quat) elif ori_repr == 'euler': euler = np.array(orientation['euler']) seq = orientation['seq'] world =", "return conjugate_quat(p) def normalize_quat(q): ''' Normalizes a quaternion in-place. ''' q /= np.linalg.norm(q)", "between two quaternions p and q. ''' return math.acos(np.dot(p,q)) def quat_deriv_to_ang_vel(q, qdot): mat", "shiftmat, shiftmat, shiftmat, a) def get_shiftmat_euler(euler, seq='XYZ', world=True, forward=False): rotmat = get_rotmat_euler(euler, seq=seq,", "axis[0] = 2.0*zeta1*rt axis[1] = 2.0*zeta2*rt axis[2] = 1.0 - 2.0*zetasq return fix_axis_angle(axis,", "la.unitized(np.cross(new[0,:], new[1,:])) axes_old = np.vstack((old, z_old)) axes_new = np.vstack((new, z_new)) dcm = dcm_from_axes(axes_old,", "-p1, -p2, -p3], [p1, p0, -p3, p2], [p2, p3, p0, -p1], [p3, -p2,", "{0}'.format(ori_repr)) return euler def rotate_vector_euler(v, euler, seq='XYZ', world=True): ''' Rotates vectors about axis", "0: if angle < math.pi: u0 = rotmat[2,1] - rotmat[1,2] u1 = rotmat[0,2]", "dcm) elif n == 3: dcm = dcm_from_axes(old, new) return rotate_vector_dcm(v, dcm) def", "(3,3) ndarray The rows of B represent the orthonormal basis vectors of frame", "u2 = 0.0 return fix_axis_angle(np.array([u0, u1, u2]), angle, normalize=True) def shift_vector_axis_angle(v, axis, angle,", "rotmat def shift_vector_quat(v, q, forward=False): shiftmat = get_shiftmat_quat(q, forward=forward) return np.dot(v, shiftmat.T) def", "rotmat.T else: shiftmat = 
rotmat return shiftmat #Quaternion----------------------------------------------------------- def get_rand_quat(): q = np.random.random((4,))", "get_rotmat_dcm(dcm) euler = factorize_rotmat(mat, seq=seq, world=world) return euler def dcm_to_axis_angle(dcm): mat = get_rotmat_dcm(dcm)", "np.zeros((3,)) #Generate angle: A uniform random number from [0.0, 2*pi) angle = 2.0*math.pi*np.random.random()", "math.pi: angle = 2*math.pi - angle axis = -axis return (axis, angle) def", "angle, forward=forward) return np.einsum('ip,jq,kr,pqr', shiftmat, shiftmat, shiftmat, a) def get_shiftmat_axis_angle(axis, angle, forward=False): shiftmat", "''' Returns the angle between two quaternions p and q. ''' return math.acos(np.dot(p,q))", "math.pi: u0 = rotmat[2,1] - rotmat[1,2] u1 = rotmat[0,2] - rotmat[2,0] u2 =", "u2 = s*rotmat[1,2] elif k == 2: u2 = math.sqrt(rotmat[2,2]-rotmat[0,0]-rotmat[1,1]+1)/2 s = 1.0/(2*u2)", "- 1.0 rotmat[0,1] = 2*(q1q2 - q0q3) rotmat[0,2] = 2*(q1q3 + q0q2) rotmat[1,0]", "def invert_quat(q): ''' Inverts a quaternion in-place. ''' return conjugate_quat(q) def get_inverted_quat(q): '''", "''' Translates vectors inplace by delta. 
''' n = v.shape[0] for i in", "def shift_vector_dcm(v, dcm, forward=False): shiftmat = get_shiftmat_dcm(dcm, forward=forward) return np.dot(v, shiftmat.T) def shift_tensor2_dcm(a,", "axis, angle = euler_to_axis_angle(euler, seq=seq, world=world) return axis_angle_to_quat(axis, angle) def euler_to_dcm(euler, seq='XYZ', world=True):", "= 2.0*zeta2*rt axis[2] = 1.0 - 2.0*zetasq return fix_axis_angle(axis, angle) def axis_angle_to_quat(axis, angle):", "the Aerospace community as: #First rotation about Z-axis, second rotation about Y-axis, and", "q0q3) rotmat[1,1] = 2*(q0sq + q2sq) - 1.0 rotmat[1,2] = 2*(q2q3 - q0q1)", "forward: shiftmat = get_rotmat_quat(get_conjugated_quat(q)) else: shiftmat = get_rotmat_quat(q) return shiftmat def conjugate_quat(q): '''", "= get_shiftmat_axis_angle(axis, angle, forward=True) return dcm def any_to_axis_angle(orientation): ori_repr = orientation['repr'] if ori_repr", "repr {0}'.format(ori_repr)) return axis, angle def rotate_vector_axis_angle(v, axis, angle): ''' Rotates vectors about", "quaternion in-place. 
''' return conjugate_quat(q) def get_inverted_quat(q): ''' Inverts a quaternion and returns", "world=True, forward=False): rotmat = get_rotmat_euler(euler, seq=seq, world=world) if forward: shiftmat = rotmat.T else:", "''' q[1:4] = -q[1:4] return q def get_conjugated_quat(q): ''' Conjugates a quaternion and", "quat_to_dcm(q): return get_shiftmat_quat(q, forward=True) def any_to_quat(orientation): ori_repr = orientation['repr'] if ori_repr == 'quat':", "= 2*(q1q3 - q0q2) rotmat[2,1] = 2*(q2q3 + q0q1) rotmat[2,2] = 2*(q0sq +", "= rotmat return shiftmat #Quaternion----------------------------------------------------------- def get_rand_quat(): q = np.random.random((4,)) return normalize_quat(q) def", "old.shape[0] if n == 1: angle = math.acos(np.dot(old, new)) axis = la.unitized(np.cross(old, new))", "axis_angle_to_quat(axis, angle) def dcm_to_euler(dcm, seq='XYZ', world=True): mat = get_rotmat_dcm(dcm) euler = factorize_rotmat(mat, seq=seq,", "axis, angle = quat_to_axis_angle(quat) elif ori_repr == 'euler': euler = np.array(orientation['euler']) seq =", "norm = np.linalg.norm(axis) if not math.isclose(norm, 1.0, abs_tol=1e-14, rel_tol=1e-14): axis /= norm angle", "= quat_to_axis_angle(quat) elif ori_repr == 'euler': euler = np.array(orientation['euler']) seq = orientation['seq'] world", "s = 1.0/(2*u2) u0 = s*rotmat[0,2] u1 = s*rotmat[1,2] else: u0 = 1.0", "axis = np.array(orientation['axis']) angle = orientation['angle'] euler = axis_angle_to_euler(axis, angle, seq=to_seq, world=to_world) elif", "z_old)) axes_new = np.vstack((new, z_new)) dcm = dcm_from_axes(axes_old, axes_new) return rotate_vector_dcm(v, dcm) elif", "= extract_axis_angle_from_rotmat(mat) return (axis, angle) def any_to_dcm(orientation): ori_repr = orientation['repr'] if ori_repr ==", "else: raise ValueError( 'Unrecognized orientation repr {0}'.format(ori_repr)) return axis, angle def rotate_vector_axis_angle(v, axis,", "same as the sequence used in Blender. 
#In contrast, the XYZ sequence is", "vectors. ''' assert old.shape[0] == new.shape[0] n = old.shape[0] if n == 1:", "angle = orientation['angle'] elif ori_repr == 'dcm': axis, angle = dcm_to_axis_angle(orientation['dcm']) else: raise", "= -axis if angle > math.pi: angle = 2*math.pi - angle axis =", "ori_repr == 'dcm': euler = dcm_to_euler(orientation['dcm'], seq=to_seq, world=to_world) else: raise ValueError( 'Unrecognized orientation", "a) def get_shiftmat_quat(q, forward=False): if forward: shiftmat = get_rotmat_quat(get_conjugated_quat(q)) else: shiftmat = get_rotmat_quat(q)", "R[2,1] = axis[1]*axis[2]*icos + axis[0]*sin R[2,2] = axis[2]*axis[2]*icos + cos return R def", "dcm = quat_to_dcm(quat) elif ori_repr == 'euler': euler = np.array(orientation['euler']) seq = orientation['seq']", "get_rand_quat(): q = np.random.random((4,)) return normalize_quat(q) def get_identity_quat(): return np.array([1.0, 0.0, 0.0, 0.0])", "in-place. ''' return conjugate_quat(q) def get_inverted_quat(q): ''' Inverts a quaternion and returns it", "frame. #Axis_angle------------------------------------------------------------ def fix_axis_angle(axis, angle, normalize=True): if normalize: norm = np.linalg.norm(axis) if not", "& Tildesley p. 349. ''' axis = np.zeros((3,)) #Generate angle: A uniform random", "def conjugate_quat(q): ''' Conjugates a quaternion in-place. ''' q[1:4] = -q[1:4] return q", "''' old and new represent coordinate axes. They must be unit vectors. 
'''", "-axis return (axis, angle) def get_rand_axis_angle(): ''' Generates a random pair of axis-angle.", "elif ori_repr == 'axis_angle': axis = np.array(orientation['axis']) angle = orientation['angle'] dcm = axis_angle_to_dcm(axis,", "angle = math.fmod(angle, 2*math.pi) if angle < 0.0: angle = -angle axis =", "repr {0}'.format(ori_repr)) return dcm def rotate_vector_dcm(v, dcm): rotmat = get_rotmat_dcm(dcm) return np.dot(v, rotmat.T)", "world, to_seq, to_world): rotmat = get_rotmat_euler(euler, seq=seq, world=world) return factorize_rotmat(rotmat, seq=to_seq, world=to_world) def", "ori_repr == 'euler': euler = np.array(orientation['euler']) seq = orientation['seq'] world = orientation['world'] euler", "get_shiftmat_dcm(dcm, forward=forward) return np.einsum('ip,jq,kr,pqr', shiftmat, shiftmat, shiftmat, a) def get_shiftmat_dcm(dcm, forward=False): shiftmat =", "q[2]**2 q3sq = q[3]**2 q0q1 = q[0]*q[1] q0q2 = q[0]*q[2] q0q3 = q[0]*q[3]", "if not forward: shiftmat = shiftmat.T return shiftmat #Euler angle----------------------------------------------------------- def factorize_rotmat(rotmat, seq='XYZ',", "A represent the orthonormal basis vectors of frame A. B : (3,3) ndarray", "seq='XYZ', world=True, forward=False): shiftmat = get_shiftmat_euler(euler, seq=seq, world=world, forward=forward) return np.dot(v, shiftmat.T) def", "third rotation about Z axis of the world(i.e. fixed) frame. 
#This is the", "elif ori_repr == 'dcm': quat = dcm_to_quat(orientation['dcm']) else: raise ValueError( 'Unrecognized orientation repr", "else: #Find the largest entry in the diagonal of rotmat k = np.argmax(np.diag(rotmat))", "diagonal of rotmat k = np.argmax(np.diag(rotmat)) if k == 0: u0 = math.sqrt(rotmat[0,0]-rotmat[1,1]-rotmat[2,2]+1)/2", "shiftmat #Euler angle----------------------------------------------------------- def factorize_rotmat(rotmat, seq='XYZ', world=True): return eulang.factor_rotmat(rotmat, seq=seq, world=world) def euler_to_euler(euler,", "rotmat[2,2] = 2*(q0sq + q3sq) - 1.0 return rotmat def shift_vector_quat(v, q, forward=False):", "about axis by angle. ''' rotmat = get_rotmat_axis_angle(axis, angle) return np.dot(v, rotmat.T) def", "= get_rotmat_quat(q) return shiftmat def conjugate_quat(q): ''' Conjugates a quaternion in-place. ''' q[1:4]", "shiftmat, a) def shift_tensor3_quat(a, quat, forward=False): shiftmat = get_shiftmat_quat(quat, forward=forward) return np.einsum('ip,jq,kr,pqr', shiftmat,", "q0q1 = q[0]*q[1] q0q2 = q[0]*q[2] q0q3 = q[0]*q[3] q1q2 = q[1]*q[2] q1q3", "mat_is_rotmat(mat): det_is_one = math.isclose(np.linalg.det(mat), 1.0, abs_tol=1e-12, rel_tol=1e-12) is_orthogonal = np.allclose(np.dot(mat, mat.T), np.identity(3)) return", "2: u2 = math.sqrt(rotmat[2,2]-rotmat[0,0]-rotmat[1,1]+1)/2 s = 1.0/(2*u2) u0 = s*rotmat[0,2] u1 = s*rotmat[1,2]", "= get_shiftmat_euler(euler, seq=seq, world=world, forward=True) return dcm def euler_to_axis_angle(euler, seq='XYZ', world=True): rotmat =", "conjugate_quat(q): ''' Conjugates a quaternion in-place. ''' q[1:4] = -q[1:4] return q def", "== 'dcm': euler = dcm_to_euler(orientation['dcm'], seq=to_seq, world=to_world) else: raise ValueError( 'Unrecognized orientation repr", "new instance. 
''' p = np.copy(q) return conjugate_quat(p) def normalize_quat(q): ''' Normalizes a", "return factorize_rotmat(rotmat, seq=seq, world=world) def quat_to_dcm(q): return get_shiftmat_quat(q, forward=True) def any_to_quat(orientation): ori_repr =", "for i in range(n): v[i,:] += delta return v def align(v, old, new):", "= get_rotmat_euler(euler, seq=seq, world=world) return np.dot(v, rotmat.T) def get_rotmat_euler(euler, seq='XYZ', world=True): return eulang.rotmat_euler(euler,", "get_perturbed_quat(q): raise NotImplementedError def quat_to_axis_angle(q): angle = 2*math.acos(q[0]) sin = math.sqrt(1.0-q[0]**2) if angle", "u0 = 1.0 u1 = 0.0 u2 = 0.0 return fix_axis_angle(np.array([u0, u1, u2]),", "linalg as la from . import eulang #Euler angle sequence: XYZ (world). First", "axes_old = np.vstack((old, z_old)) axes_new = np.vstack((new, z_new)) dcm = dcm_from_axes(axes_old, axes_new) return", "np.vstack((old, z_old)) axes_new = np.vstack((new, z_new)) dcm = dcm_from_axes(axes_old, axes_new) return rotate_vector_dcm(v, dcm)", "frame. #This is the same as the sequence used in Blender. #In contrast,", "as la from . import eulang #Euler angle sequence: XYZ (world). 
First rotation", "forward=False): shiftmat = dcm if not forward: shiftmat = shiftmat.T return shiftmat #Euler", "= np.vstack((new, z_new)) dcm = dcm_from_axes(axes_old, axes_new) return rotate_vector_dcm(v, dcm) elif n ==", "n == 1: angle = math.acos(np.dot(old, new)) axis = la.unitized(np.cross(old, new)) return rotate_vector_axis_angle(v,", "axis = la.unitized(np.cross(old, new)) return rotate_vector_axis_angle(v, axis, angle) elif n == 2: z_old", "p = np.copy(q) p[1:4] = -p[1:4] return p def invert_quat(q): ''' Inverts a", "R def extract_axis_angle_from_rotmat(rotmat): trace = np.trace(rotmat) angle = math.acos((trace-1)/2) if angle > 0:", "np.dot(v, rotmat.T) def get_rotmat_axis_angle(axis, angle): R = np.zeros((3,3)) sin = np.sin(angle) cos =", "axis[2]*axis[2]*icos + cos return R def extract_axis_angle_from_rotmat(rotmat): trace = np.trace(rotmat) angle = math.acos((trace-1)/2)", "else: raise ValueError( 'Unrecognized orientation repr {0}'.format(ori_repr)) return euler def rotate_vector_euler(v, euler, seq='XYZ',", "shift_tensor2_dcm(a, dcm, forward=False): shiftmat = get_shiftmat_dcm(dcm, forward=forward) return np.einsum('ip,jq,pq', shiftmat, shiftmat, a) def", "u1 = math.sqrt(rotmat[1,1]-rotmat[0,0]-rotmat[2,2]+1)/2 s = 1.0/(2*u1) u0 = s*rotmat[0,1] u2 = s*rotmat[1,2] elif", "= dcm_to_euler(orientation['dcm'], seq=to_seq, world=to_world) else: raise ValueError( 'Unrecognized orientation repr {0}'.format(ori_repr)) return euler", "extract_axis_angle_from_rotmat(mat) return (axis, angle) def any_to_dcm(orientation): ori_repr = orientation['repr'] if ori_repr == 'quat':", "B represent the orthonormal basis vectors of frame B. Returns ------- (3,3) ndarray", "def get_rotmat_quat(q): rotmat = np.empty((3,3)) q0sq = q[0]**2 q1sq = q[1]**2 q2sq =", "shiftmat = rotmat.T else: shiftmat = rotmat return shiftmat #Quaternion----------------------------------------------------------- def get_rand_quat(): q", "a quaternion in-place. 
''' q[1:4] = -q[1:4] return q def get_conjugated_quat(q): ''' Conjugates", "1.0, rel_tol=1e-14): return True else: return False def get_quat_prod(p, q): p0, p1, p2,", "== 'euler': euler = np.array(orientation['euler']) seq = orientation['seq'] world = orientation['world'] quat =", "(axis, angle) def get_rand_axis_angle(): ''' Generates a random pair of axis-angle. The axis", "== 'euler': euler = np.array(orientation['euler']) seq = orientation['seq'] world = orientation['world'] axis, angle", "is a random vector from the surface of a unit sphere. Algorithm from", "matrix of axes(i.e. frame) B w.r.t. axes(i.e. frame) A. Parameters ---------- A :", "normalize_quat(q): ''' Normalizes a quaternion in-place. ''' q /= np.linalg.norm(q) return q def", "seq=seq, world=world) return euler def dcm_to_axis_angle(dcm): mat = get_rotmat_dcm(dcm) axis, angle = extract_axis_angle_from_rotmat(mat)", "axis = np.array(orientation['axis']) angle = orientation['angle'] dcm = axis_angle_to_dcm(axis, angle) elif ori_repr ==", "q2) q = (q1*math.sin((1.0-t)*theta) + q2*math.sin(t*theta))/math.sin(theta) return normalize_quat(q) def get_angle_between_quat(p, q): ''' Returns", "body frame. 
#Axis_angle------------------------------------------------------------ def fix_axis_angle(axis, angle, normalize=True): if normalize: norm = np.linalg.norm(axis) if", "dcm = axis_angle_to_dcm(axis, angle) elif ori_repr == 'dcm': dcm = dcm_to_quat(orientation['dcm']) else: raise", "if normalize: norm = np.linalg.norm(axis) if not math.isclose(norm, 1.0, abs_tol=1e-14, rel_tol=1e-14): axis /=", "= dcm_to_axis_angle(orientation['dcm']) else: raise ValueError( 'Unrecognized orientation repr {0}'.format(ori_repr)) return axis, angle def", "A.T) def dcm_to_quat(dcm): mat = get_rotmat_dcm(dcm) axis, angle = extract_axis_angle_from_rotmat(mat) return axis_angle_to_quat(axis, angle)", "= extract_axis_angle_from_rotmat(rotmat) return (axis, angle) def any_to_euler(orientation, to_seq, to_world): ori_repr = orientation['repr'] if", "= np.array(orientation['euler']) seq = orientation['seq'] world = orientation['world'] dcm = euler_to_dcm(euler, seq=seq, world=world)", "''' Conjugates a quaternion and returns a copy. ''' p = np.copy(q) p[1:4]", "ndarray The dcm of frame B w.r.t. frame A. 
''' return np.dot(B, A.T)", "new)) axis = la.unitized(np.cross(old, new)) return rotate_vector_axis_angle(v, axis, angle) elif n == 2:", "np.linalg.norm(q) if math.isclose(norm, 1.0, rel_tol=1e-14): return True else: return False def get_quat_prod(p, q):", "q3, -q2], [-q3, q0, q1], [ q2, -q1, q0]]) #Other functions------------------------------------------------------ def translate(v,", "2*(q0sq + q2sq) - 1.0 rotmat[1,2] = 2*(q2q3 - q0q1) rotmat[2,0] = 2*(q1q3", "shiftmat, shiftmat, shiftmat, a) def get_shiftmat_quat(q, forward=False): if forward: shiftmat = get_rotmat_quat(get_conjugated_quat(q)) else:", "R[2,2] = axis[2]*axis[2]*icos + cos return R def extract_axis_angle_from_rotmat(rotmat): trace = np.trace(rotmat) angle", "seq=to_seq, world=to_world) def euler_to_quat(euler, seq='XYZ', world=True): axis, angle = euler_to_axis_angle(euler, seq=seq, world=world) return", "= get_rand_axis_angle() return axis_angle_to_quat(axis, angle) def get_perturbed_quat(q): raise NotImplementedError def quat_to_axis_angle(q): angle =", "return dcm.T def shift_vector_dcm(v, dcm, forward=False): shiftmat = get_shiftmat_dcm(dcm, forward=forward) return np.dot(v, shiftmat.T)", "return math.acos(np.dot(p,q)) def quat_deriv_to_ang_vel(q, qdot): mat = quat_deriv_to_ang_vel_mat(q) return np.dot(mat, qdot) def quat_deriv_to_ang_vel_mat(q):", "invert_quat(q): ''' Inverts a quaternion in-place. ''' return conjugate_quat(q) def get_inverted_quat(q): ''' Inverts", "by angle. ''' rotmat = get_rotmat_axis_angle(axis, angle) return np.dot(v, rotmat.T) def get_rotmat_axis_angle(axis, angle):", "= axis_angle_to_euler(axis, angle, seq=to_seq, world=to_world) elif ori_repr == 'dcm': euler = dcm_to_euler(orientation['dcm'], seq=to_seq,", "axis_angle_to_quat(axis, angle) elif ori_repr == 'dcm': quat = dcm_to_quat(orientation['dcm']) else: raise ValueError( 'Unrecognized", "normalize_quat(q) def get_identity_quat(): return np.array([1.0, 0.0, 0.0, 0.0]) def get_rand_quat(): axis, angle =", "frame A. 
''' return np.dot(B, A.T) def dcm_to_quat(dcm): mat = get_rotmat_dcm(dcm) axis, angle", "rotmat[2,0] = 2*(q1q3 - q0q2) rotmat[2,1] = 2*(q2q3 + q0q1) rotmat[2,2] = 2*(q0sq", "quat, forward=False): shiftmat = get_shiftmat_quat(quat, forward=forward) return np.einsum('ip,jq,pq', shiftmat, shiftmat, a) def shift_tensor3_quat(a,", "angle: A uniform random number from [0.0, 2*pi) angle = 2.0*math.pi*np.random.random() while True:", "0.0 u2 = 0.0 return fix_axis_angle(np.array([u0, u1, u2]), angle, normalize=True) def shift_vector_axis_angle(v, axis,", "q0, q1], [ q2, -q1, q0]]) #Other functions------------------------------------------------------ def translate(v, delta): ''' Translates", "returns it as a copy. ''' p = np.copy(q) return normalize_quat(p) def quat_is_normalized(q):", "R = np.zeros((3,3)) sin = np.sin(angle) cos = np.cos(angle) icos = 1.0 -", "raise ValueError( 'Unrecognized orientation repr {0}'.format(ori_repr)) return quat def rotate_vector_quat(v, q): rotmat =", "forward=forward) return np.einsum('ip,jq,pq', shiftmat, shiftmat, a) def shift_tensor3_euler(a, euler, forward=False): shiftmat = get_shiftmat_euler(euler,", "repr {0}'.format(ori_repr)) return quat def rotate_vector_quat(v, q): rotmat = get_rotmat_quat(q) return np.dot(v, rotmat.T)", "np.array(orientation['quat']) axis, angle = quat_to_axis_angle(quat) elif ori_repr == 'euler': euler = np.array(orientation['euler']) seq", "euler = np.array(orientation['euler']) seq = orientation['seq'] world = orientation['world'] axis, angle = euler_to_axis_angle(euler,", "orientation['repr'] if ori_repr == 'quat': quat = np.array(orientation['quat']) axis, angle = quat_to_axis_angle(quat) elif", "delta return v def align(v, old, new): ''' old and new represent coordinate", "= s*rotmat[0,2] elif k == 1: u1 = math.sqrt(rotmat[1,1]-rotmat[0,0]-rotmat[2,2]+1)/2 s = 1.0/(2*u1) u0", "axis = q[1:4]/sin else: rotmat = get_rotmat_quat(q) axis, angle = extract_axis_angle_from_rotmat(rotmat) else: axis", "in Blender. 
#In contrast, the XYZ sequence is understood in the Aerospace community", "orientation repr {0}'.format(ori_repr)) return axis, angle def rotate_vector_axis_angle(v, axis, angle): ''' Rotates vectors", "return np.einsum('ip,jq,pq', shiftmat, shiftmat, a) def shift_tensor3_quat(a, quat, forward=False): shiftmat = get_shiftmat_quat(quat, forward=forward)", "factorize_rotmat(rotmat, seq=seq, world=world) return euler def axis_angle_to_dcm(axis, angle): dcm = get_shiftmat_axis_angle(axis, angle, forward=True)", "np.empty((3,3)) q0sq = q[0]**2 q1sq = q[1]**2 q2sq = q[2]**2 q3sq = q[3]**2", "world=world) if forward: shiftmat = rotmat.T else: shiftmat = rotmat return shiftmat #Quaternion-----------------------------------------------------------", "def ang_vel_to_quat_deriv(q, ang_vel): mat = ang_vel_to_quat_deriv_mat(q) qdot = np.dot(mat, ang_vel) return qdot def", "def get_quat_prod(p, q): p0, p1, p2, p3 = tuple(p) prod_mat = np.array([[p0, -p1,", "ori_repr = orientation['repr'] if ori_repr == 'quat': quat = np.array(orientation['quat']) axis, angle =", "world = orientation['world'] euler = euler_to_euler(euler, seq, world, to_seq, to_world) elif ori_repr ==", "= get_shiftmat_axis_angle(axis, angle, forward=forward) return np.einsum('ip,jq,kr,pqr', shiftmat, shiftmat, shiftmat, a) def get_shiftmat_axis_angle(axis, angle,", "mat = get_rotmat_dcm(dcm) euler = factorize_rotmat(mat, seq=seq, world=world) return euler def dcm_to_axis_angle(dcm): mat", "p[1:4] = -p[1:4] return p def invert_quat(q): ''' Inverts a quaternion in-place. 
'''", ": (3,3) ndarray The rows of B represent the orthonormal basis vectors of", "ndarray The rows of A represent the orthonormal basis vectors of frame A.", "not forward: shiftmat = shiftmat.T return shiftmat #Euler angle----------------------------------------------------------- def factorize_rotmat(rotmat, seq='XYZ', world=True):", "= orientation['angle'] euler = axis_angle_to_euler(axis, angle, seq=to_seq, world=to_world) elif ori_repr == 'dcm': euler", "w.r.t. frame A. ''' return np.dot(B, A.T) def dcm_to_quat(dcm): mat = get_rotmat_dcm(dcm) axis,", "dcm = dcm_from_axes(old, new) return rotate_vector_dcm(v, dcm) def mat_is_dcm(mat): return mat_is_rotmat(mat) def mat_is_rotmat(mat):", "= get_shiftmat_quat(q, forward=forward) return np.dot(v, shiftmat.T) def shift_tensor2_quat(a, quat, forward=False): shiftmat = get_shiftmat_quat(quat,", "shiftmat, shiftmat, a) def get_shiftmat_axis_angle(axis, angle, forward=False): shiftmat = get_rotmat_axis_angle(-axis, angle) if not", "extract_axis_angle_from_rotmat(rotmat) return (axis, angle) def any_to_euler(orientation, to_seq, to_world): ori_repr = orientation['repr'] if ori_repr", "seq = orientation['seq'] world = orientation['world'] quat = euler_to_quat(euler, seq=seq, world=world) elif ori_repr", "= np.copy(q) return conjugate_quat(p) def normalize_quat(q): ''' Normalizes a quaternion in-place. 
''' q", "math.isclose(norm, 1.0, rel_tol=1e-14): return True else: return False def get_quat_prod(p, q): p0, p1,", "orientation['angle'] dcm = axis_angle_to_dcm(axis, angle) elif ori_repr == 'dcm': dcm = dcm_to_quat(orientation['dcm']) else:", "if not forward: shiftmat = shiftmat.T return shiftmat #Direction cosine matrix----------------------------------------------- def dcm_from_axes(A,", "np.copy(q) p[1:4] = -p[1:4] return p def invert_quat(q): ''' Inverts a quaternion in-place.", "get_rotmat_quat(q) return np.dot(v, rotmat.T) def get_rotmat_quat(q): rotmat = np.empty((3,3)) q0sq = q[0]**2 q1sq", "quat_to_euler(quat, seq=to_seq, world=to_world) elif ori_repr == 'euler': euler = np.array(orientation['euler']) seq = orientation['seq']", "Translates vectors inplace by delta. ''' n = v.shape[0] for i in range(n):", "def get_rotmat_dcm(dcm): return dcm.T def shift_vector_dcm(v, dcm, forward=False): shiftmat = get_shiftmat_dcm(dcm, forward=forward) return", "= 2*(q1q2 + q0q3) rotmat[1,1] = 2*(q0sq + q2sq) - 1.0 rotmat[1,2] =", "forward=False): shiftmat = get_shiftmat_euler(euler, forward=forward) return np.einsum('ip,jq,kr,pqr', shiftmat, shiftmat, shiftmat, a) def get_shiftmat_euler(euler,", "''' Rotates vectors about axis by angle. 
''' rotmat = get_rotmat_euler(euler, seq=seq, world=world)", "angle) if not forward: shiftmat = shiftmat.T return shiftmat #Direction cosine matrix----------------------------------------------- def", "orientation['world'] euler = euler_to_euler(euler, seq, world, to_seq, to_world) elif ori_repr == 'axis_angle': axis", "seq=seq, world=world) def euler_to_euler(euler, seq, world, to_seq, to_world): rotmat = get_rotmat_euler(euler, seq=seq, world=world)", "rotmat.T) def get_rotmat_axis_angle(axis, angle): R = np.zeros((3,3)) sin = np.sin(angle) cos = np.cos(angle)", "== 'quat': quat = np.array(orientation['quat']) axis, angle = quat_to_axis_angle(quat) elif ori_repr == 'euler':", "''' rotmat = get_rotmat_euler(euler, seq=seq, world=world) return np.dot(v, rotmat.T) def get_rotmat_euler(euler, seq='XYZ', world=True):", "fix_axis_angle(np.array([u0, u1, u2]), angle, normalize=True) def shift_vector_axis_angle(v, axis, angle, forward=False): shiftmat = get_shiftmat_axis_angle(axis,", "q[1]*q[3] q2q3 = q[2]*q[3] rotmat[0,0] = 2*(q0sq + q1sq) - 1.0 rotmat[0,1] =", "def any_to_axis_angle(orientation): ori_repr = orientation['repr'] if ori_repr == 'quat': quat = np.array(orientation['quat']) axis,", "+ q0q3) rotmat[1,1] = 2*(q0sq + q2sq) - 1.0 rotmat[1,2] = 2*(q2q3 -", "1: u1 = math.sqrt(rotmat[1,1]-rotmat[0,0]-rotmat[2,2]+1)/2 s = 1.0/(2*u1) u0 = s*rotmat[0,1] u2 = s*rotmat[1,2]", "= np.array(orientation['quat']) euler = quat_to_euler(quat, seq=to_seq, world=to_world) elif ori_repr == 'euler': euler =", "= shiftmat.T return shiftmat #Direction cosine matrix----------------------------------------------- def dcm_from_axes(A, B): ''' Returns the", "the third rotation about Z axis of the world(i.e. fixed) frame. 
#This is", "get_shiftmat_axis_angle(axis, angle, forward=forward) return np.dot(v, shiftmat.T) def shift_tensor2_axis_angle(a, axis, angle, forward=False): shiftmat =", "q0q3) rotmat[0,2] = 2*(q1q3 + q0q2) rotmat[1,0] = 2*(q1q2 + q0q3) rotmat[1,1] =", "largest entry in the diagonal of rotmat k = np.argmax(np.diag(rotmat)) if k ==", "dcm, forward=False): shiftmat = get_shiftmat_dcm(dcm, forward=forward) return np.einsum('ip,jq,kr,pqr', shiftmat, shiftmat, shiftmat, a) def", "zeta2**2 if zetasq <= 1.0: break rt = np.sqrt(1.0-zetasq) axis[0] = 2.0*zeta1*rt axis[1]", "get_shiftmat_axis_angle(axis, angle, forward=forward) return np.einsum('ip,jq,pq', shiftmat, shiftmat, a) def shift_tensor3_axis_angle(a, axis, angle, forward=False):", "def shift_tensor3_euler(a, euler, forward=False): shiftmat = get_shiftmat_euler(euler, forward=forward) return np.einsum('ip,jq,kr,pqr', shiftmat, shiftmat, shiftmat,", "q[1:4]/sin else: rotmat = get_rotmat_quat(q) axis, angle = extract_axis_angle_from_rotmat(rotmat) else: axis = np.array([1.0,", "= s*rotmat[1,2] elif k == 2: u2 = math.sqrt(rotmat[2,2]-rotmat[0,0]-rotmat[1,1]+1)/2 s = 1.0/(2*u2) u0", "= q[1]**2 q2sq = q[2]**2 q3sq = q[3]**2 q0q1 = q[0]*q[1] q0q2 =", "= orientation['world'] axis, angle = euler_to_axis_angle(euler, seq=seq, world=world) elif ori_repr == 'axis_angle': axis", "= np.zeros((3,3)) sin = np.sin(angle) cos = np.cos(angle) icos = 1.0 - cos", "euler def axis_angle_to_dcm(axis, angle): dcm = get_shiftmat_axis_angle(axis, angle, forward=True) return dcm def any_to_axis_angle(orientation):", "get_shiftmat_euler(euler, seq=seq, world=world, forward=forward) return np.dot(v, shiftmat.T) def shift_tensor2_euler(a, euler, forward=False): shiftmat =", "axis, angle = get_rand_axis_angle() return axis_angle_to_quat(axis, angle) def get_perturbed_quat(q): raise NotImplementedError def quat_to_axis_angle(q):", "q2sq) - 1.0 rotmat[1,2] = 2*(q2q3 - q0q1) rotmat[2,0] = 2*(q1q3 - q0q2)", "quat_deriv_to_ang_vel_mat(q): q0, q1, q2, 
q3 = tuple(q) return 2*np.array([[-q1, q0, -q3, q2], [-q2,", "w = math.cos(angle/2) v = math.sin(angle/2)*axis q = np.array([w, v[0], v[1], v[2]]) return", "np.einsum('ip,jq,pq', shiftmat, shiftmat, a) def shift_tensor3_quat(a, quat, forward=False): shiftmat = get_shiftmat_quat(quat, forward=forward) return", "pair of axis-angle. The axis is a random vector from the surface of", "a quaternion and returns a copy. ''' p = np.copy(q) p[1:4] = -p[1:4]", "np.array(orientation['euler']) seq = orientation['seq'] world = orientation['world'] quat = euler_to_quat(euler, seq=seq, world=world) elif", "+ cos return R def extract_axis_angle_from_rotmat(rotmat): trace = np.trace(rotmat) angle = math.acos((trace-1)/2) if", "any_to_quat(orientation): ori_repr = orientation['repr'] if ori_repr == 'quat': quat = np.array(orientation['quat']) elif ori_repr", "2*math.pi - angle axis = -axis return (axis, angle) def get_rand_axis_angle(): ''' Generates", "q3sq = q[3]**2 q0q1 = q[0]*q[1] q0q2 = q[0]*q[2] q0q3 = q[0]*q[3] q1q2", "= -axis return (axis, angle) def get_rand_axis_angle(): ''' Generates a random pair of", "elif k == 2: u2 = math.sqrt(rotmat[2,2]-rotmat[0,0]-rotmat[1,1]+1)/2 s = 1.0/(2*u2) u0 = s*rotmat[0,2]", "np from . import linalg as la from . 
import eulang #Euler angle", "= shiftmat.T return shiftmat #Euler angle----------------------------------------------------------- def factorize_rotmat(rotmat, seq='XYZ', world=True): return eulang.factor_rotmat(rotmat, seq=seq,", "p1, p0]]) pq = normalize_quat(np.dot(prod_mat, q)) return pq def interpolate_quat(q1, q2, t): theta", "normalize=True): if normalize: norm = np.linalg.norm(axis) if not math.isclose(norm, 1.0, abs_tol=1e-14, rel_tol=1e-14): axis", "= np.array([[p0, -p1, -p2, -p3], [p1, p0, -p3, p2], [p2, p3, p0, -p1],", "= factorize_rotmat(mat, seq=seq, world=world) return euler def dcm_to_axis_angle(dcm): mat = get_rotmat_dcm(dcm) axis, angle", "seq, world, to_seq, to_world): rotmat = get_rotmat_euler(euler, seq=seq, world=world) return factorize_rotmat(rotmat, seq=to_seq, world=to_world)", "shiftmat.T) def shift_tensor2_euler(a, euler, forward=False): shiftmat = get_shiftmat_euler(euler, forward=forward) return np.einsum('ip,jq,pq', shiftmat, shiftmat,", "def quat_to_axis_angle(q): angle = 2*math.acos(q[0]) sin = math.sqrt(1.0-q[0]**2) if angle > 0.0: if", "angle = -angle axis = -axis if angle > math.pi: angle = 2*math.pi", "rotmat[1,2] = 2*(q2q3 - q0q1) rotmat[2,0] = 2*(q1q3 - q0q2) rotmat[2,1] = 2*(q2q3", "a quaternion in-place. ''' return conjugate_quat(q) def get_inverted_quat(q): ''' Inverts a quaternion and", "< math.pi: u0 = rotmat[2,1] - rotmat[1,2] u1 = rotmat[0,2] - rotmat[2,0] u2", "q0, -q1], [-q3, -q2, q1, q0]]) def ang_vel_to_quat_deriv(q, ang_vel): mat = ang_vel_to_quat_deriv_mat(q) qdot", "frame) B w.r.t. axes(i.e. frame) A. 
Parameters ---------- A : (3,3) ndarray The", "if forward: shiftmat = get_rotmat_quat(get_conjugated_quat(q)) else: shiftmat = get_rotmat_quat(q) return shiftmat def conjugate_quat(q):", "angle, forward=False): shiftmat = get_shiftmat_axis_angle(axis, angle, forward=forward) return np.einsum('ip,jq,pq', shiftmat, shiftmat, a) def", "= np.array(orientation['axis']) angle = orientation['angle'] elif ori_repr == 'dcm': axis, angle = dcm_to_axis_angle(orientation['dcm'])", "= get_shiftmat_quat(quat, forward=forward) return np.einsum('ip,jq,pq', shiftmat, shiftmat, a) def shift_tensor3_quat(a, quat, forward=False): shiftmat", "and returns a copy. ''' p = np.copy(q) p[1:4] = -p[1:4] return p", "1.0 zeta2 = 2.0*np.random.random() - 1.0 zetasq = zeta1**2 + zeta2**2 if zetasq", "rows of A represent the orthonormal basis vectors of frame A. B :", "np.einsum('ip,jq,kr,pqr', shiftmat, shiftmat, shiftmat, a) def get_shiftmat_euler(euler, seq='XYZ', world=True, forward=False): rotmat = get_rotmat_euler(euler,", "euler_to_dcm(euler, seq=seq, world=world) elif ori_repr == 'axis_angle': axis = np.array(orientation['axis']) angle = orientation['angle']", "forward=forward) return np.dot(v, shiftmat.T) def shift_tensor2_quat(a, quat, forward=False): shiftmat = get_shiftmat_quat(quat, forward=forward) return", "quat_to_euler(q, seq='XYZ', world=True): rotmat = get_rotmat_quat(q) return factorize_rotmat(rotmat, seq=seq, world=world) def quat_to_dcm(q): return", "qdot def ang_vel_to_quat_deriv_mat(q): q0, q1, q2, q3 = tuple(q) return 0.5*np.array([[-q1, -q2, -q3],", "def quat_to_dcm(q): return get_shiftmat_quat(q, forward=True) def any_to_quat(orientation): ori_repr = orientation['repr'] if ori_repr ==", "p0, p1, p2, p3 = tuple(p) prod_mat = np.array([[p0, -p1, -p2, -p3], [p1,", "shiftmat, a) def get_shiftmat_axis_angle(axis, angle, forward=False): shiftmat = get_rotmat_axis_angle(-axis, angle) if not forward:", "''' p = np.copy(q) p[1:4] = -p[1:4] return p def invert_quat(q): ''' 
Inverts", "axis[0]*axis[0]*icos + cos R[0,1] = axis[0]*axis[1]*icos - axis[2]*sin R[0,2] = axis[0]*axis[2]*icos + axis[1]*sin", "z_old = la.unitized(np.cross(old[0,:], old[1,:])) z_new = la.unitized(np.cross(new[0,:], new[1,:])) axes_old = np.vstack((old, z_old)) axes_new", "return False def get_quat_prod(p, q): p0, p1, p2, p3 = tuple(p) prod_mat =", "n = v.shape[0] for i in range(n): v[i,:] += delta return v def", "and new represent coordinate axes. They must be unit vectors. ''' assert old.shape[0]", "math.fmod(angle, 2*math.pi) if angle < 0.0: angle = -angle axis = -axis if", "axis[1] = 2.0*zeta2*rt axis[2] = 1.0 - 2.0*zetasq return fix_axis_angle(axis, angle) def axis_angle_to_quat(axis,", "normalize_quat(np.dot(prod_mat, q)) return pq def interpolate_quat(q1, q2, t): theta = get_angle_between_quat(q1, q2) q", "rotmat[0,1] = 2*(q1q2 - q0q3) rotmat[0,2] = 2*(q1q3 + q0q2) rotmat[1,0] = 2*(q1q2", "shiftmat = get_rotmat_quat(get_conjugated_quat(q)) else: shiftmat = get_rotmat_quat(q) return shiftmat def conjugate_quat(q): ''' Conjugates", "def dcm_to_euler(dcm, seq='XYZ', world=True): mat = get_rotmat_dcm(dcm) euler = factorize_rotmat(mat, seq=seq, world=world) return", "return shiftmat #Euler angle----------------------------------------------------------- def factorize_rotmat(rotmat, seq='XYZ', world=True): return eulang.factor_rotmat(rotmat, seq=seq, world=world) def", "seq='XYZ', world=True): axis, angle = euler_to_axis_angle(euler, seq=seq, world=world) return axis_angle_to_quat(axis, angle) def euler_to_dcm(euler,", "shiftmat.T) def shift_tensor2_axis_angle(a, axis, angle, forward=False): shiftmat = get_shiftmat_axis_angle(axis, angle, forward=forward) return np.einsum('ip,jq,pq',", "get_rotmat_quat(q) axis, angle = extract_axis_angle_from_rotmat(rotmat) else: axis = np.array([1.0, 0.0, 0.0]) return fix_axis_angle(axis,", "a copy. 
''' p = np.copy(q) return normalize_quat(p) def quat_is_normalized(q): norm = np.linalg.norm(q)", "ori_repr = orientation['repr'] if ori_repr == 'quat': quat = np.array(orientation['quat']) euler = quat_to_euler(quat,", "return get_shiftmat_quat(q, forward=True) def any_to_quat(orientation): ori_repr = orientation['repr'] if ori_repr == 'quat': quat", "= np.array([w, v[0], v[1], v[2]]) return normalize_quat(q) def axis_angle_to_euler(axis, angle, seq='XYZ', world=True): rotmat", "ndarray The rows of B represent the orthonormal basis vectors of frame B.", "B : (3,3) ndarray The rows of B represent the orthonormal basis vectors", "get_shiftmat_axis_angle(axis, angle, forward=True) return dcm def any_to_axis_angle(orientation): ori_repr = orientation['repr'] if ori_repr ==", "= rotmat.T else: shiftmat = rotmat return shiftmat #Quaternion----------------------------------------------------------- def get_rand_quat(): q =", "q): p0, p1, p2, p3 = tuple(p) prod_mat = np.array([[p0, -p1, -p2, -p3],", "forward=False): shiftmat = get_shiftmat_axis_angle(axis, angle, forward=forward) return np.einsum('ip,jq,kr,pqr', shiftmat, shiftmat, shiftmat, a) def", "def shift_vector_quat(v, q, forward=False): shiftmat = get_shiftmat_quat(q, forward=forward) return np.dot(v, shiftmat.T) def shift_tensor2_quat(a,", "seq=seq, world=world) return euler def axis_angle_to_dcm(axis, angle): dcm = get_shiftmat_axis_angle(axis, angle, forward=True) return", "euler_to_euler(euler, seq, world, to_seq, to_world) elif ori_repr == 'axis_angle': axis = np.array(orientation['axis']) angle", "rotation about Z-axis, second rotation about Y-axis, and the third #rotation about X-axis", "(axis, angle) def any_to_dcm(orientation): ori_repr = orientation['repr'] if ori_repr == 'quat': quat =", "forward: shiftmat = shiftmat.T return shiftmat #Euler angle----------------------------------------------------------- def factorize_rotmat(rotmat, seq='XYZ', world=True): return", "np.einsum('ip,jq,kr,pqr', shiftmat, 
shiftmat, shiftmat, a) def get_shiftmat_dcm(dcm, forward=False): shiftmat = dcm if not", "return fix_axis_angle(np.array([u0, u1, u2]), angle, normalize=True) def shift_vector_axis_angle(v, axis, angle, forward=False): shiftmat =", "seq='XYZ', world=True): dcm = get_shiftmat_euler(euler, seq=seq, world=world, forward=True) return dcm def euler_to_axis_angle(euler, seq='XYZ',", "axes_new = np.vstack((new, z_new)) dcm = dcm_from_axes(axes_old, axes_new) return rotate_vector_dcm(v, dcm) elif n", "''' Inverts a quaternion and returns it as a new instance. ''' p" ]
[ "with open(args.csv, \"r\") as fl: for line in (l.strip() for l in fl):", "= hashlib.md5(mm).hexdigest().lower() mm.close() return FileDesc(full_path, item_type, item_size, stinf.st_mtime, checksum) def get_file_stats(base_dir, exclusions, get_checksums=None):", "fn: True, lambda fn: False with open(args.csv, \"r\") as fl: for line in", "for l in fl): if line: remote_fd = FileDesc(line.split(',', 4)) local_path = os.path.normpath(os.path.join(args.base_dir,", "'item_type', 'size', 'mtime', 'checksum']) def get_file_desc(full_path, want_checksum): try: stinf = os.stat(full_path) except: return", "remote_fd.checksum != local_fd.checksum: raise CompareError(\"download\", \"checksum\") if remote_fd.mtime != local_fd.mtime: os.utime(local_fd, (remote_fd.mtime, remote_fd.mtime))", "argp.parse_args(sys.argv[1:]) if not hasattr(args, 'func'): raise RuntimeError(\"No sub-command specified. See --help for assistance.\")", "do_checksum if local_fd.checksum else dont_checksum local_fd = get_file_desc(local_path, want_checksum) try: if not local_fd:", "action, reason): super().__init__(\"Compare failed\") self.action = action self.reason = reason def cmp_cmd(args): do_checksum,", "CompareError(\"download\", \"checksum\") if remote_fd.mtime != local_fd.mtime: os.utime(local_fd, (remote_fd.mtime, remote_fd.mtime)) raise CompareError(\"#touched\", \"mtime\") except", "True else: want_checksum = lambda filename: filename in get_checksums for base, dirs, files", "want_checksum) if fd: yield fd def ls_cmd(args): if args.checksum: get_checksums = [] else:", "== 'F' and item_size > 0 and want_checksum(full_path): with open(full_path, 'rb') as fl:", "lambda filename: True else: want_checksum = lambda filename: filename in get_checksums for base,", "elif not get_checksums: want_checksum = lambda filename: True else: want_checksum = lambda filename:", "if args.checksum: get_checksums = [] else: get_checksums = args.filepath or None for filedesc", "\"changed\") elif remote_fd.item_type 
== 'F': raise CompareError(\"download\", \"changed\") if remote_fd.size != local_fd.size: raise", "for base, dirs, files in os.walk(base_dir): if base == base_dir: # Skip top-level", "lscmd.add_argument(\"filepath\", action=\"append\", default=[], nargs='*', help=\"File paths to check\") lscmd.set_defaults(func=ls_cmd) cmpcmd = subp.add_parser(\"cmp\") lscmd.add_argument(\"csv\",", "or None for filedesc in get_file_stats(args.base_dir, exclusions, get_checksums): print(\"{},{},{},{},{}\".format(filedesc.item_type, filedesc.size, filedesc.mtime, filedesc.checksum, filedesc.full_path))", "CompareError(\"download\", \"missing\") if local_fd.item_type != remote_fd.item_type: if remote_fd.item_type == 'D': raise CompareError(\"mkdir\", \"changed\")", "lscmd.add_argument(\"--checksum\", action=\"store_true\", help=\"Force checksumming\") lscmd.add_argument(\"filepath\", action=\"append\", default=[], nargs='*', help=\"File paths to check\") lscmd.set_defaults(func=ls_cmd)", "remote_fd.mtime, remote_fd.full_path)) if __name__ == \"__main__\": argp = argparse.ArgumentParser(\"hasher\") argp.add_argument(\"--base-dir\", \"-C\", dest=\"base_dir\", default=\"/svn\",", "= lambda filename: filename in get_checksums for base, dirs, files in os.walk(base_dir): if", "if stat.S_ISREG(stinf.st_mode): item_type, item_size = 'F', stinf.st_size elif stat.S_ISDIR(stinf.st_mode): item_type, item_size = 'D',", "item_type, item_size = 'D', 0 else: return None checksum = 0 if item_type", "local_fd.checksum else dont_checksum local_fd = get_file_desc(local_path, want_checksum) try: if not local_fd: raise CompareError(\"download\",", "want_checksum): try: stinf = os.stat(full_path) except: return None if stat.S_ISREG(stinf.st_mode): item_type, item_size =", "raise CompareError(\"download\", \"missing\") if local_fd.item_type != remote_fd.item_type: if remote_fd.item_type == 'D': raise CompareError(\"mkdir\",", "False with open(args.csv, \"r\") as fl: for line in (l.strip() for l in", "if not 
local_fd: raise CompareError(\"download\", \"missing\") if local_fd.item_type != remote_fd.item_type: if remote_fd.item_type ==", "local_fd: raise CompareError(\"download\", \"missing\") if local_fd.item_type != remote_fd.item_type: if remote_fd.item_type == 'D': raise", "lscmd = subp.add_parser(\"ls\") lscmd.add_argument(\"--checksum\", action=\"store_true\", help=\"Force checksumming\") lscmd.add_argument(\"filepath\", action=\"append\", default=[], nargs='*', help=\"File paths", "Skip top-level cruft dirs[:] = [d for d in dirs if d not", "return FileDesc(full_path, item_type, item_size, stinf.st_mtime, checksum) def get_file_stats(base_dir, exclusions, get_checksums=None): if get_checksums is", "dirs, files in os.walk(base_dir): if base == base_dir: # Skip top-level cruft dirs[:]", "against\") args = argp.parse_args(sys.argv[1:]) if not hasattr(args, 'func'): raise RuntimeError(\"No sub-command specified. See", "= 0 if item_type == 'F' and item_size > 0 and want_checksum(full_path): with", "CompareError as e: print(\"%s,%s,%s,%s\" % (e.action, e.reason, remote_fd.mtime, remote_fd.full_path)) if __name__ == \"__main__\":", "lambda fn: True, lambda fn: False with open(args.csv, \"r\") as fl: for line", "\"checksum\") if remote_fd.mtime != local_fd.mtime: os.utime(local_fd, (remote_fd.mtime, remote_fd.mtime)) raise CompareError(\"#touched\", \"mtime\") except CompareError", "if base == base_dir: # Skip top-level cruft dirs[:] = [d for d", "filedesc.checksum, filedesc.full_path)) class CompareError(Exception): def __init__(self, action, reason): super().__init__(\"Compare failed\") self.action = action", "get_file_desc(full_path, want_checksum): try: stinf = os.stat(full_path) except: return None if stat.S_ISREG(stinf.st_mode): item_type, item_size", "args.checksum: get_checksums = [] else: get_checksums = args.filepath or None for filedesc in", "dont_checksum local_fd = get_file_desc(local_path, want_checksum) try: if not local_fd: raise CompareError(\"download\", 
\"missing\") if", "= action self.reason = reason def cmp_cmd(args): do_checksum, dont_checksum = lambda fn: True,", "if local_fd.item_type != remote_fd.item_type: if remote_fd.item_type == 'D': raise CompareError(\"mkdir\", \"changed\") elif remote_fd.item_type", "from collections import namedtuple exclusions = ('lost+found', 'restore', 'backup', 'Newsfeed', '.pki', 'Jenkins') joinpath", "True, lambda fn: False with open(args.csv, \"r\") as fl: for line in (l.strip()", "filedesc in get_file_stats(args.base_dir, exclusions, get_checksums): print(\"{},{},{},{},{}\".format(filedesc.item_type, filedesc.size, filedesc.mtime, filedesc.checksum, filedesc.full_path)) class CompareError(Exception): def", "if d not in exclusions] for filename in files: fd = get_file_desc(posixpath.join(base, filename),", "= ('lost+found', 'restore', 'backup', 'Newsfeed', '.pki', 'Jenkins') joinpath = posixpath.join FileDesc = namedtuple('FileDesc',", "'rb') as fl: mm = mmap.mmap(fl.fileno(), 0, access=mmap.ACCESS_READ) checksum = hashlib.md5(mm).hexdigest().lower() mm.close() return", "args.filepath or None for filedesc in get_file_stats(args.base_dir, exclusions, get_checksums): print(\"{},{},{},{},{}\".format(filedesc.item_type, filedesc.size, filedesc.mtime, filedesc.checksum,", "stat import sys from collections import namedtuple exclusions = ('lost+found', 'restore', 'backup', 'Newsfeed',", "stat.S_ISDIR(stinf.st_mode): item_type, item_size = 'D', 0 else: return None checksum = 0 if", "dirs[:] = [d for d in dirs if d not in exclusions] for", "hashlib import mmap import os import posixpath import stat import sys from collections", "= argparse.ArgumentParser(\"hasher\") argp.add_argument(\"--base-dir\", \"-C\", dest=\"base_dir\", default=\"/svn\", help=\"Base folder\") subp = argp.add_subparsers() lscmd =", "filedesc.size, filedesc.mtime, filedesc.checksum, filedesc.full_path)) class CompareError(Exception): def __init__(self, action, reason): super().__init__(\"Compare failed\") 
self.action", "= 'D', 0 else: return None checksum = 0 if item_type == 'F'", "for line in (l.strip() for l in fl): if line: remote_fd = FileDesc(line.split(',',", "= os.path.normpath(os.path.join(args.base_dir, remote_fd.full_path)) want_checksum = do_checksum if local_fd.checksum else dont_checksum local_fd = get_file_desc(local_path,", "help=\"Base folder\") subp = argp.add_subparsers() lscmd = subp.add_parser(\"ls\") lscmd.add_argument(\"--checksum\", action=\"store_true\", help=\"Force checksumming\") lscmd.add_argument(\"filepath\",", "check against\") args = argp.parse_args(sys.argv[1:]) if not hasattr(args, 'func'): raise RuntimeError(\"No sub-command specified.", "stat.S_ISREG(stinf.st_mode): item_type, item_size = 'F', stinf.st_size elif stat.S_ISDIR(stinf.st_mode): item_type, item_size = 'D', 0", "filename), want_checksum) if fd: yield fd def ls_cmd(args): if args.checksum: get_checksums = []", "if not hasattr(args, 'func'): raise RuntimeError(\"No sub-command specified. See --help for assistance.\") args.func(args)", "class CompareError(Exception): def __init__(self, action, reason): super().__init__(\"Compare failed\") self.action = action self.reason =", "'F', stinf.st_size elif stat.S_ISDIR(stinf.st_mode): item_type, item_size = 'D', 0 else: return None checksum", "mm.close() return FileDesc(full_path, item_type, item_size, stinf.st_mtime, checksum) def get_file_stats(base_dir, exclusions, get_checksums=None): if get_checksums", "= do_checksum if local_fd.checksum else dont_checksum local_fd = get_file_desc(local_path, want_checksum) try: if not", "CompareError(\"mkdir\", \"changed\") elif remote_fd.item_type == 'F': raise CompareError(\"download\", \"changed\") if remote_fd.size != local_fd.size:", "posixpath import stat import sys from collections import namedtuple exclusions = ('lost+found', 'restore',", "if remote_fd.checksum != local_fd.checksum: raise CompareError(\"download\", \"checksum\") if remote_fd.mtime != local_fd.mtime: 
os.utime(local_fd, (remote_fd.mtime,", "item_size = 'F', stinf.st_size elif stat.S_ISDIR(stinf.st_mode): item_type, item_size = 'D', 0 else: return", "def get_file_desc(full_path, want_checksum): try: stinf = os.stat(full_path) except: return None if stat.S_ISREG(stinf.st_mode): item_type,", "== \"__main__\": argp = argparse.ArgumentParser(\"hasher\") argp.add_argument(\"--base-dir\", \"-C\", dest=\"base_dir\", default=\"/svn\", help=\"Base folder\") subp =", "in files: fd = get_file_desc(posixpath.join(base, filename), want_checksum) if fd: yield fd def ls_cmd(args):", "if local_fd.checksum else dont_checksum local_fd = get_file_desc(local_path, want_checksum) try: if not local_fd: raise", "to check against\") args = argp.parse_args(sys.argv[1:]) if not hasattr(args, 'func'): raise RuntimeError(\"No sub-command", "('lost+found', 'restore', 'backup', 'Newsfeed', '.pki', 'Jenkins') joinpath = posixpath.join FileDesc = namedtuple('FileDesc', ['full_path',", "filedesc.mtime, filedesc.checksum, filedesc.full_path)) class CompareError(Exception): def __init__(self, action, reason): super().__init__(\"Compare failed\") self.action =", "return None if stat.S_ISREG(stinf.st_mode): item_type, item_size = 'F', stinf.st_size elif stat.S_ISDIR(stinf.st_mode): item_type, item_size", "!= remote_fd.item_type: if remote_fd.item_type == 'D': raise CompareError(\"mkdir\", \"changed\") elif remote_fd.item_type == 'F':", "self.action = action self.reason = reason def cmp_cmd(args): do_checksum, dont_checksum = lambda fn:", "__name__ == \"__main__\": argp = argparse.ArgumentParser(\"hasher\") argp.add_argument(\"--base-dir\", \"-C\", dest=\"base_dir\", default=\"/svn\", help=\"Base folder\") subp", "get_checksums for base, dirs, files in os.walk(base_dir): if base == base_dir: # Skip", "raise CompareError(\"download\", \"checksum\") if remote_fd.mtime != local_fd.mtime: os.utime(local_fd, (remote_fd.mtime, remote_fd.mtime)) raise CompareError(\"#touched\", \"mtime\")", "(l.strip() for 
l in fl): if line: remote_fd = FileDesc(line.split(',', 4)) local_path =", "argp.add_subparsers() lscmd = subp.add_parser(\"ls\") lscmd.add_argument(\"--checksum\", action=\"store_true\", help=\"Force checksumming\") lscmd.add_argument(\"filepath\", action=\"append\", default=[], nargs='*', help=\"File", "0, access=mmap.ACCESS_READ) checksum = hashlib.md5(mm).hexdigest().lower() mm.close() return FileDesc(full_path, item_type, item_size, stinf.st_mtime, checksum) def", "[d for d in dirs if d not in exclusions] for filename in", "help=\"Force checksumming\") lscmd.add_argument(\"filepath\", action=\"append\", default=[], nargs='*', help=\"File paths to check\") lscmd.set_defaults(func=ls_cmd) cmpcmd =", "print(\"{},{},{},{},{}\".format(filedesc.item_type, filedesc.size, filedesc.mtime, filedesc.checksum, filedesc.full_path)) class CompareError(Exception): def __init__(self, action, reason): super().__init__(\"Compare failed\")", "remote_fd.size != local_fd.size: raise CompareError(\"download\", \"size\") if remote_fd.checksum != local_fd.checksum: raise CompareError(\"download\", \"checksum\")", "if item_type == 'F' and item_size > 0 and want_checksum(full_path): with open(full_path, 'rb')", "filename: filename in get_checksums for base, dirs, files in os.walk(base_dir): if base ==", "!= local_fd.size: raise CompareError(\"download\", \"size\") if remote_fd.checksum != local_fd.checksum: raise CompareError(\"download\", \"checksum\") if", "fd def ls_cmd(args): if args.checksum: get_checksums = [] else: get_checksums = args.filepath or", "is None: want_checksum = lambda filename: False elif not get_checksums: want_checksum = lambda", "l in fl): if line: remote_fd = FileDesc(line.split(',', 4)) local_path = os.path.normpath(os.path.join(args.base_dir, remote_fd.full_path))", "stinf.st_mtime, checksum) def get_file_stats(base_dir, exclusions, get_checksums=None): if get_checksums is None: want_checksum = lambda", "open(args.csv, \"r\") as fl: for line in (l.strip() for 
l in fl): if", "as fl: mm = mmap.mmap(fl.fileno(), 0, access=mmap.ACCESS_READ) checksum = hashlib.md5(mm).hexdigest().lower() mm.close() return FileDesc(full_path,", "import namedtuple exclusions = ('lost+found', 'restore', 'backup', 'Newsfeed', '.pki', 'Jenkins') joinpath = posixpath.join", "0 else: return None checksum = 0 if item_type == 'F' and item_size", "want_checksum = lambda filename: False elif not get_checksums: want_checksum = lambda filename: True", "filename: False elif not get_checksums: want_checksum = lambda filename: True else: want_checksum =", "= subp.add_parser(\"cmp\") lscmd.add_argument(\"csv\", type=str, help=\"CSV file to check against\") args = argp.parse_args(sys.argv[1:]) if", "argparse.ArgumentParser(\"hasher\") argp.add_argument(\"--base-dir\", \"-C\", dest=\"base_dir\", default=\"/svn\", help=\"Base folder\") subp = argp.add_subparsers() lscmd = subp.add_parser(\"ls\")", "as e: print(\"%s,%s,%s,%s\" % (e.action, e.reason, remote_fd.mtime, remote_fd.full_path)) if __name__ == \"__main__\": argp", "e: print(\"%s,%s,%s,%s\" % (e.action, e.reason, remote_fd.mtime, remote_fd.full_path)) if __name__ == \"__main__\": argp =", "top-level cruft dirs[:] = [d for d in dirs if d not in", "'mtime', 'checksum']) def get_file_desc(full_path, want_checksum): try: stinf = os.stat(full_path) except: return None if", "try: if not local_fd: raise CompareError(\"download\", \"missing\") if local_fd.item_type != remote_fd.item_type: if remote_fd.item_type", "% (e.action, e.reason, remote_fd.mtime, remote_fd.full_path)) if __name__ == \"__main__\": argp = argparse.ArgumentParser(\"hasher\") argp.add_argument(\"--base-dir\",", "item_type == 'F' and item_size > 0 and want_checksum(full_path): with open(full_path, 'rb') as", "None checksum = 0 if item_type == 'F' and item_size > 0 and", "sys from collections import namedtuple exclusions = ('lost+found', 'restore', 'backup', 'Newsfeed', '.pki', 'Jenkins')", "files in os.walk(base_dir): if base == base_dir: # 
Skip top-level cruft dirs[:] =", "local_fd.mtime: os.utime(local_fd, (remote_fd.mtime, remote_fd.mtime)) raise CompareError(\"#touched\", \"mtime\") except CompareError as e: print(\"%s,%s,%s,%s\" %", "CompareError(\"download\", \"changed\") if remote_fd.size != local_fd.size: raise CompareError(\"download\", \"size\") if remote_fd.checksum != local_fd.checksum:", "open(full_path, 'rb') as fl: mm = mmap.mmap(fl.fileno(), 0, access=mmap.ACCESS_READ) checksum = hashlib.md5(mm).hexdigest().lower() mm.close()", "filename in files: fd = get_file_desc(posixpath.join(base, filename), want_checksum) if fd: yield fd def", "print(\"%s,%s,%s,%s\" % (e.action, e.reason, remote_fd.mtime, remote_fd.full_path)) if __name__ == \"__main__\": argp = argparse.ArgumentParser(\"hasher\")", "base == base_dir: # Skip top-level cruft dirs[:] = [d for d in", "None: want_checksum = lambda filename: False elif not get_checksums: want_checksum = lambda filename:", "hashlib.md5(mm).hexdigest().lower() mm.close() return FileDesc(full_path, item_type, item_size, stinf.st_mtime, checksum) def get_file_stats(base_dir, exclusions, get_checksums=None): if", "get_checksums): print(\"{},{},{},{},{}\".format(filedesc.item_type, filedesc.size, filedesc.mtime, filedesc.checksum, filedesc.full_path)) class CompareError(Exception): def __init__(self, action, reason): super().__init__(\"Compare", "subp = argp.add_subparsers() lscmd = subp.add_parser(\"ls\") lscmd.add_argument(\"--checksum\", action=\"store_true\", help=\"Force checksumming\") lscmd.add_argument(\"filepath\", action=\"append\", default=[],", "get_file_desc(local_path, want_checksum) try: if not local_fd: raise CompareError(\"download\", \"missing\") if local_fd.item_type != remote_fd.item_type:", "\"r\") as fl: for line in (l.strip() for l in fl): if line:", "== 'F': raise CompareError(\"download\", \"changed\") if remote_fd.size != local_fd.size: raise CompareError(\"download\", \"size\") if", "\"missing\") if local_fd.item_type != 
remote_fd.item_type: if remote_fd.item_type == 'D': raise CompareError(\"mkdir\", \"changed\") elif", "paths to check\") lscmd.set_defaults(func=ls_cmd) cmpcmd = subp.add_parser(\"cmp\") lscmd.add_argument(\"csv\", type=str, help=\"CSV file to check", "remote_fd.item_type == 'D': raise CompareError(\"mkdir\", \"changed\") elif remote_fd.item_type == 'F': raise CompareError(\"download\", \"changed\")", "if remote_fd.size != local_fd.size: raise CompareError(\"download\", \"size\") if remote_fd.checksum != local_fd.checksum: raise CompareError(\"download\",", "'backup', 'Newsfeed', '.pki', 'Jenkins') joinpath = posixpath.join FileDesc = namedtuple('FileDesc', ['full_path', 'item_type', 'size',", "else dont_checksum local_fd = get_file_desc(local_path, want_checksum) try: if not local_fd: raise CompareError(\"download\", \"missing\")", "except: return None if stat.S_ISREG(stinf.st_mode): item_type, item_size = 'F', stinf.st_size elif stat.S_ISDIR(stinf.st_mode): item_type,", "remote_fd.mtime)) raise CompareError(\"#touched\", \"mtime\") except CompareError as e: print(\"%s,%s,%s,%s\" % (e.action, e.reason, remote_fd.mtime,", "def ls_cmd(args): if args.checksum: get_checksums = [] else: get_checksums = args.filepath or None", "cmpcmd = subp.add_parser(\"cmp\") lscmd.add_argument(\"csv\", type=str, help=\"CSV file to check against\") args = argp.parse_args(sys.argv[1:])", "lscmd.set_defaults(func=ls_cmd) cmpcmd = subp.add_parser(\"cmp\") lscmd.add_argument(\"csv\", type=str, help=\"CSV file to check against\") args =", "filename: True else: want_checksum = lambda filename: filename in get_checksums for base, dirs,", "joinpath = posixpath.join FileDesc = namedtuple('FileDesc', ['full_path', 'item_type', 'size', 'mtime', 'checksum']) def get_file_desc(full_path,", "= [d for d in dirs if d not in exclusions] for filename", "args = argp.parse_args(sys.argv[1:]) if not hasattr(args, 'func'): raise RuntimeError(\"No sub-command specified. 
See --help", "'size', 'mtime', 'checksum']) def get_file_desc(full_path, want_checksum): try: stinf = os.stat(full_path) except: return None", "os import posixpath import stat import sys from collections import namedtuple exclusions =", "'D', 0 else: return None checksum = 0 if item_type == 'F' and", "argp.add_argument(\"--base-dir\", \"-C\", dest=\"base_dir\", default=\"/svn\", help=\"Base folder\") subp = argp.add_subparsers() lscmd = subp.add_parser(\"ls\") lscmd.add_argument(\"--checksum\",", "stinf = os.stat(full_path) except: return None if stat.S_ISREG(stinf.st_mode): item_type, item_size = 'F', stinf.st_size", "remote_fd.item_type: if remote_fd.item_type == 'D': raise CompareError(\"mkdir\", \"changed\") elif remote_fd.item_type == 'F': raise", "os.utime(local_fd, (remote_fd.mtime, remote_fd.mtime)) raise CompareError(\"#touched\", \"mtime\") except CompareError as e: print(\"%s,%s,%s,%s\" % (e.action,", "get_file_stats(base_dir, exclusions, get_checksums=None): if get_checksums is None: want_checksum = lambda filename: False elif", "collections import namedtuple exclusions = ('lost+found', 'restore', 'backup', 'Newsfeed', '.pki', 'Jenkins') joinpath =", "= subp.add_parser(\"ls\") lscmd.add_argument(\"--checksum\", action=\"store_true\", help=\"Force checksumming\") lscmd.add_argument(\"filepath\", action=\"append\", default=[], nargs='*', help=\"File paths to", "not local_fd: raise CompareError(\"download\", \"missing\") if local_fd.item_type != remote_fd.item_type: if remote_fd.item_type == 'D':", "= lambda filename: False elif not get_checksums: want_checksum = lambda filename: True else:", "exclusions, get_checksums=None): if get_checksums is None: want_checksum = lambda filename: False elif not", "[] else: get_checksums = args.filepath or None for filedesc in get_file_stats(args.base_dir, exclusions, get_checksums):", "local_fd.item_type != remote_fd.item_type: if remote_fd.item_type == 'D': raise CompareError(\"mkdir\", \"changed\") elif 
remote_fd.item_type ==", "item_type, item_size = 'F', stinf.st_size elif stat.S_ISDIR(stinf.st_mode): item_type, item_size = 'D', 0 else:", "'Newsfeed', '.pki', 'Jenkins') joinpath = posixpath.join FileDesc = namedtuple('FileDesc', ['full_path', 'item_type', 'size', 'mtime',", "/usr/bin/python3 import argparse import hashlib import mmap import os import posixpath import stat", "# Skip top-level cruft dirs[:] = [d for d in dirs if d", "do_checksum, dont_checksum = lambda fn: True, lambda fn: False with open(args.csv, \"r\") as", "None for filedesc in get_file_stats(args.base_dir, exclusions, get_checksums): print(\"{},{},{},{},{}\".format(filedesc.item_type, filedesc.size, filedesc.mtime, filedesc.checksum, filedesc.full_path)) class", "exclusions, get_checksums): print(\"{},{},{},{},{}\".format(filedesc.item_type, filedesc.size, filedesc.mtime, filedesc.checksum, filedesc.full_path)) class CompareError(Exception): def __init__(self, action, reason):", "= os.stat(full_path) except: return None if stat.S_ISREG(stinf.st_mode): item_type, item_size = 'F', stinf.st_size elif", "= lambda filename: True else: want_checksum = lambda filename: filename in get_checksums for", "fd: yield fd def ls_cmd(args): if args.checksum: get_checksums = [] else: get_checksums =", "'F' and item_size > 0 and want_checksum(full_path): with open(full_path, 'rb') as fl: mm", "namedtuple exclusions = ('lost+found', 'restore', 'backup', 'Newsfeed', '.pki', 'Jenkins') joinpath = posixpath.join FileDesc", "raise CompareError(\"download\", \"changed\") if remote_fd.size != local_fd.size: raise CompareError(\"download\", \"size\") if remote_fd.checksum !=", "reason def cmp_cmd(args): do_checksum, dont_checksum = lambda fn: True, lambda fn: False with", "d in dirs if d not in exclusions] for filename in files: fd", "remote_fd = FileDesc(line.split(',', 4)) local_path = os.path.normpath(os.path.join(args.base_dir, remote_fd.full_path)) want_checksum = do_checksum if local_fd.checksum", "in dirs if d 
not in exclusions] for filename in files: fd =", "stinf.st_size elif stat.S_ISDIR(stinf.st_mode): item_type, item_size = 'D', 0 else: return None checksum =", "dirs if d not in exclusions] for filename in files: fd = get_file_desc(posixpath.join(base,", "in get_checksums for base, dirs, files in os.walk(base_dir): if base == base_dir: #", "default=[], nargs='*', help=\"File paths to check\") lscmd.set_defaults(func=ls_cmd) cmpcmd = subp.add_parser(\"cmp\") lscmd.add_argument(\"csv\", type=str, help=\"CSV", "= args.filepath or None for filedesc in get_file_stats(args.base_dir, exclusions, get_checksums): print(\"{},{},{},{},{}\".format(filedesc.item_type, filedesc.size, filedesc.mtime,", "get_file_stats(args.base_dir, exclusions, get_checksums): print(\"{},{},{},{},{}\".format(filedesc.item_type, filedesc.size, filedesc.mtime, filedesc.checksum, filedesc.full_path)) class CompareError(Exception): def __init__(self, action,", "fn: False with open(args.csv, \"r\") as fl: for line in (l.strip() for l", "get_checksums=None): if get_checksums is None: want_checksum = lambda filename: False elif not get_checksums:", "dont_checksum = lambda fn: True, lambda fn: False with open(args.csv, \"r\") as fl:", "raise CompareError(\"mkdir\", \"changed\") elif remote_fd.item_type == 'F': raise CompareError(\"download\", \"changed\") if remote_fd.size !=", "> 0 and want_checksum(full_path): with open(full_path, 'rb') as fl: mm = mmap.mmap(fl.fileno(), 0,", "get_checksums: want_checksum = lambda filename: True else: want_checksum = lambda filename: filename in", "access=mmap.ACCESS_READ) checksum = hashlib.md5(mm).hexdigest().lower() mm.close() return FileDesc(full_path, item_type, item_size, stinf.st_mtime, checksum) def get_file_stats(base_dir,", "checksum = hashlib.md5(mm).hexdigest().lower() mm.close() return FileDesc(full_path, item_type, item_size, stinf.st_mtime, checksum) def get_file_stats(base_dir, exclusions,", "= 'F', stinf.st_size elif stat.S_ISDIR(stinf.st_mode): 
item_type, item_size = 'D', 0 else: return None", "if get_checksums is None: want_checksum = lambda filename: False elif not get_checksums: want_checksum", "exclusions] for filename in files: fd = get_file_desc(posixpath.join(base, filename), want_checksum) if fd: yield", "__init__(self, action, reason): super().__init__(\"Compare failed\") self.action = action self.reason = reason def cmp_cmd(args):", "= FileDesc(line.split(',', 4)) local_path = os.path.normpath(os.path.join(args.base_dir, remote_fd.full_path)) want_checksum = do_checksum if local_fd.checksum else", "CompareError(\"#touched\", \"mtime\") except CompareError as e: print(\"%s,%s,%s,%s\" % (e.action, e.reason, remote_fd.mtime, remote_fd.full_path)) if", "type=str, help=\"CSV file to check against\") args = argp.parse_args(sys.argv[1:]) if not hasattr(args, 'func'):", "'Jenkins') joinpath = posixpath.join FileDesc = namedtuple('FileDesc', ['full_path', 'item_type', 'size', 'mtime', 'checksum']) def", "FileDesc(line.split(',', 4)) local_path = os.path.normpath(os.path.join(args.base_dir, remote_fd.full_path)) want_checksum = do_checksum if local_fd.checksum else dont_checksum", "'F': raise CompareError(\"download\", \"changed\") if remote_fd.size != local_fd.size: raise CompareError(\"download\", \"size\") if remote_fd.checksum", "remote_fd.full_path)) want_checksum = do_checksum if local_fd.checksum else dont_checksum local_fd = get_file_desc(local_path, want_checksum) try:", "for filedesc in get_file_stats(args.base_dir, exclusions, get_checksums): print(\"{},{},{},{},{}\".format(filedesc.item_type, filedesc.size, filedesc.mtime, filedesc.checksum, filedesc.full_path)) class CompareError(Exception):", "= reason def cmp_cmd(args): do_checksum, dont_checksum = lambda fn: True, lambda fn: False", "CompareError(\"download\", \"size\") if remote_fd.checksum != local_fd.checksum: raise CompareError(\"download\", \"checksum\") if remote_fd.mtime != local_fd.mtime:", "get_checksums = args.filepath or None 
for filedesc in get_file_stats(args.base_dir, exclusions, get_checksums): print(\"{},{},{},{},{}\".format(filedesc.item_type, filedesc.size,", "filename in get_checksums for base, dirs, files in os.walk(base_dir): if base == base_dir:", "'.pki', 'Jenkins') joinpath = posixpath.join FileDesc = namedtuple('FileDesc', ['full_path', 'item_type', 'size', 'mtime', 'checksum'])", "fl: for line in (l.strip() for l in fl): if line: remote_fd =", "os.stat(full_path) except: return None if stat.S_ISREG(stinf.st_mode): item_type, item_size = 'F', stinf.st_size elif stat.S_ISDIR(stinf.st_mode):", "\"size\") if remote_fd.checksum != local_fd.checksum: raise CompareError(\"download\", \"checksum\") if remote_fd.mtime != local_fd.mtime: os.utime(local_fd,", "ls_cmd(args): if args.checksum: get_checksums = [] else: get_checksums = args.filepath or None for", "get_file_desc(posixpath.join(base, filename), want_checksum) if fd: yield fd def ls_cmd(args): if args.checksum: get_checksums =", "subp.add_parser(\"ls\") lscmd.add_argument(\"--checksum\", action=\"store_true\", help=\"Force checksumming\") lscmd.add_argument(\"filepath\", action=\"append\", default=[], nargs='*', help=\"File paths to check\")", "lambda fn: False with open(args.csv, \"r\") as fl: for line in (l.strip() for", "return None checksum = 0 if item_type == 'F' and item_size > 0", "and want_checksum(full_path): with open(full_path, 'rb') as fl: mm = mmap.mmap(fl.fileno(), 0, access=mmap.ACCESS_READ) checksum", "file to check against\") args = argp.parse_args(sys.argv[1:]) if not hasattr(args, 'func'): raise RuntimeError(\"No", "want_checksum) try: if not local_fd: raise CompareError(\"download\", \"missing\") if local_fd.item_type != remote_fd.item_type: if", "= [] else: get_checksums = args.filepath or None for filedesc in get_file_stats(args.base_dir, exclusions,", "in (l.strip() for l in fl): if line: remote_fd = FileDesc(line.split(',', 4)) local_path", "remote_fd.full_path)) if __name__ == \"__main__\": argp = 
argparse.ArgumentParser(\"hasher\") argp.add_argument(\"--base-dir\", \"-C\", dest=\"base_dir\", default=\"/svn\", help=\"Base", "elif remote_fd.item_type == 'F': raise CompareError(\"download\", \"changed\") if remote_fd.size != local_fd.size: raise CompareError(\"download\",", "posixpath.join FileDesc = namedtuple('FileDesc', ['full_path', 'item_type', 'size', 'mtime', 'checksum']) def get_file_desc(full_path, want_checksum): try:", "0 if item_type == 'F' and item_size > 0 and want_checksum(full_path): with open(full_path,", "= get_file_desc(posixpath.join(base, filename), want_checksum) if fd: yield fd def ls_cmd(args): if args.checksum: get_checksums", "= mmap.mmap(fl.fileno(), 0, access=mmap.ACCESS_READ) checksum = hashlib.md5(mm).hexdigest().lower() mm.close() return FileDesc(full_path, item_type, item_size, stinf.st_mtime,", "local_fd = get_file_desc(local_path, want_checksum) try: if not local_fd: raise CompareError(\"download\", \"missing\") if local_fd.item_type", "help=\"CSV file to check against\") args = argp.parse_args(sys.argv[1:]) if not hasattr(args, 'func'): raise", "4)) local_path = os.path.normpath(os.path.join(args.base_dir, remote_fd.full_path)) want_checksum = do_checksum if local_fd.checksum else dont_checksum local_fd", "and item_size > 0 and want_checksum(full_path): with open(full_path, 'rb') as fl: mm =", "not in exclusions] for filename in files: fd = get_file_desc(posixpath.join(base, filename), want_checksum) if", "raise CompareError(\"#touched\", \"mtime\") except CompareError as e: print(\"%s,%s,%s,%s\" % (e.action, e.reason, remote_fd.mtime, remote_fd.full_path))", "import stat import sys from collections import namedtuple exclusions = ('lost+found', 'restore', 'backup',", "fd = get_file_desc(posixpath.join(base, filename), want_checksum) if fd: yield fd def ls_cmd(args): if args.checksum:", "False elif not get_checksums: want_checksum = lambda filename: True else: want_checksum = lambda", "checksumming\") 
lscmd.add_argument(\"filepath\", action=\"append\", default=[], nargs='*', help=\"File paths to check\") lscmd.set_defaults(func=ls_cmd) cmpcmd = subp.add_parser(\"cmp\")", "0 and want_checksum(full_path): with open(full_path, 'rb') as fl: mm = mmap.mmap(fl.fileno(), 0, access=mmap.ACCESS_READ)", "d not in exclusions] for filename in files: fd = get_file_desc(posixpath.join(base, filename), want_checksum)", "help=\"File paths to check\") lscmd.set_defaults(func=ls_cmd) cmpcmd = subp.add_parser(\"cmp\") lscmd.add_argument(\"csv\", type=str, help=\"CSV file to", "to check\") lscmd.set_defaults(func=ls_cmd) cmpcmd = subp.add_parser(\"cmp\") lscmd.add_argument(\"csv\", type=str, help=\"CSV file to check against\")", "\"changed\") if remote_fd.size != local_fd.size: raise CompareError(\"download\", \"size\") if remote_fd.checksum != local_fd.checksum: raise", "line in (l.strip() for l in fl): if line: remote_fd = FileDesc(line.split(',', 4))", "item_type, item_size, stinf.st_mtime, checksum) def get_file_stats(base_dir, exclusions, get_checksums=None): if get_checksums is None: want_checksum", "for d in dirs if d not in exclusions] for filename in files:", "= posixpath.join FileDesc = namedtuple('FileDesc', ['full_path', 'item_type', 'size', 'mtime', 'checksum']) def get_file_desc(full_path, want_checksum):", "def cmp_cmd(args): do_checksum, dont_checksum = lambda fn: True, lambda fn: False with open(args.csv,", "mmap.mmap(fl.fileno(), 0, access=mmap.ACCESS_READ) checksum = hashlib.md5(mm).hexdigest().lower() mm.close() return FileDesc(full_path, item_type, item_size, stinf.st_mtime, checksum)", "super().__init__(\"Compare failed\") self.action = action self.reason = reason def cmp_cmd(args): do_checksum, dont_checksum =", "CompareError(Exception): def __init__(self, action, reason): super().__init__(\"Compare failed\") self.action = action self.reason = reason", "get_checksums is None: want_checksum = lambda filename: False elif not get_checksums: want_checksum =", 
"want_checksum = lambda filename: True else: want_checksum = lambda filename: filename in get_checksums", "mmap import os import posixpath import stat import sys from collections import namedtuple", "import hashlib import mmap import os import posixpath import stat import sys from", "checksum = 0 if item_type == 'F' and item_size > 0 and want_checksum(full_path):", "raise CompareError(\"download\", \"size\") if remote_fd.checksum != local_fd.checksum: raise CompareError(\"download\", \"checksum\") if remote_fd.mtime !=", "nargs='*', help=\"File paths to check\") lscmd.set_defaults(func=ls_cmd) cmpcmd = subp.add_parser(\"cmp\") lscmd.add_argument(\"csv\", type=str, help=\"CSV file", "else: want_checksum = lambda filename: filename in get_checksums for base, dirs, files in", "= lambda fn: True, lambda fn: False with open(args.csv, \"r\") as fl: for", "(e.action, e.reason, remote_fd.mtime, remote_fd.full_path)) if __name__ == \"__main__\": argp = argparse.ArgumentParser(\"hasher\") argp.add_argument(\"--base-dir\", \"-C\",", "cruft dirs[:] = [d for d in dirs if d not in exclusions]", "\"-C\", dest=\"base_dir\", default=\"/svn\", help=\"Base folder\") subp = argp.add_subparsers() lscmd = subp.add_parser(\"ls\") lscmd.add_argument(\"--checksum\", action=\"store_true\",", "import argparse import hashlib import mmap import os import posixpath import stat import", "files: fd = get_file_desc(posixpath.join(base, filename), want_checksum) if fd: yield fd def ls_cmd(args): if", "(remote_fd.mtime, remote_fd.mtime)) raise CompareError(\"#touched\", \"mtime\") except CompareError as e: print(\"%s,%s,%s,%s\" % (e.action, e.reason,", "os.path.normpath(os.path.join(args.base_dir, remote_fd.full_path)) want_checksum = do_checksum if local_fd.checksum else dont_checksum local_fd = get_file_desc(local_path, want_checksum)", "if fd: yield fd def ls_cmd(args): if args.checksum: get_checksums = [] else: get_checksums", "reason): super().__init__(\"Compare failed\") self.action = action 
self.reason = reason def cmp_cmd(args): do_checksum, dont_checksum", "item_size > 0 and want_checksum(full_path): with open(full_path, 'rb') as fl: mm = mmap.mmap(fl.fileno(),", "argp = argparse.ArgumentParser(\"hasher\") argp.add_argument(\"--base-dir\", \"-C\", dest=\"base_dir\", default=\"/svn\", help=\"Base folder\") subp = argp.add_subparsers() lscmd", "in fl): if line: remote_fd = FileDesc(line.split(',', 4)) local_path = os.path.normpath(os.path.join(args.base_dir, remote_fd.full_path)) want_checksum", "\"__main__\": argp = argparse.ArgumentParser(\"hasher\") argp.add_argument(\"--base-dir\", \"-C\", dest=\"base_dir\", default=\"/svn\", help=\"Base folder\") subp = argp.add_subparsers()", "mm = mmap.mmap(fl.fileno(), 0, access=mmap.ACCESS_READ) checksum = hashlib.md5(mm).hexdigest().lower() mm.close() return FileDesc(full_path, item_type, item_size,", "line: remote_fd = FileDesc(line.split(',', 4)) local_path = os.path.normpath(os.path.join(args.base_dir, remote_fd.full_path)) want_checksum = do_checksum if", "argparse import hashlib import mmap import os import posixpath import stat import sys", "import posixpath import stat import sys from collections import namedtuple exclusions = ('lost+found',", "#! 
/usr/bin/python3 import argparse import hashlib import mmap import os import posixpath import", "for filename in files: fd = get_file_desc(posixpath.join(base, filename), want_checksum) if fd: yield fd", "local_fd.size: raise CompareError(\"download\", \"size\") if remote_fd.checksum != local_fd.checksum: raise CompareError(\"download\", \"checksum\") if remote_fd.mtime", "FileDesc(full_path, item_type, item_size, stinf.st_mtime, checksum) def get_file_stats(base_dir, exclusions, get_checksums=None): if get_checksums is None:", "lambda filename: filename in get_checksums for base, dirs, files in os.walk(base_dir): if base", "cmp_cmd(args): do_checksum, dont_checksum = lambda fn: True, lambda fn: False with open(args.csv, \"r\")", "!= local_fd.checksum: raise CompareError(\"download\", \"checksum\") if remote_fd.mtime != local_fd.mtime: os.utime(local_fd, (remote_fd.mtime, remote_fd.mtime)) raise", "if line: remote_fd = FileDesc(line.split(',', 4)) local_path = os.path.normpath(os.path.join(args.base_dir, remote_fd.full_path)) want_checksum = do_checksum", "['full_path', 'item_type', 'size', 'mtime', 'checksum']) def get_file_desc(full_path, want_checksum): try: stinf = os.stat(full_path) except:", "yield fd def ls_cmd(args): if args.checksum: get_checksums = [] else: get_checksums = args.filepath", "if __name__ == \"__main__\": argp = argparse.ArgumentParser(\"hasher\") argp.add_argument(\"--base-dir\", \"-C\", dest=\"base_dir\", default=\"/svn\", help=\"Base folder\")", "remote_fd.mtime != local_fd.mtime: os.utime(local_fd, (remote_fd.mtime, remote_fd.mtime)) raise CompareError(\"#touched\", \"mtime\") except CompareError as e:", "with open(full_path, 'rb') as fl: mm = mmap.mmap(fl.fileno(), 0, access=mmap.ACCESS_READ) checksum = hashlib.md5(mm).hexdigest().lower()", "= argp.parse_args(sys.argv[1:]) if not hasattr(args, 'func'): raise RuntimeError(\"No sub-command specified. 
See --help for", "if remote_fd.mtime != local_fd.mtime: os.utime(local_fd, (remote_fd.mtime, remote_fd.mtime)) raise CompareError(\"#touched\", \"mtime\") except CompareError as", "import mmap import os import posixpath import stat import sys from collections import", "namedtuple('FileDesc', ['full_path', 'item_type', 'size', 'mtime', 'checksum']) def get_file_desc(full_path, want_checksum): try: stinf = os.stat(full_path)", "fl: mm = mmap.mmap(fl.fileno(), 0, access=mmap.ACCESS_READ) checksum = hashlib.md5(mm).hexdigest().lower() mm.close() return FileDesc(full_path, item_type,", "action=\"append\", default=[], nargs='*', help=\"File paths to check\") lscmd.set_defaults(func=ls_cmd) cmpcmd = subp.add_parser(\"cmp\") lscmd.add_argument(\"csv\", type=str,", "base_dir: # Skip top-level cruft dirs[:] = [d for d in dirs if", "as fl: for line in (l.strip() for l in fl): if line: remote_fd", "dest=\"base_dir\", default=\"/svn\", help=\"Base folder\") subp = argp.add_subparsers() lscmd = subp.add_parser(\"ls\") lscmd.add_argument(\"--checksum\", action=\"store_true\", help=\"Force", "want_checksum = lambda filename: filename in get_checksums for base, dirs, files in os.walk(base_dir):", "action self.reason = reason def cmp_cmd(args): do_checksum, dont_checksum = lambda fn: True, lambda", "in get_file_stats(args.base_dir, exclusions, get_checksums): print(\"{},{},{},{},{}\".format(filedesc.item_type, filedesc.size, filedesc.mtime, filedesc.checksum, filedesc.full_path)) class CompareError(Exception): def __init__(self,", "\"mtime\") except CompareError as e: print(\"%s,%s,%s,%s\" % (e.action, e.reason, remote_fd.mtime, remote_fd.full_path)) if __name__", "want_checksum(full_path): with open(full_path, 'rb') as fl: mm = mmap.mmap(fl.fileno(), 0, access=mmap.ACCESS_READ) checksum =", "!= local_fd.mtime: os.utime(local_fd, (remote_fd.mtime, remote_fd.mtime)) raise CompareError(\"#touched\", \"mtime\") except CompareError as e: print(\"%s,%s,%s,%s\"", "exclusions = 
('lost+found', 'restore', 'backup', 'Newsfeed', '.pki', 'Jenkins') joinpath = posixpath.join FileDesc =", "get_checksums = [] else: get_checksums = args.filepath or None for filedesc in get_file_stats(args.base_dir,", "== base_dir: # Skip top-level cruft dirs[:] = [d for d in dirs", "= get_file_desc(local_path, want_checksum) try: if not local_fd: raise CompareError(\"download\", \"missing\") if local_fd.item_type !=", "= namedtuple('FileDesc', ['full_path', 'item_type', 'size', 'mtime', 'checksum']) def get_file_desc(full_path, want_checksum): try: stinf =", "elif stat.S_ISDIR(stinf.st_mode): item_type, item_size = 'D', 0 else: return None checksum = 0", "def get_file_stats(base_dir, exclusions, get_checksums=None): if get_checksums is None: want_checksum = lambda filename: False", "= argp.add_subparsers() lscmd = subp.add_parser(\"ls\") lscmd.add_argument(\"--checksum\", action=\"store_true\", help=\"Force checksumming\") lscmd.add_argument(\"filepath\", action=\"append\", default=[], nargs='*',", "local_fd.checksum: raise CompareError(\"download\", \"checksum\") if remote_fd.mtime != local_fd.mtime: os.utime(local_fd, (remote_fd.mtime, remote_fd.mtime)) raise CompareError(\"#touched\",", "subp.add_parser(\"cmp\") lscmd.add_argument(\"csv\", type=str, help=\"CSV file to check against\") args = argp.parse_args(sys.argv[1:]) if not", "if remote_fd.item_type == 'D': raise CompareError(\"mkdir\", \"changed\") elif remote_fd.item_type == 'F': raise CompareError(\"download\",", "'D': raise CompareError(\"mkdir\", \"changed\") elif remote_fd.item_type == 'F': raise CompareError(\"download\", \"changed\") if remote_fd.size", "None if stat.S_ISREG(stinf.st_mode): item_type, item_size = 'F', stinf.st_size elif stat.S_ISDIR(stinf.st_mode): item_type, item_size =", "FileDesc = namedtuple('FileDesc', ['full_path', 'item_type', 'size', 'mtime', 'checksum']) def get_file_desc(full_path, want_checksum): try: stinf", "e.reason, remote_fd.mtime, remote_fd.full_path)) if __name__ 
== \"__main__\": argp = argparse.ArgumentParser(\"hasher\") argp.add_argument(\"--base-dir\", \"-C\", dest=\"base_dir\",", "else: return None checksum = 0 if item_type == 'F' and item_size >", "import os import posixpath import stat import sys from collections import namedtuple exclusions", "lambda filename: False elif not get_checksums: want_checksum = lambda filename: True else: want_checksum", "local_path = os.path.normpath(os.path.join(args.base_dir, remote_fd.full_path)) want_checksum = do_checksum if local_fd.checksum else dont_checksum local_fd =", "else: get_checksums = args.filepath or None for filedesc in get_file_stats(args.base_dir, exclusions, get_checksums): print(\"{},{},{},{},{}\".format(filedesc.item_type,", "failed\") self.action = action self.reason = reason def cmp_cmd(args): do_checksum, dont_checksum = lambda", "action=\"store_true\", help=\"Force checksumming\") lscmd.add_argument(\"filepath\", action=\"append\", default=[], nargs='*', help=\"File paths to check\") lscmd.set_defaults(func=ls_cmd) cmpcmd", "default=\"/svn\", help=\"Base folder\") subp = argp.add_subparsers() lscmd = subp.add_parser(\"ls\") lscmd.add_argument(\"--checksum\", action=\"store_true\", help=\"Force checksumming\")", "remote_fd.item_type == 'F': raise CompareError(\"download\", \"changed\") if remote_fd.size != local_fd.size: raise CompareError(\"download\", \"size\")", "in exclusions] for filename in files: fd = get_file_desc(posixpath.join(base, filename), want_checksum) if fd:", "not get_checksums: want_checksum = lambda filename: True else: want_checksum = lambda filename: filename", "def __init__(self, action, reason): super().__init__(\"Compare failed\") self.action = action self.reason = reason def", "import sys from collections import namedtuple exclusions = ('lost+found', 'restore', 'backup', 'Newsfeed', '.pki',", "lscmd.add_argument(\"csv\", type=str, help=\"CSV file to check against\") args = argp.parse_args(sys.argv[1:]) if not hasattr(args,", 
"self.reason = reason def cmp_cmd(args): do_checksum, dont_checksum = lambda fn: True, lambda fn:", "except CompareError as e: print(\"%s,%s,%s,%s\" % (e.action, e.reason, remote_fd.mtime, remote_fd.full_path)) if __name__ ==", "filedesc.full_path)) class CompareError(Exception): def __init__(self, action, reason): super().__init__(\"Compare failed\") self.action = action self.reason", "fl): if line: remote_fd = FileDesc(line.split(',', 4)) local_path = os.path.normpath(os.path.join(args.base_dir, remote_fd.full_path)) want_checksum =", "item_size = 'D', 0 else: return None checksum = 0 if item_type ==", "checksum) def get_file_stats(base_dir, exclusions, get_checksums=None): if get_checksums is None: want_checksum = lambda filename:", "os.walk(base_dir): if base == base_dir: # Skip top-level cruft dirs[:] = [d for", "== 'D': raise CompareError(\"mkdir\", \"changed\") elif remote_fd.item_type == 'F': raise CompareError(\"download\", \"changed\") if", "try: stinf = os.stat(full_path) except: return None if stat.S_ISREG(stinf.st_mode): item_type, item_size = 'F',", "base, dirs, files in os.walk(base_dir): if base == base_dir: # Skip top-level cruft", "item_size, stinf.st_mtime, checksum) def get_file_stats(base_dir, exclusions, get_checksums=None): if get_checksums is None: want_checksum =", "'restore', 'backup', 'Newsfeed', '.pki', 'Jenkins') joinpath = posixpath.join FileDesc = namedtuple('FileDesc', ['full_path', 'item_type',", "in os.walk(base_dir): if base == base_dir: # Skip top-level cruft dirs[:] = [d", "want_checksum = do_checksum if local_fd.checksum else dont_checksum local_fd = get_file_desc(local_path, want_checksum) try: if", "'checksum']) def get_file_desc(full_path, want_checksum): try: stinf = os.stat(full_path) except: return None if stat.S_ISREG(stinf.st_mode):", "check\") lscmd.set_defaults(func=ls_cmd) cmpcmd = subp.add_parser(\"cmp\") lscmd.add_argument(\"csv\", type=str, help=\"CSV file to check against\") args", "folder\") subp = 
argp.add_subparsers() lscmd = subp.add_parser(\"ls\") lscmd.add_argument(\"--checksum\", action=\"store_true\", help=\"Force checksumming\") lscmd.add_argument(\"filepath\", action=\"append\"," ]
[ "0 for i in range(n): for j in range(m): if grid[i][j] == 1", "j+1), (i+1, j), (i+1, j+1)] for (x, y) in neighbors: if in_graph(grid, x,", "neighbors: if in_graph(grid, x, y) and (x, y) not in visited and grid[x][y]", "def max_region(grid): visited = set() n = len(grid) m = len(grid[0]) max_value =", "Preparation Kit/Graphs/DFS: Connected Cell in a Grid/connected_cells.py<gh_stars>1-10 #!/bin/python3 import math import os import", "math import os import random import re import sys # Complete the maxRegion", "import os import random import re import sys # Complete the maxRegion function", "(i, j+1), (i+1, j), (i+1, j+1)] for (x, y) in neighbors: if in_graph(grid,", "y) and (x, y) not in visited and grid[x][y] == 1: ans +=", "j+1)] for (x, y) in neighbors: if in_graph(grid, x, y) and (x, y)", "i, j) max_value = max(max_value, ans) return max_value if __name__ == '__main__': fptr", "j-1), (i-1, j+1), (i, j+1), (i+1, j), (i+1, j+1)] for (x, y) in", "ans def max_region(grid): visited = set() n = len(grid) m = len(grid[0]) max_value", "j in range(m): if grid[i][j] == 1 and (i, j) not in visited:", "Cell in a Grid/connected_cells.py<gh_stars>1-10 #!/bin/python3 import math import os import random import re", "= int(input()) grid = [] for _ in range(n): grid.append(list(map(int, input().rstrip().split()))) res =", "= [] for _ in range(n): grid.append(list(map(int, input().rstrip().split()))) res = max_region(grid) fptr.write(str(res) +", "in neighbors: if in_graph(grid, x, y) and (x, y) not in visited and", "fptr = open(os.environ['OUTPUT_PATH'], 'w') n = int(input()) m = int(input()) grid = []", "import math import os import random import re import sys # Complete the", "j): visited.add((i, j)) ans = 1 neighbors = [(i-1, j-1), (i, j-1), (i-1,", "(x, y) in neighbors: if in_graph(grid, x, y) and (x, y) not in", "m = int(input()) grid = [] for _ in range(n): grid.append(list(map(int, input().rstrip().split()))) res", "below. 
def in_graph(grid, i, j): n = len(grid) m = len(grid[0]) return i", "grid[i][j] == 1 and (i, j) not in visited: ans = dfs(grid, visited,", "visited, x, y) return ans def max_region(grid): visited = set() n = len(grid)", "in range(m): if grid[i][j] == 1 and (i, j) not in visited: ans", "(i+1, j-1), (i-1, j+1), (i, j+1), (i+1, j), (i+1, j+1)] for (x, y)", "i < n and j < m def dfs(grid, visited, i, j): visited.add((i,", "< m def dfs(grid, visited, i, j): visited.add((i, j)) ans = 1 neighbors", "y) not in visited and grid[x][y] == 1: ans += dfs(grid, visited, x,", "max_region(grid): visited = set() n = len(grid) m = len(grid[0]) max_value = 0", "= len(grid) m = len(grid[0]) return i >= 0 and j >= 0", "= open(os.environ['OUTPUT_PATH'], 'w') n = int(input()) m = int(input()) grid = [] for", "visited: ans = dfs(grid, visited, i, j) max_value = max(max_value, ans) return max_value", "and (i, j) not in visited: ans = dfs(grid, visited, i, j) max_value", "int(input()) grid = [] for _ in range(n): grid.append(list(map(int, input().rstrip().split()))) res = max_region(grid)", "dfs(grid, visited, x, y) return ans def max_region(grid): visited = set() n =", "not in visited: ans = dfs(grid, visited, i, j) max_value = max(max_value, ans)", "the maxRegion function below. def in_graph(grid, i, j): n = len(grid) m =", "Kits/Interview Preparation Kit/Graphs/DFS: Connected Cell in a Grid/connected_cells.py<gh_stars>1-10 #!/bin/python3 import math import os", "sys # Complete the maxRegion function below. 
def in_graph(grid, i, j): n =", "1 neighbors = [(i-1, j-1), (i, j-1), (i-1, j), (i+1, j-1), (i-1, j+1),", "return max_value if __name__ == '__main__': fptr = open(os.environ['OUTPUT_PATH'], 'w') n = int(input())", "n = int(input()) m = int(input()) grid = [] for _ in range(n):", "len(grid[0]) return i >= 0 and j >= 0 and i < n", "and (x, y) not in visited and grid[x][y] == 1: ans += dfs(grid,", "= len(grid[0]) return i >= 0 and j >= 0 and i <", "len(grid) m = len(grid[0]) return i >= 0 and j >= 0 and", "max_value = max(max_value, ans) return max_value if __name__ == '__main__': fptr = open(os.environ['OUTPUT_PATH'],", "and grid[x][y] == 1: ans += dfs(grid, visited, x, y) return ans def", "dfs(grid, visited, i, j): visited.add((i, j)) ans = 1 neighbors = [(i-1, j-1),", "visited = set() n = len(grid) m = len(grid[0]) max_value = 0 for", "== 1 and (i, j) not in visited: ans = dfs(grid, visited, i,", "j+1), (i, j+1), (i+1, j), (i+1, j+1)] for (x, y) in neighbors: if", "neighbors = [(i-1, j-1), (i, j-1), (i-1, j), (i+1, j-1), (i-1, j+1), (i,", "j) not in visited: ans = dfs(grid, visited, i, j) max_value = max(max_value,", "(i, j-1), (i-1, j), (i+1, j-1), (i-1, j+1), (i, j+1), (i+1, j), (i+1,", "random import re import sys # Complete the maxRegion function below. def in_graph(grid,", "import re import sys # Complete the maxRegion function below. 
def in_graph(grid, i,", "Connected Cell in a Grid/connected_cells.py<gh_stars>1-10 #!/bin/python3 import math import os import random import", "ans) return max_value if __name__ == '__main__': fptr = open(os.environ['OUTPUT_PATH'], 'w') n =", "dfs(grid, visited, i, j) max_value = max(max_value, ans) return max_value if __name__ ==", "j-1), (i-1, j), (i+1, j-1), (i-1, j+1), (i, j+1), (i+1, j), (i+1, j+1)]", "m def dfs(grid, visited, i, j): visited.add((i, j)) ans = 1 neighbors =", "visited and grid[x][y] == 1: ans += dfs(grid, visited, x, y) return ans", "j): n = len(grid) m = len(grid[0]) return i >= 0 and j", "range(n): for j in range(m): if grid[i][j] == 1 and (i, j) not", "x, y) return ans def max_region(grid): visited = set() n = len(grid) m", "n = len(grid) m = len(grid[0]) return i >= 0 and j >=", "Grid/connected_cells.py<gh_stars>1-10 #!/bin/python3 import math import os import random import re import sys #", "ans += dfs(grid, visited, x, y) return ans def max_region(grid): visited = set()", "open(os.environ['OUTPUT_PATH'], 'w') n = int(input()) m = int(input()) grid = [] for _", "re import sys # Complete the maxRegion function below. def in_graph(grid, i, j):", "= 1 neighbors = [(i-1, j-1), (i, j-1), (i-1, j), (i+1, j-1), (i-1,", "and i < n and j < m def dfs(grid, visited, i, j):", "1 and (i, j) not in visited: ans = dfs(grid, visited, i, j)", "import sys # Complete the maxRegion function below. 
def in_graph(grid, i, j): n", "return i >= 0 and j >= 0 and i < n and", "grid = [] for _ in range(n): grid.append(list(map(int, input().rstrip().split()))) res = max_region(grid) fptr.write(str(res)", "Preparation Kits/Interview Preparation Kit/Graphs/DFS: Connected Cell in a Grid/connected_cells.py<gh_stars>1-10 #!/bin/python3 import math import", "len(grid) m = len(grid[0]) max_value = 0 for i in range(n): for j", "in visited and grid[x][y] == 1: ans += dfs(grid, visited, x, y) return", "0 and i < n and j < m def dfs(grid, visited, i,", "i >= 0 and j >= 0 and i < n and j", "and j < m def dfs(grid, visited, i, j): visited.add((i, j)) ans =", "j), (i+1, j+1)] for (x, y) in neighbors: if in_graph(grid, x, y) and", "len(grid[0]) max_value = 0 for i in range(n): for j in range(m): if", "import random import re import sys # Complete the maxRegion function below. def", "m = len(grid[0]) return i >= 0 and j >= 0 and i", "max_value = 0 for i in range(n): for j in range(m): if grid[i][j]", "y) in neighbors: if in_graph(grid, x, y) and (x, y) not in visited", "function below. def in_graph(grid, i, j): n = len(grid) m = len(grid[0]) return", "= len(grid) m = len(grid[0]) max_value = 0 for i in range(n): for", "visited.add((i, j)) ans = 1 neighbors = [(i-1, j-1), (i, j-1), (i-1, j),", "# Complete the maxRegion function below. 
def in_graph(grid, i, j): n = len(grid)", "= set() n = len(grid) m = len(grid[0]) max_value = 0 for i", "max_value if __name__ == '__main__': fptr = open(os.environ['OUTPUT_PATH'], 'w') n = int(input()) m", ">= 0 and j >= 0 and i < n and j <", "j < m def dfs(grid, visited, i, j): visited.add((i, j)) ans = 1", "= [(i-1, j-1), (i, j-1), (i-1, j), (i+1, j-1), (i-1, j+1), (i, j+1),", "== '__main__': fptr = open(os.environ['OUTPUT_PATH'], 'w') n = int(input()) m = int(input()) grid", "n = len(grid) m = len(grid[0]) max_value = 0 for i in range(n):", "in_graph(grid, i, j): n = len(grid) m = len(grid[0]) return i >= 0", "(i+1, j+1)] for (x, y) in neighbors: if in_graph(grid, x, y) and (x,", "j), (i+1, j-1), (i-1, j+1), (i, j+1), (i+1, j), (i+1, j+1)] for (x,", "if __name__ == '__main__': fptr = open(os.environ['OUTPUT_PATH'], 'w') n = int(input()) m =", "not in visited and grid[x][y] == 1: ans += dfs(grid, visited, x, y)", "and j >= 0 and i < n and j < m def", "0 and j >= 0 and i < n and j < m", "j)) ans = 1 neighbors = [(i-1, j-1), (i, j-1), (i-1, j), (i+1,", ">= 0 and i < n and j < m def dfs(grid, visited,", "visited, i, j): visited.add((i, j)) ans = 1 neighbors = [(i-1, j-1), (i,", "(i-1, j+1), (i, j+1), (i+1, j), (i+1, j+1)] for (x, y) in neighbors:", "in range(n): for j in range(m): if grid[i][j] == 1 and (i, j)", "<filename>Interview Preparation Kits/Interview Preparation Kit/Graphs/DFS: Connected Cell in a Grid/connected_cells.py<gh_stars>1-10 #!/bin/python3 import math", "(x, y) not in visited and grid[x][y] == 1: ans += dfs(grid, visited,", "+= dfs(grid, visited, x, y) return ans def max_region(grid): visited = set() n", "ans = dfs(grid, visited, i, j) max_value = max(max_value, ans) return max_value if", "max(max_value, ans) return max_value if __name__ == '__main__': fptr = open(os.environ['OUTPUT_PATH'], 'w') n", "Complete the maxRegion function below. 
def in_graph(grid, i, j): n = len(grid) m", "ans = 1 neighbors = [(i-1, j-1), (i, j-1), (i-1, j), (i+1, j-1),", "maxRegion function below. def in_graph(grid, i, j): n = len(grid) m = len(grid[0])", "os import random import re import sys # Complete the maxRegion function below.", "m = len(grid[0]) max_value = 0 for i in range(n): for j in", "grid[x][y] == 1: ans += dfs(grid, visited, x, y) return ans def max_region(grid):", "(i+1, j), (i+1, j+1)] for (x, y) in neighbors: if in_graph(grid, x, y)", "range(m): if grid[i][j] == 1 and (i, j) not in visited: ans =", "(i, j) not in visited: ans = dfs(grid, visited, i, j) max_value =", "return ans def max_region(grid): visited = set() n = len(grid) m = len(grid[0])", "a Grid/connected_cells.py<gh_stars>1-10 #!/bin/python3 import math import os import random import re import sys", "== 1: ans += dfs(grid, visited, x, y) return ans def max_region(grid): visited", "'__main__': fptr = open(os.environ['OUTPUT_PATH'], 'w') n = int(input()) m = int(input()) grid =", "i in range(n): for j in range(m): if grid[i][j] == 1 and (i,", "in visited: ans = dfs(grid, visited, i, j) max_value = max(max_value, ans) return", "[(i-1, j-1), (i, j-1), (i-1, j), (i+1, j-1), (i-1, j+1), (i, j+1), (i+1,", "< n and j < m def dfs(grid, visited, i, j): visited.add((i, j))", "1: ans += dfs(grid, visited, x, y) return ans def max_region(grid): visited =", "def in_graph(grid, i, j): n = len(grid) m = len(grid[0]) return i >=", "if in_graph(grid, x, y) and (x, y) not in visited and grid[x][y] ==", "= int(input()) m = int(input()) grid = [] for _ in range(n): grid.append(list(map(int,", "int(input()) m = int(input()) grid = [] for _ in range(n): grid.append(list(map(int, input().rstrip().split())))", "__name__ == '__main__': fptr = open(os.environ['OUTPUT_PATH'], 'w') n = int(input()) m = int(input())", "y) return ans def max_region(grid): visited = set() n = len(grid) m =", "i, j): n = len(grid) m = len(grid[0]) return i >= 0 and", "[] for _ in range(n): 
grid.append(list(map(int, input().rstrip().split()))) res = max_region(grid) fptr.write(str(res) + '\\n')", "= dfs(grid, visited, i, j) max_value = max(max_value, ans) return max_value if __name__", "(i-1, j), (i+1, j-1), (i-1, j+1), (i, j+1), (i+1, j), (i+1, j+1)] for", "n and j < m def dfs(grid, visited, i, j): visited.add((i, j)) ans", "Kit/Graphs/DFS: Connected Cell in a Grid/connected_cells.py<gh_stars>1-10 #!/bin/python3 import math import os import random", "#!/bin/python3 import math import os import random import re import sys # Complete", "if grid[i][j] == 1 and (i, j) not in visited: ans = dfs(grid,", "for (x, y) in neighbors: if in_graph(grid, x, y) and (x, y) not", "i, j): visited.add((i, j)) ans = 1 neighbors = [(i-1, j-1), (i, j-1),", "in_graph(grid, x, y) and (x, y) not in visited and grid[x][y] == 1:", "set() n = len(grid) m = len(grid[0]) max_value = 0 for i in", "for i in range(n): for j in range(m): if grid[i][j] == 1 and", "= len(grid[0]) max_value = 0 for i in range(n): for j in range(m):", "for _ in range(n): grid.append(list(map(int, input().rstrip().split()))) res = max_region(grid) fptr.write(str(res) + '\\n') fptr.close()", "for j in range(m): if grid[i][j] == 1 and (i, j) not in", "j) max_value = max(max_value, ans) return max_value if __name__ == '__main__': fptr =", "x, y) and (x, y) not in visited and grid[x][y] == 1: ans", "= 0 for i in range(n): for j in range(m): if grid[i][j] ==", "j-1), (i, j-1), (i-1, j), (i+1, j-1), (i-1, j+1), (i, j+1), (i+1, j),", "visited, i, j) max_value = max(max_value, ans) return max_value if __name__ == '__main__':", "def dfs(grid, visited, i, j): visited.add((i, j)) ans = 1 neighbors = [(i-1,", "in a Grid/connected_cells.py<gh_stars>1-10 #!/bin/python3 import math import os import random import re import", "j >= 0 and i < n and j < m def dfs(grid,", "= max(max_value, ans) return max_value if __name__ == '__main__': fptr = open(os.environ['OUTPUT_PATH'], 'w')", "'w') n = int(input()) m = int(input()) 
grid = [] for _ in" ]
[ "128) X_sharded = BigMatrix(\"test_0\", shape=X.shape, shard_sizes=X.shape) shard_matrix(X_sharded, X) X_sharded_local = X_sharded.submatrix(0, 0).get_block() assert(np.all(X_sharded_local", "BigMatrix(\"test_1\", shape=X.shape, shard_sizes=X.shape) X_sharded.submatrix(0, 0).put_block(X) assert(np.all(X_sharded.numpy() == X)) def test_multiple_shard_index_get(self): X = np.random.randn(128,", "def test_complex_slices(self): X = np.random.randn(21, 67, 53) shard_sizes = [21, 16, 11] X_sharded", "shard_sizes = [16, 16] X_sharded = BigMatrix(\"test_4\", shape=X.shape, shard_sizes=shard_sizes) shard_matrix(X_sharded, X) assert(np.all(X[::32] ==", "BigMatrix(\"test_3\", shape=X.shape, shard_sizes=shard_sizes) shard_matrix(X_sharded, X) assert(np.all(X[0:64] == X_sharded.submatrix([2]).numpy())) assert(np.all(X[64:128] == X_sharded.submatrix([2, None]).numpy())) assert(np.all(X[:,", "X) X_sharded_local = X_sharded.submatrix(0, 0).get_block() assert(np.all(X_sharded_local == X)) def test_single_shard_index_put(self): X = np.random.randn(128,", "[6, 8, 4]).numpy()[:, ::16])) def test_complex_slices(self): X = np.random.randn(21, 67, 53) shard_sizes =", "shard_sizes=X.shape) X_sharded.submatrix(0, 0).put_block(X) assert(np.all(X_sharded.numpy() == X)) def test_multiple_shard_index_get(self): X = np.random.randn(128, 128) shard_sizes", "X_sharded.submatrix( None, [0, 6, 4]).numpy()[:, ::16])) assert(np.all(X[:, 96:128:64] == X_sharded.submatrix( None, [6, 8,", "as np import pywren import unittest class IndexingTestClass(unittest.TestCase): def test_single_shard_index_get(self): X = np.random.randn(128,", "np.random.randn(128, 128) X_sharded = BigMatrix(\"test_0\", shape=X.shape, shard_sizes=X.shape) shard_matrix(X_sharded, X) X_sharded_local = X_sharded.submatrix(0, 0).get_block()", "= BigMatrix(\"test_2\", shape=X.shape, shard_sizes=shard_sizes) shard_matrix(X_sharded, X) assert(np.all(X[0:64, 0:64] == X_sharded.submatrix(0).get_block(0))) assert(np.all(X[64:128, 64:128] 
==", "shard_matrix(X_sharded, X) assert(np.all(X[0:64, 0:64] == X_sharded.submatrix(0).get_block(0))) assert(np.all(X[64:128, 64:128] == X_sharded.submatrix(1, 1).get_block())) assert(np.all(X[0:64, 64:128]", "== X_sharded.submatrix( None, [0, 6, 4]).numpy()[:, ::16])) assert(np.all(X[:, 96:128:64] == X_sharded.submatrix( None, [6,", "as datasets from numpywren.matrix import BigMatrix from numpywren import matrix_utils, binops from numpywren.matrix_init", "BigMatrix(\"test_4\", shape=X.shape, shard_sizes=shard_sizes) shard_matrix(X_sharded, X) assert(np.all(X[::32] == X_sharded.submatrix( [None, None, 2]).numpy()[::16])) assert(np.all(X[16::32] ==", "[0, 6, 4]).numpy()[:, ::16])) assert(np.all(X[:, 96:128:64] == X_sharded.submatrix( None, [6, 8, 4]).numpy()[:, ::16]))", "assert(np.all(X[:, 96:128:64] == X_sharded.submatrix( None, [6, 8, 4]).numpy()[:, ::16])) def test_complex_slices(self): X =", "::16])) def test_complex_slices(self): X = np.random.randn(21, 67, 53) shard_sizes = [21, 16, 11]", "= BigMatrix(\"test_4\", shape=X.shape, shard_sizes=shard_sizes) shard_matrix(X_sharded, X) assert(np.all(X[::32] == X_sharded.submatrix( [None, None, 2]).numpy()[::16])) assert(np.all(X[16::32]", "X_sharded = BigMatrix(\"test_2\", shape=X.shape, shard_sizes=shard_sizes) shard_matrix(X_sharded, X) assert(np.all(X[0:64, 0:64] == X_sharded.submatrix(0).get_block(0))) assert(np.all(X[64:128, 64:128]", "import pywren import unittest class IndexingTestClass(unittest.TestCase): def test_single_shard_index_get(self): X = np.random.randn(128, 128) X_sharded", "numpy as np import pywren import unittest class IndexingTestClass(unittest.TestCase): def test_single_shard_index_get(self): X =", "53) shard_sizes = [21, 16, 11] X_sharded = BigMatrix(\"test_5\", shape=X.shape, shard_sizes=shard_sizes) shard_matrix(X_sharded, X)", "shard_sizes = [64, 64] X_sharded = BigMatrix(\"test_2\", shape=X.shape, shard_sizes=shard_sizes) shard_matrix(X_sharded, X) assert(np.all(X[0:64, 0:64]", "32] 
X_sharded = BigMatrix(\"test_3\", shape=X.shape, shard_sizes=shard_sizes) shard_matrix(X_sharded, X) assert(np.all(X[0:64] == X_sharded.submatrix([2]).numpy())) assert(np.all(X[64:128] ==", "shape=X.shape, shard_sizes=shard_sizes) shard_matrix(X_sharded, X) assert(np.all(X[:, :16, :11] == X_sharded.submatrix(0, 0, 0).numpy())) assert(np.all(X[:, 64:67,", "test_single_shard_index_put(self): X = np.random.randn(128, 128) X_sharded = BigMatrix(\"test_1\", shape=X.shape, shard_sizes=X.shape) X_sharded.submatrix(0, 0).put_block(X) assert(np.all(X_sharded.numpy()", "X) assert(np.all(X[:, :16, :11] == X_sharded.submatrix(0, 0, 0).numpy())) assert(np.all(X[:, 64:67, 44:53] == X_sharded.submatrix(0,", "11] X_sharded = BigMatrix(\"test_5\", shape=X.shape, shard_sizes=shard_sizes) shard_matrix(X_sharded, X) assert(np.all(X[:, :16, :11] == X_sharded.submatrix(0,", "class IndexingTestClass(unittest.TestCase): def test_single_shard_index_get(self): X = np.random.randn(128, 128) X_sharded = BigMatrix(\"test_0\", shape=X.shape, shard_sizes=X.shape)", "np.random.randn(128, 128) shard_sizes = [16, 16] X_sharded = BigMatrix(\"test_4\", shape=X.shape, shard_sizes=shard_sizes) shard_matrix(X_sharded, X)", "0).put_block(X) assert(np.all(X_sharded.numpy() == X)) def test_multiple_shard_index_get(self): X = np.random.randn(128, 128) shard_sizes = [64,", "shape=X.shape, shard_sizes=X.shape) shard_matrix(X_sharded, X) X_sharded_local = X_sharded.submatrix(0, 0).get_block() assert(np.all(X_sharded_local == X)) def test_single_shard_index_put(self):", "def test_simple_slices(self): X = np.random.randn(128, 128) shard_sizes = [32, 32] X_sharded = BigMatrix(\"test_3\",", ":16, :11] == X_sharded.submatrix(0, 0, 0).numpy())) assert(np.all(X[:, 64:67, 44:53] == X_sharded.submatrix(0, 4, 4).numpy()))", "== X_sharded.submatrix(0).get_block(0))) assert(np.all(X[64:128, 64:128] == X_sharded.submatrix(1, 1).get_block())) assert(np.all(X[0:64, 64:128] == X_sharded.submatrix(0, 1).get_block())) 
assert(np.all(X[64:128,", "X_sharded = BigMatrix(\"test_1\", shape=X.shape, shard_sizes=X.shape) X_sharded.submatrix(0, 0).put_block(X) assert(np.all(X_sharded.numpy() == X)) def test_multiple_shard_index_get(self): X", "X_sharded.submatrix( [1, None, 2]).numpy()[::16])) assert(np.all(X[:, 0:96:64] == X_sharded.submatrix( None, [0, 6, 4]).numpy()[:, ::16]))", "assert(np.all(X[:, :16, :11] == X_sharded.submatrix(0, 0, 0).numpy())) assert(np.all(X[:, 64:67, 44:53] == X_sharded.submatrix(0, 4,", "test_complex_slices(self): X = np.random.randn(21, 67, 53) shard_sizes = [21, 16, 11] X_sharded =", "assert(np.all(X[64:128, 64:128] == X_sharded.submatrix(1, 1).get_block())) assert(np.all(X[0:64, 64:128] == X_sharded.submatrix(0, 1).get_block())) assert(np.all(X[64:128, 0:64] ==", "assert(np.all(X[::32] == X_sharded.submatrix( [None, None, 2]).numpy()[::16])) assert(np.all(X[16::32] == X_sharded.submatrix( [1, None, 2]).numpy()[::16])) assert(np.all(X[:,", "shard_sizes = [32, 32] X_sharded = BigMatrix(\"test_3\", shape=X.shape, shard_sizes=shard_sizes) shard_matrix(X_sharded, X) assert(np.all(X[0:64] ==", "X_sharded = BigMatrix(\"test_0\", shape=X.shape, shard_sizes=X.shape) shard_matrix(X_sharded, X) X_sharded_local = X_sharded.submatrix(0, 0).get_block() assert(np.all(X_sharded_local ==", "X_sharded = BigMatrix(\"test_4\", shape=X.shape, shard_sizes=shard_sizes) shard_matrix(X_sharded, X) assert(np.all(X[::32] == X_sharded.submatrix( [None, None, 2]).numpy()[::16]))", "X = np.random.randn(128, 128) shard_sizes = [64, 64] X_sharded = BigMatrix(\"test_2\", shape=X.shape, shard_sizes=shard_sizes)", "def test_single_shard_index_get(self): X = np.random.randn(128, 128) X_sharded = BigMatrix(\"test_0\", shape=X.shape, shard_sizes=X.shape) shard_matrix(X_sharded, X)", "128) shard_sizes = [64, 64] X_sharded = BigMatrix(\"test_2\", shape=X.shape, shard_sizes=shard_sizes) shard_matrix(X_sharded, X) assert(np.all(X[0:64,", "assert(np.all(X[64:128] == X_sharded.submatrix([2, 
None]).numpy())) assert(np.all(X[:, 0:96] == X_sharded.submatrix(None, [0, 3]).numpy())) assert(np.all(X[:, 96:128] ==", "== X_sharded.submatrix(1, 1).get_block())) assert(np.all(X[0:64, 64:128] == X_sharded.submatrix(0, 1).get_block())) assert(np.all(X[64:128, 0:64] == X_sharded.submatrix(None, 0).get_block(1)))", "128) X_sharded = BigMatrix(\"test_1\", shape=X.shape, shard_sizes=X.shape) X_sharded.submatrix(0, 0).put_block(X) assert(np.all(X_sharded.numpy() == X)) def test_multiple_shard_index_get(self):", "[64, 64] X_sharded = BigMatrix(\"test_2\", shape=X.shape, shard_sizes=shard_sizes) shard_matrix(X_sharded, X) assert(np.all(X[0:64, 0:64] == X_sharded.submatrix(0).get_block(0)))", "X_sharded.submatrix([2, None]).numpy())) assert(np.all(X[:, 0:96] == X_sharded.submatrix(None, [0, 3]).numpy())) assert(np.all(X[:, 96:128] == X_sharded.submatrix( None,", "X_sharded = BigMatrix(\"test_3\", shape=X.shape, shard_sizes=shard_sizes) shard_matrix(X_sharded, X) assert(np.all(X[0:64] == X_sharded.submatrix([2]).numpy())) assert(np.all(X[64:128] == X_sharded.submatrix([2,", "shard_sizes=shard_sizes) shard_matrix(X_sharded, X) assert(np.all(X[0:64, 0:64] == X_sharded.submatrix(0).get_block(0))) assert(np.all(X[64:128, 64:128] == X_sharded.submatrix(1, 1).get_block())) assert(np.all(X[0:64,", "shape=X.shape, shard_sizes=shard_sizes) shard_matrix(X_sharded, X) assert(np.all(X[::32] == X_sharded.submatrix( [None, None, 2]).numpy()[::16])) assert(np.all(X[16::32] == X_sharded.submatrix(", "assert(np.all(X[:, 0:96] == X_sharded.submatrix(None, [0, 3]).numpy())) assert(np.all(X[:, 96:128] == X_sharded.submatrix( None, [3, None]).numpy()))", "X = np.random.randn(128, 128) X_sharded = BigMatrix(\"test_1\", shape=X.shape, shard_sizes=X.shape) X_sharded.submatrix(0, 0).put_block(X) assert(np.all(X_sharded.numpy() ==", "128) shard_sizes = [32, 32] X_sharded = BigMatrix(\"test_3\", shape=X.shape, shard_sizes=shard_sizes) shard_matrix(X_sharded, X) assert(np.all(X[0:64]", "def 
test_step_slices(self): X = np.random.randn(128, 128) shard_sizes = [16, 16] X_sharded = BigMatrix(\"test_4\",", "from numpywren.matrix import BigMatrix from numpywren import matrix_utils, binops from numpywren.matrix_init import shard_matrix", "assert(np.all(X_sharded.numpy() == X)) def test_multiple_shard_index_get(self): X = np.random.randn(128, 128) shard_sizes = [64, 64]", "== X_sharded.submatrix(None, [0, 3]).numpy())) assert(np.all(X[:, 96:128] == X_sharded.submatrix( None, [3, None]).numpy())) def test_step_slices(self):", "assert(np.all(X[16::32] == X_sharded.submatrix( [1, None, 2]).numpy()[::16])) assert(np.all(X[:, 0:96:64] == X_sharded.submatrix( None, [0, 6,", "None, [0, 6, 4]).numpy()[:, ::16])) assert(np.all(X[:, 96:128:64] == X_sharded.submatrix( None, [6, 8, 4]).numpy()[:,", "test_single_shard_index_get(self): X = np.random.randn(128, 128) X_sharded = BigMatrix(\"test_0\", shape=X.shape, shard_sizes=X.shape) shard_matrix(X_sharded, X) X_sharded_local", "shard_matrix import pytest import numpy as np import pywren import unittest class IndexingTestClass(unittest.TestCase):", "numpywren import matrix_utils, binops from numpywren.matrix_init import shard_matrix import pytest import numpy as", "96:128:64] == X_sharded.submatrix( None, [6, 8, 4]).numpy()[:, ::16])) def test_complex_slices(self): X = np.random.randn(21,", "None, [6, 8, 4]).numpy()[:, ::16])) def test_complex_slices(self): X = np.random.randn(21, 67, 53) shard_sizes", "= [16, 16] X_sharded = BigMatrix(\"test_4\", shape=X.shape, shard_sizes=shard_sizes) shard_matrix(X_sharded, X) assert(np.all(X[::32] == X_sharded.submatrix(", "2]).numpy()[::16])) assert(np.all(X[:, 0:96:64] == X_sharded.submatrix( None, [0, 6, 4]).numpy()[:, ::16])) assert(np.all(X[:, 96:128:64] ==", "BigMatrix(\"test_5\", shape=X.shape, shard_sizes=shard_sizes) shard_matrix(X_sharded, X) assert(np.all(X[:, :16, :11] == X_sharded.submatrix(0, 0, 0).numpy())) assert(np.all(X[:,", "X_sharded.submatrix(0, 0).put_block(X) 
assert(np.all(X_sharded.numpy() == X)) def test_multiple_shard_index_get(self): X = np.random.randn(128, 128) shard_sizes =", "96:128] == X_sharded.submatrix( None, [3, None]).numpy())) def test_step_slices(self): X = np.random.randn(128, 128) shard_sizes", "assert(np.all(X[0:64] == X_sharded.submatrix([2]).numpy())) assert(np.all(X[64:128] == X_sharded.submatrix([2, None]).numpy())) assert(np.all(X[:, 0:96] == X_sharded.submatrix(None, [0, 3]).numpy()))", "sklearn.datasets as datasets from numpywren.matrix import BigMatrix from numpywren import matrix_utils, binops from", "0).get_block(1))) def test_simple_slices(self): X = np.random.randn(128, 128) shard_sizes = [32, 32] X_sharded =", "shape=X.shape, shard_sizes=shard_sizes) shard_matrix(X_sharded, X) assert(np.all(X[0:64, 0:64] == X_sharded.submatrix(0).get_block(0))) assert(np.all(X[64:128, 64:128] == X_sharded.submatrix(1, 1).get_block()))", "numpywren.matrix import BigMatrix from numpywren import matrix_utils, binops from numpywren.matrix_init import shard_matrix import", "== X_sharded.submatrix( [None, None, 2]).numpy()[::16])) assert(np.all(X[16::32] == X_sharded.submatrix( [1, None, 2]).numpy()[::16])) assert(np.all(X[:, 0:96:64]", "4]).numpy()[:, ::16])) def test_complex_slices(self): X = np.random.randn(21, 67, 53) shard_sizes = [21, 16,", "== X)) def test_multiple_shard_index_get(self): X = np.random.randn(128, 128) shard_sizes = [64, 64] X_sharded", "X_sharded.submatrix(0).get_block(0))) assert(np.all(X[64:128, 64:128] == X_sharded.submatrix(1, 1).get_block())) assert(np.all(X[0:64, 64:128] == X_sharded.submatrix(0, 1).get_block())) assert(np.all(X[64:128, 0:64]", "X_sharded.submatrix(0, 1).get_block())) assert(np.all(X[64:128, 0:64] == X_sharded.submatrix(None, 0).get_block(1))) def test_simple_slices(self): X = np.random.randn(128, 128)", "np.random.randn(21, 67, 53) shard_sizes = [21, 16, 11] X_sharded = BigMatrix(\"test_5\", shape=X.shape, shard_sizes=shard_sizes)", "None, 2]).numpy()[::16])) 
assert(np.all(X[16::32] == X_sharded.submatrix( [1, None, 2]).numpy()[::16])) assert(np.all(X[:, 0:96:64] == X_sharded.submatrix( None,", "BigMatrix(\"test_0\", shape=X.shape, shard_sizes=X.shape) shard_matrix(X_sharded, X) X_sharded_local = X_sharded.submatrix(0, 0).get_block() assert(np.all(X_sharded_local == X)) def", "[None, None, 2]).numpy()[::16])) assert(np.all(X[16::32] == X_sharded.submatrix( [1, None, 2]).numpy()[::16])) assert(np.all(X[:, 0:96:64] == X_sharded.submatrix(", "64] X_sharded = BigMatrix(\"test_2\", shape=X.shape, shard_sizes=shard_sizes) shard_matrix(X_sharded, X) assert(np.all(X[0:64, 0:64] == X_sharded.submatrix(0).get_block(0))) assert(np.all(X[64:128,", "= BigMatrix(\"test_0\", shape=X.shape, shard_sizes=X.shape) shard_matrix(X_sharded, X) X_sharded_local = X_sharded.submatrix(0, 0).get_block() assert(np.all(X_sharded_local == X))", "unittest class IndexingTestClass(unittest.TestCase): def test_single_shard_index_get(self): X = np.random.randn(128, 128) X_sharded = BigMatrix(\"test_0\", shape=X.shape,", "None, [3, None]).numpy())) def test_step_slices(self): X = np.random.randn(128, 128) shard_sizes = [16, 16]", "= np.random.randn(128, 128) shard_sizes = [64, 64] X_sharded = BigMatrix(\"test_2\", shape=X.shape, shard_sizes=shard_sizes) shard_matrix(X_sharded,", "[1, None, 2]).numpy()[::16])) assert(np.all(X[:, 0:96:64] == X_sharded.submatrix( None, [0, 6, 4]).numpy()[:, ::16])) assert(np.all(X[:,", "shard_sizes = [21, 16, 11] X_sharded = BigMatrix(\"test_5\", shape=X.shape, shard_sizes=shard_sizes) shard_matrix(X_sharded, X) assert(np.all(X[:,", "16, 11] X_sharded = BigMatrix(\"test_5\", shape=X.shape, shard_sizes=shard_sizes) shard_matrix(X_sharded, X) assert(np.all(X[:, :16, :11] ==", "X_sharded.submatrix(None, 0).get_block(1))) def test_simple_slices(self): X = np.random.randn(128, 128) shard_sizes = [32, 32] X_sharded", "= np.random.randn(128, 128) X_sharded = BigMatrix(\"test_0\", shape=X.shape, shard_sizes=X.shape) 
shard_matrix(X_sharded, X) X_sharded_local = X_sharded.submatrix(0,", "test_simple_slices(self): X = np.random.randn(128, 128) shard_sizes = [32, 32] X_sharded = BigMatrix(\"test_3\", shape=X.shape,", "= BigMatrix(\"test_5\", shape=X.shape, shard_sizes=shard_sizes) shard_matrix(X_sharded, X) assert(np.all(X[:, :16, :11] == X_sharded.submatrix(0, 0, 0).numpy()))", "import sklearn.datasets as datasets from numpywren.matrix import BigMatrix from numpywren import matrix_utils, binops", "np import pywren import unittest class IndexingTestClass(unittest.TestCase): def test_single_shard_index_get(self): X = np.random.randn(128, 128)", "np.random.randn(128, 128) shard_sizes = [32, 32] X_sharded = BigMatrix(\"test_3\", shape=X.shape, shard_sizes=shard_sizes) shard_matrix(X_sharded, X)", "X = np.random.randn(128, 128) shard_sizes = [16, 16] X_sharded = BigMatrix(\"test_4\", shape=X.shape, shard_sizes=shard_sizes)", "assert(np.all(X[0:64, 64:128] == X_sharded.submatrix(0, 1).get_block())) assert(np.all(X[64:128, 0:64] == X_sharded.submatrix(None, 0).get_block(1))) def test_simple_slices(self): X", "shard_sizes=X.shape) shard_matrix(X_sharded, X) X_sharded_local = X_sharded.submatrix(0, 0).get_block() assert(np.all(X_sharded_local == X)) def test_single_shard_index_put(self): X", "X)) def test_multiple_shard_index_get(self): X = np.random.randn(128, 128) shard_sizes = [64, 64] X_sharded =", "= X_sharded.submatrix(0, 0).get_block() assert(np.all(X_sharded_local == X)) def test_single_shard_index_put(self): X = np.random.randn(128, 128) X_sharded", "X) assert(np.all(X[0:64, 0:64] == X_sharded.submatrix(0).get_block(0))) assert(np.all(X[64:128, 64:128] == X_sharded.submatrix(1, 1).get_block())) assert(np.all(X[0:64, 64:128] ==", "None]).numpy())) assert(np.all(X[:, 0:96] == X_sharded.submatrix(None, [0, 3]).numpy())) assert(np.all(X[:, 96:128] == X_sharded.submatrix( None, [3,", "assert(np.all(X_sharded_local == X)) def test_single_shard_index_put(self): X = np.random.randn(128, 
128) X_sharded = BigMatrix(\"test_1\", shape=X.shape,", "== X)) def test_single_shard_index_put(self): X = np.random.randn(128, 128) X_sharded = BigMatrix(\"test_1\", shape=X.shape, shard_sizes=X.shape)", "[0, 3]).numpy())) assert(np.all(X[:, 96:128] == X_sharded.submatrix( None, [3, None]).numpy())) def test_step_slices(self): X =", "X_sharded_local = X_sharded.submatrix(0, 0).get_block() assert(np.all(X_sharded_local == X)) def test_single_shard_index_put(self): X = np.random.randn(128, 128)", "X_sharded.submatrix(0, 0).get_block() assert(np.all(X_sharded_local == X)) def test_single_shard_index_put(self): X = np.random.randn(128, 128) X_sharded =", "0:64] == X_sharded.submatrix(0).get_block(0))) assert(np.all(X[64:128, 64:128] == X_sharded.submatrix(1, 1).get_block())) assert(np.all(X[0:64, 64:128] == X_sharded.submatrix(0, 1).get_block()))", "= np.random.randn(128, 128) shard_sizes = [32, 32] X_sharded = BigMatrix(\"test_3\", shape=X.shape, shard_sizes=shard_sizes) shard_matrix(X_sharded,", "== X_sharded.submatrix( None, [6, 8, 4]).numpy()[:, ::16])) def test_complex_slices(self): X = np.random.randn(21, 67,", "4]).numpy()[:, ::16])) assert(np.all(X[:, 96:128:64] == X_sharded.submatrix( None, [6, 8, 4]).numpy()[:, ::16])) def test_complex_slices(self):", "[16, 16] X_sharded = BigMatrix(\"test_4\", shape=X.shape, shard_sizes=shard_sizes) shard_matrix(X_sharded, X) assert(np.all(X[::32] == X_sharded.submatrix( [None,", "import pytest import numpy as np import pywren import unittest class IndexingTestClass(unittest.TestCase): def", "X = np.random.randn(128, 128) X_sharded = BigMatrix(\"test_0\", shape=X.shape, shard_sizes=X.shape) shard_matrix(X_sharded, X) X_sharded_local =", "shape=X.shape, shard_sizes=shard_sizes) shard_matrix(X_sharded, X) assert(np.all(X[0:64] == X_sharded.submatrix([2]).numpy())) assert(np.all(X[64:128] == X_sharded.submatrix([2, None]).numpy())) assert(np.all(X[:, 0:96]", "def test_single_shard_index_put(self): X = np.random.randn(128, 128) 
X_sharded = BigMatrix(\"test_1\", shape=X.shape, shard_sizes=X.shape) X_sharded.submatrix(0, 0).put_block(X)", "BigMatrix(\"test_2\", shape=X.shape, shard_sizes=shard_sizes) shard_matrix(X_sharded, X) assert(np.all(X[0:64, 0:64] == X_sharded.submatrix(0).get_block(0))) assert(np.all(X[64:128, 64:128] == X_sharded.submatrix(1,", "from numpywren import matrix_utils, binops from numpywren.matrix_init import shard_matrix import pytest import numpy", "import shard_matrix import pytest import numpy as np import pywren import unittest class", "np.random.randn(128, 128) shard_sizes = [64, 64] X_sharded = BigMatrix(\"test_2\", shape=X.shape, shard_sizes=shard_sizes) shard_matrix(X_sharded, X)", "shard_matrix(X_sharded, X) X_sharded_local = X_sharded.submatrix(0, 0).get_block() assert(np.all(X_sharded_local == X)) def test_single_shard_index_put(self): X =", "test_multiple_shard_index_get(self): X = np.random.randn(128, 128) shard_sizes = [64, 64] X_sharded = BigMatrix(\"test_2\", shape=X.shape,", "1).get_block())) assert(np.all(X[64:128, 0:64] == X_sharded.submatrix(None, 0).get_block(1))) def test_simple_slices(self): X = np.random.randn(128, 128) shard_sizes", "pywren import unittest class IndexingTestClass(unittest.TestCase): def test_single_shard_index_get(self): X = np.random.randn(128, 128) X_sharded =", "== X_sharded.submatrix([2]).numpy())) assert(np.all(X[64:128] == X_sharded.submatrix([2, None]).numpy())) assert(np.all(X[:, 0:96] == X_sharded.submatrix(None, [0, 3]).numpy())) assert(np.all(X[:,", "= BigMatrix(\"test_3\", shape=X.shape, shard_sizes=shard_sizes) shard_matrix(X_sharded, X) assert(np.all(X[0:64] == X_sharded.submatrix([2]).numpy())) assert(np.all(X[64:128] == X_sharded.submatrix([2, None]).numpy()))", "import BigMatrix from numpywren import matrix_utils, binops from numpywren.matrix_init import shard_matrix import pytest", "= np.random.randn(128, 128) X_sharded = BigMatrix(\"test_1\", shape=X.shape, shard_sizes=X.shape) X_sharded.submatrix(0, 
0).put_block(X) assert(np.all(X_sharded.numpy() == X))", "3]).numpy())) assert(np.all(X[:, 96:128] == X_sharded.submatrix( None, [3, None]).numpy())) def test_step_slices(self): X = np.random.randn(128,", "64:128] == X_sharded.submatrix(1, 1).get_block())) assert(np.all(X[0:64, 64:128] == X_sharded.submatrix(0, 1).get_block())) assert(np.all(X[64:128, 0:64] == X_sharded.submatrix(None,", "BigMatrix from numpywren import matrix_utils, binops from numpywren.matrix_init import shard_matrix import pytest import", "== X_sharded.submatrix(0, 1).get_block())) assert(np.all(X[64:128, 0:64] == X_sharded.submatrix(None, 0).get_block(1))) def test_simple_slices(self): X = np.random.randn(128,", "== X_sharded.submatrix( [1, None, 2]).numpy()[::16])) assert(np.all(X[:, 0:96:64] == X_sharded.submatrix( None, [0, 6, 4]).numpy()[:,", "= [21, 16, 11] X_sharded = BigMatrix(\"test_5\", shape=X.shape, shard_sizes=shard_sizes) shard_matrix(X_sharded, X) assert(np.all(X[:, :16,", "np.random.randn(128, 128) X_sharded = BigMatrix(\"test_1\", shape=X.shape, shard_sizes=X.shape) X_sharded.submatrix(0, 0).put_block(X) assert(np.all(X_sharded.numpy() == X)) def", "assert(np.all(X[0:64, 0:64] == X_sharded.submatrix(0).get_block(0))) assert(np.all(X[64:128, 64:128] == X_sharded.submatrix(1, 1).get_block())) assert(np.all(X[0:64, 64:128] == X_sharded.submatrix(0,", "numpywren.matrix_init import shard_matrix import pytest import numpy as np import pywren import unittest", "import matrix_utils, binops from numpywren.matrix_init import shard_matrix import pytest import numpy as np", "IndexingTestClass(unittest.TestCase): def test_single_shard_index_get(self): X = np.random.randn(128, 128) X_sharded = BigMatrix(\"test_0\", shape=X.shape, shard_sizes=X.shape) shard_matrix(X_sharded,", "== X_sharded.submatrix([2, None]).numpy())) assert(np.all(X[:, 0:96] == X_sharded.submatrix(None, [0, 3]).numpy())) assert(np.all(X[:, 96:128] == X_sharded.submatrix(", "0:96] == X_sharded.submatrix(None, [0, 
3]).numpy())) assert(np.all(X[:, 96:128] == X_sharded.submatrix( None, [3, None]).numpy())) def", "[21, 16, 11] X_sharded = BigMatrix(\"test_5\", shape=X.shape, shard_sizes=shard_sizes) shard_matrix(X_sharded, X) assert(np.all(X[:, :16, :11]", "X_sharded = BigMatrix(\"test_5\", shape=X.shape, shard_sizes=shard_sizes) shard_matrix(X_sharded, X) assert(np.all(X[:, :16, :11] == X_sharded.submatrix(0, 0,", "X) assert(np.all(X[0:64] == X_sharded.submatrix([2]).numpy())) assert(np.all(X[64:128] == X_sharded.submatrix([2, None]).numpy())) assert(np.all(X[:, 0:96] == X_sharded.submatrix(None, [0,", "X_sharded.submatrix(1, 1).get_block())) assert(np.all(X[0:64, 64:128] == X_sharded.submatrix(0, 1).get_block())) assert(np.all(X[64:128, 0:64] == X_sharded.submatrix(None, 0).get_block(1))) def", "0:64] == X_sharded.submatrix(None, 0).get_block(1))) def test_simple_slices(self): X = np.random.randn(128, 128) shard_sizes = [32,", "import numpy as np import pywren import unittest class IndexingTestClass(unittest.TestCase): def test_single_shard_index_get(self): X", "shard_matrix(X_sharded, X) assert(np.all(X[::32] == X_sharded.submatrix( [None, None, 2]).numpy()[::16])) assert(np.all(X[16::32] == X_sharded.submatrix( [1, None,", "X_sharded.submatrix( None, [6, 8, 4]).numpy()[:, ::16])) def test_complex_slices(self): X = np.random.randn(21, 67, 53)", "= [64, 64] X_sharded = BigMatrix(\"test_2\", shape=X.shape, shard_sizes=shard_sizes) shard_matrix(X_sharded, X) assert(np.all(X[0:64, 0:64] ==", "64:128] == X_sharded.submatrix(0, 1).get_block())) assert(np.all(X[64:128, 0:64] == X_sharded.submatrix(None, 0).get_block(1))) def test_simple_slices(self): X =", "assert(np.all(X[:, 96:128] == X_sharded.submatrix( None, [3, None]).numpy())) def test_step_slices(self): X = np.random.randn(128, 128)", "def test_multiple_shard_index_get(self): X = np.random.randn(128, 128) shard_sizes = [64, 64] X_sharded = BigMatrix(\"test_2\",", "0:96:64] == X_sharded.submatrix( None, [0, 6, 4]).numpy()[:, 
::16])) assert(np.all(X[:, 96:128:64] == X_sharded.submatrix( None,", "datasets from numpywren.matrix import BigMatrix from numpywren import matrix_utils, binops from numpywren.matrix_init import", "binops from numpywren.matrix_init import shard_matrix import pytest import numpy as np import pywren", "[32, 32] X_sharded = BigMatrix(\"test_3\", shape=X.shape, shard_sizes=shard_sizes) shard_matrix(X_sharded, X) assert(np.all(X[0:64] == X_sharded.submatrix([2]).numpy())) assert(np.all(X[64:128]", "8, 4]).numpy()[:, ::16])) def test_complex_slices(self): X = np.random.randn(21, 67, 53) shard_sizes = [21,", "X_sharded.submatrix(None, [0, 3]).numpy())) assert(np.all(X[:, 96:128] == X_sharded.submatrix( None, [3, None]).numpy())) def test_step_slices(self): X", "assert(np.all(X[:, 0:96:64] == X_sharded.submatrix( None, [0, 6, 4]).numpy()[:, ::16])) assert(np.all(X[:, 96:128:64] == X_sharded.submatrix(", "pytest import numpy as np import pywren import unittest class IndexingTestClass(unittest.TestCase): def test_single_shard_index_get(self):", "shard_sizes=shard_sizes) shard_matrix(X_sharded, X) assert(np.all(X[0:64] == X_sharded.submatrix([2]).numpy())) assert(np.all(X[64:128] == X_sharded.submatrix([2, None]).numpy())) assert(np.all(X[:, 0:96] ==", "shard_matrix(X_sharded, X) assert(np.all(X[0:64] == X_sharded.submatrix([2]).numpy())) assert(np.all(X[64:128] == X_sharded.submatrix([2, None]).numpy())) assert(np.all(X[:, 0:96] == X_sharded.submatrix(None,", "shard_sizes=shard_sizes) shard_matrix(X_sharded, X) assert(np.all(X[::32] == X_sharded.submatrix( [None, None, 2]).numpy()[::16])) assert(np.all(X[16::32] == X_sharded.submatrix( [1,", "matrix_utils, binops from numpywren.matrix_init import shard_matrix import pytest import numpy as np import", "X_sharded.submatrix( None, [3, None]).numpy())) def test_step_slices(self): X = np.random.randn(128, 128) shard_sizes = [16,", "from numpywren.matrix_init import shard_matrix import pytest import numpy as np import pywren 
import", "= np.random.randn(21, 67, 53) shard_sizes = [21, 16, 11] X_sharded = BigMatrix(\"test_5\", shape=X.shape,", "X = np.random.randn(128, 128) shard_sizes = [32, 32] X_sharded = BigMatrix(\"test_3\", shape=X.shape, shard_sizes=shard_sizes)", "1).get_block())) assert(np.all(X[0:64, 64:128] == X_sharded.submatrix(0, 1).get_block())) assert(np.all(X[64:128, 0:64] == X_sharded.submatrix(None, 0).get_block(1))) def test_simple_slices(self):", "assert(np.all(X[64:128, 0:64] == X_sharded.submatrix(None, 0).get_block(1))) def test_simple_slices(self): X = np.random.randn(128, 128) shard_sizes =", "2]).numpy()[::16])) assert(np.all(X[16::32] == X_sharded.submatrix( [1, None, 2]).numpy()[::16])) assert(np.all(X[:, 0:96:64] == X_sharded.submatrix( None, [0,", "import unittest class IndexingTestClass(unittest.TestCase): def test_single_shard_index_get(self): X = np.random.randn(128, 128) X_sharded = BigMatrix(\"test_0\",", "= np.random.randn(128, 128) shard_sizes = [16, 16] X_sharded = BigMatrix(\"test_4\", shape=X.shape, shard_sizes=shard_sizes) shard_matrix(X_sharded,", "None]).numpy())) def test_step_slices(self): X = np.random.randn(128, 128) shard_sizes = [16, 16] X_sharded =", "== X_sharded.submatrix(None, 0).get_block(1))) def test_simple_slices(self): X = np.random.randn(128, 128) shard_sizes = [32, 32]", "::16])) assert(np.all(X[:, 96:128:64] == X_sharded.submatrix( None, [6, 8, 4]).numpy()[:, ::16])) def test_complex_slices(self): X", "shape=X.shape, shard_sizes=X.shape) X_sharded.submatrix(0, 0).put_block(X) assert(np.all(X_sharded.numpy() == X)) def test_multiple_shard_index_get(self): X = np.random.randn(128, 128)", "0).get_block() assert(np.all(X_sharded_local == X)) def test_single_shard_index_put(self): X = np.random.randn(128, 128) X_sharded = BigMatrix(\"test_1\",", "X_sharded.submatrix( [None, None, 2]).numpy()[::16])) assert(np.all(X[16::32] == X_sharded.submatrix( [1, None, 2]).numpy()[::16])) assert(np.all(X[:, 0:96:64] ==", "= [32, 32] X_sharded = 
BigMatrix(\"test_3\", shape=X.shape, shard_sizes=shard_sizes) shard_matrix(X_sharded, X) assert(np.all(X[0:64] == X_sharded.submatrix([2]).numpy()))", "== X_sharded.submatrix( None, [3, None]).numpy())) def test_step_slices(self): X = np.random.randn(128, 128) shard_sizes =", "shard_sizes=shard_sizes) shard_matrix(X_sharded, X) assert(np.all(X[:, :16, :11] == X_sharded.submatrix(0, 0, 0).numpy())) assert(np.all(X[:, 64:67, 44:53]", "16] X_sharded = BigMatrix(\"test_4\", shape=X.shape, shard_sizes=shard_sizes) shard_matrix(X_sharded, X) assert(np.all(X[::32] == X_sharded.submatrix( [None, None,", "X_sharded.submatrix([2]).numpy())) assert(np.all(X[64:128] == X_sharded.submatrix([2, None]).numpy())) assert(np.all(X[:, 0:96] == X_sharded.submatrix(None, [0, 3]).numpy())) assert(np.all(X[:, 96:128]", "X = np.random.randn(21, 67, 53) shard_sizes = [21, 16, 11] X_sharded = BigMatrix(\"test_5\",", "shard_matrix(X_sharded, X) assert(np.all(X[:, :16, :11] == X_sharded.submatrix(0, 0, 0).numpy())) assert(np.all(X[:, 64:67, 44:53] ==", "X)) def test_single_shard_index_put(self): X = np.random.randn(128, 128) X_sharded = BigMatrix(\"test_1\", shape=X.shape, shard_sizes=X.shape) X_sharded.submatrix(0,", "X) assert(np.all(X[::32] == X_sharded.submatrix( [None, None, 2]).numpy()[::16])) assert(np.all(X[16::32] == X_sharded.submatrix( [1, None, 2]).numpy()[::16]))", "None, 2]).numpy()[::16])) assert(np.all(X[:, 0:96:64] == X_sharded.submatrix( None, [0, 6, 4]).numpy()[:, ::16])) assert(np.all(X[:, 96:128:64]", "test_step_slices(self): X = np.random.randn(128, 128) shard_sizes = [16, 16] X_sharded = BigMatrix(\"test_4\", shape=X.shape,", "[3, None]).numpy())) def test_step_slices(self): X = np.random.randn(128, 128) shard_sizes = [16, 16] X_sharded", "128) shard_sizes = [16, 16] X_sharded = BigMatrix(\"test_4\", shape=X.shape, shard_sizes=shard_sizes) shard_matrix(X_sharded, X) assert(np.all(X[::32]", "67, 53) shard_sizes = [21, 16, 11] X_sharded = BigMatrix(\"test_5\", 
shape=X.shape, shard_sizes=shard_sizes) shard_matrix(X_sharded,", "6, 4]).numpy()[:, ::16])) assert(np.all(X[:, 96:128:64] == X_sharded.submatrix( None, [6, 8, 4]).numpy()[:, ::16])) def", "= BigMatrix(\"test_1\", shape=X.shape, shard_sizes=X.shape) X_sharded.submatrix(0, 0).put_block(X) assert(np.all(X_sharded.numpy() == X)) def test_multiple_shard_index_get(self): X =" ]
[ "<= 50\",4:\"credit_history_lenght >= 3\"} result_dict = get_rules_for_individual(ind) expected_dict = {0:\"age <= 70\",3:\"no_of_primary_accts <=", ">= 3\"} result_dict = get_rules_for_individual(ind) expected_dict = {0:\"age <= 70\",3:\"no_of_primary_accts <= 50\"} assert", "def test_get_rule_for_individual(): ind = [1,0,1,0] rule_dict = {0:\"age <= 70\",1:\"ltv<=90\", 3:\"no_of_primary_accts <= 50\",4:\"credit_history_lenght", "70\",1:\"ltv<=90\", 3:\"no_of_primary_accts <= 50\",4:\"credit_history_lenght >= 3\"} result_dict = get_rules_for_individual(ind) expected_dict = {0:\"age <=", "= {0:\"age <= 70\",1:\"ltv<=90\", 3:\"no_of_primary_accts <= 50\",4:\"credit_history_lenght >= 3\"} result_dict = get_rules_for_individual(ind) expected_dict", "from kooptimize.korules import get_rules_for_individual def test_get_rule_for_individual(): ind = [1,0,1,0] rule_dict = {0:\"age <=", "pytest from kooptimize.korules import get_rules_for_individual def test_get_rule_for_individual(): ind = [1,0,1,0] rule_dict = {0:\"age", "test_get_rule_for_individual(): ind = [1,0,1,0] rule_dict = {0:\"age <= 70\",1:\"ltv<=90\", 3:\"no_of_primary_accts <= 50\",4:\"credit_history_lenght >=", "50\",4:\"credit_history_lenght >= 3\"} result_dict = get_rules_for_individual(ind) expected_dict = {0:\"age <= 70\",3:\"no_of_primary_accts <= 50\"}", "{0:\"age <= 70\",1:\"ltv<=90\", 3:\"no_of_primary_accts <= 50\",4:\"credit_history_lenght >= 3\"} result_dict = get_rules_for_individual(ind) expected_dict =", "<= 70\",1:\"ltv<=90\", 3:\"no_of_primary_accts <= 50\",4:\"credit_history_lenght >= 3\"} result_dict = get_rules_for_individual(ind) expected_dict = {0:\"age", "result_dict = get_rules_for_individual(ind) expected_dict = {0:\"age <= 70\",3:\"no_of_primary_accts <= 50\"} assert result_dict ==", "ind = [1,0,1,0] rule_dict = {0:\"age <= 70\",1:\"ltv<=90\", 3:\"no_of_primary_accts <= 50\",4:\"credit_history_lenght >= 3\"}", "<gh_stars>0 import pytest from kooptimize.korules import 
from kooptimize.korules import get_rules_for_individual


def test_get_rule_for_individual():
    """Check that an individual's binary genome selects the matching rules.

    For the genome [1, 0, 1, 0], get_rules_for_individual is expected to
    return only the rules corresponding to the "on" genes: here the rules
    keyed 0 ("age <= 70") and 3 ("no_of_primary_accts <= 50").
    """
    ind = [1, 0, 1, 0]
    # NOTE(review): the original test built a local rule_dict
    # {0: "age <= 70", 1: "ltv<=90", 3: "no_of_primary_accts <= 50",
    #  4: "credit_history_lenght >= 3"} but never used it — presumably the
    # rule table lives inside kooptimize.korules, or rule_dict was meant to
    # be passed as a second argument. Confirm against the implementation.
    result_dict = get_rules_for_individual(ind)
    expected_dict = {0: "age <= 70", 3: "no_of_primary_accts <= 50"}
    assert result_dict == expected_dict
[ "file handling.\"\"\" from .threadpool import open from . import tempfile __all__ = [\"open\",", "asyncio-friendly file handling.\"\"\" from .threadpool import open from . import tempfile __all__ =", "\"\"\"Utilities for asyncio-friendly file handling.\"\"\" from .threadpool import open from . import tempfile", "<gh_stars>1000+ \"\"\"Utilities for asyncio-friendly file handling.\"\"\" from .threadpool import open from . import", "handling.\"\"\" from .threadpool import open from . import tempfile __all__ = [\"open\", \"tempfile\"]", "for asyncio-friendly file handling.\"\"\" from .threadpool import open from . import tempfile __all__" ]
[ "correct, and it took me too long, better consider better all the case.", "num == 5: return \"jkl\" elif num == 6: return \"mno\" elif num", "for c in char: new_string = str_member + c new_output.append(new_string) perm_key(new_output, rest) return", "small_output ''' for character in keypad_string: for item in small_output: new_item = item", "of `num` last_digit = num % 10 '''Step 1''' # Recursive call to", "sorted([\"ad\", \"ae\", \"af\", \"bd\", \"be\", \"bf\", \"cd\", \"ce\", \"cf\"]) test_keypad(input, expected_output) # Example", "input[:1], input[1:] char = get_characters(int(key)) new_output = [] for c in char: new_string", "elif num == 3: return \"def\" elif num == 4: return \"ghi\" elif", "expected_output) input = 354 expected_output = sorted([\"djg\", \"ejg\", \"fjg\", \"dkg\", \"ekg\", \"fkg\", \"dlg\",", "input: return output, input elif not output: key, rest = input[:1], input[1:] char", "= perm_key([], str(num)) return out # Recursive Solution Udacity one, way easier and", "str_member + c new_output.append(new_string) perm_key(new_output, rest) return new_output, rest print('return --->', perm_key([], '23'))", "23 expected_output = sorted([\"ad\", \"ae\", \"af\", \"bd\", \"be\", \"bf\", \"cd\", \"ce\", \"cf\"]) test_keypad(input,", "TODO: Write your keypad solution here! out, _ = perm_key([], str(num)) return out", "single digit, get the LIST having one element - the associated string elif", "return \"def\" elif num == 4: return \"ghi\" elif num == 5: return", "same function with “floor” of the `num//10` small_output = keypad(num // 10) #", "'''Permute the characters of result obtained from Step 1 and Step 2''' output", "== 6: return \"mno\" elif num == 7: return \"pqrs\" elif num ==", "Udacity one, way easier and it's great how handle digits def keypad(num): #", "return output def test_keypad(input, expected_output): print(keypad(input)) if sorted(keypad(input)) == expected_output: print(\"Yay. 
We got", "\"jkl\" elif num == 6: return \"mno\" elif num == 7: return \"pqrs\"", "= num % 10 '''Step 1''' # Recursive call to the same function", "print(\"Yay. We got it right.\") else: print(\"Oops! That was incorrect.\") # Example case", "out # Recursive Solution Udacity one, way easier and it's great how handle", "with “floor” of the `num//10` small_output = keypad(num // 10) # returns a", "it took me too long, better consider better all the case. def perm_key(output,", "c new_output.append(new_string) perm_key(new_output, rest) return new_output, rest print('return --->', perm_key([], '23')) def keypad(num):", "Recursive Solution Udacity one, way easier and it's great how handle digits def", "for the `last_digit` keypad_string = get_characters(last_digit) # returns a string '''Permute the characters", "2''' output = list() ''' The Idea: Each character of keypad_string must be", "\"dki\", \"eki\", \"fki\", \"dli\", \"eli\", \"fli\"]) test_keypad(input, expected_output) # Base case: list with", "= sorted([\"t\", \"u\", \"v\"]) test_keypad(input, expected_output) input = 354 expected_output = sorted([\"djg\", \"ejg\",", "\"wxyz\" else: return \"\" # my solution, this was not correct, and it", "and Step 2''' output = list() ''' The Idea: Each character of keypad_string", "test_keypad(input, expected_output) # Example case input = 8 expected_output = sorted([\"t\", \"u\", \"v\"])", "get_characters(int(key)) new_output = [] for str_member in output: for c in char: new_string", "= get_characters(int(key)) new_output = [] for str_member in output: for c in char:", "\"dkg\", \"ekg\", \"fkg\", \"dlg\", \"elg\", \"flg\", \"djh\", \"ejh\", \"fjh\", \"dkh\", \"ekh\", \"fkh\", \"dlh\",", "char = get_characters(int(key)) new_output = [] for str_member in output: for c in", "We got it right.\") else: print(\"Oops! 
That was incorrect.\") # Example case input", "5: return \"jkl\" elif num == 6: return \"mno\" elif num == 7:", "char: new_string = c new_output.append(new_string) final_output, final_input = perm_key(new_output, rest) return final_output, final_input", "def test_keypad(input, expected_output): print(keypad(input)) if sorted(keypad(input)) == expected_output: print(\"Yay. We got it right.\")", "associated string for the `last_digit` keypad_string = get_characters(last_digit) # returns a string '''Permute", "% 10 '''Step 1''' # Recursive call to the same function with “floor”", "1 and Step 2''' output = list() ''' The Idea: Each character of", "it right.\") else: print(\"Oops! That was incorrect.\") # Example case input = 23", "else: print(\"Oops! That was incorrect.\") # Example case input = 23 expected_output =", "keypad(num): # Base case if num <= 1: return [\"\"] # If `num`", "\"fa\", \"fb\", \"fc\"]) test_keypad(input, expected_output) # Example case input = 8 expected_output =", "me too long, better consider better all the case. def perm_key(output, input): if", "= 32 expected_output = sorted([\"da\", \"db\", \"dc\", \"ea\", \"eb\", \"ec\", \"fa\", \"fb\", \"fc\"])", "the associated string elif 1 < num <= 9: return list(get_characters(num)) # Otherwise", "num == 9: return \"wxyz\" else: return \"\" # my solution, this was", "== 2: return \"abc\" elif num == 3: return \"def\" elif num ==", "\"fb\", \"fc\"]) test_keypad(input, expected_output) # Example case input = 8 expected_output = sorted([\"t\",", "output = list() ''' The Idea: Each character of keypad_string must be appended", "incorrect.\") # Example case input = 23 expected_output = sorted([\"ad\", \"ae\", \"af\", \"bd\",", "not input: return output, input elif not output: key, rest = input[:1], input[1:]", "The Idea: Each character of keypad_string must be appended to the end of", "print(keypad(input)) if sorted(keypad(input)) == expected_output: print(\"Yay. We got it right.\") else: print(\"Oops! 
That", "string '''Permute the characters of result obtained from Step 1 and Step 2'''", "this was not correct, and it took me too long, better consider better", "return \"\" # my solution, this was not correct, and it took me", "return \"pqrs\" elif num == 8: return \"tuv\" elif num == 9: return", "# Example case input = 32 expected_output = sorted([\"da\", \"db\", \"dc\", \"ea\", \"eb\",", "== 4: return \"ghi\" elif num == 5: return \"jkl\" elif num ==", "input elif not output: key, rest = input[:1], input[1:] char = get_characters(int(key)) new_output", "# Get the associated string for the `last_digit` keypad_string = get_characters(last_digit) # returns", "return \"mno\" elif num == 7: return \"pqrs\" elif num == 8: return", "in keypad_string: for item in small_output: new_item = item + character output.append(new_item) return", "input): if not input: return output, input elif not output: key, rest =", "get_characters(last_digit) # returns a string '''Permute the characters of result obtained from Step", "\"ae\", \"af\", \"bd\", \"be\", \"bf\", \"cd\", \"ce\", \"cf\"]) test_keypad(input, expected_output) # Example case", "a LIST of strings '''Step 2''' # Get the associated string for the", "character in keypad_string: for item in small_output: new_item = item + character output.append(new_item)", "keypad solution here! 
out, _ = perm_key([], str(num)) return out # Recursive Solution", "input[:1], input[1:] char = get_characters(int(key)) new_output = [] for str_member in output: for", "Example case input = 32 expected_output = sorted([\"da\", \"db\", \"dc\", \"ea\", \"eb\", \"ec\",", "\"fkg\", \"dlg\", \"elg\", \"flg\", \"djh\", \"ejh\", \"fjh\", \"dkh\", \"ekh\", \"fkh\", \"dlh\", \"elh\", \"flh\",", "easier and it's great how handle digits def keypad(num): # Base case if", "great how handle digits def keypad(num): # Base case if num <= 1:", "return \"ghi\" elif num == 5: return \"jkl\" elif num == 6: return", "Each character of keypad_string must be appended to the end of each string", "return \"jkl\" elif num == 6: return \"mno\" elif num == 7: return", "list() ''' The Idea: Each character of keypad_string must be appended to the", "def get_characters(num): if num == 2: return \"abc\" elif num == 3: return", "return new_output, rest print('return --->', perm_key([], '23')) def keypad(num): # TODO: Write your", "in the small_output ''' for character in keypad_string: for item in small_output: new_item", "\"fc\"]) test_keypad(input, expected_output) # Example case input = 8 expected_output = sorted([\"t\", \"u\",", "Example case input = 23 expected_output = sorted([\"ad\", \"ae\", \"af\", \"bd\", \"be\", \"bf\",", "= perm_key(new_output, rest) return final_output, final_input else: key, rest = input[:1], input[1:] char", "\"ekg\", \"fkg\", \"dlg\", \"elg\", \"flg\", \"djh\", \"ejh\", \"fjh\", \"dkh\", \"ekh\", \"fkh\", \"dlh\", \"elh\",", "solution here! 
out, _ = perm_key([], str(num)) return out # Recursive Solution Udacity", "out, _ = perm_key([], str(num)) return out # Recursive Solution Udacity one, way", "== 8: return \"tuv\" elif num == 9: return \"wxyz\" else: return \"\"", "the characters of result obtained from Step 1 and Step 2''' output =", "test_keypad(input, expected_output) # Example case input = 32 expected_output = sorted([\"da\", \"db\", \"dc\",", "= sorted([\"ad\", \"ae\", \"af\", \"bd\", \"be\", \"bf\", \"cd\", \"ce\", \"cf\"]) test_keypad(input, expected_output) #", "of result obtained from Step 1 and Step 2''' output = list() '''", "better all the case. def perm_key(output, input): if not input: return output, input", "// 10) # returns a LIST of strings '''Step 2''' # Get the", "case input = 8 expected_output = sorted([\"t\", \"u\", \"v\"]) test_keypad(input, expected_output) input =", "perm_key(new_output, rest) return final_output, final_input else: key, rest = input[:1], input[1:] char =", "\"flh\", \"dji\", \"eji\", \"fji\", \"dki\", \"eki\", \"fki\", \"dli\", \"eli\", \"fli\"]) test_keypad(input, expected_output) #", "== 7: return \"pqrs\" elif num == 8: return \"tuv\" elif num ==", "available in the small_output ''' for character in keypad_string: for item in small_output:", "keypad(num // 10) # returns a LIST of strings '''Step 2''' # Get", "one element - the associated string elif 1 < num <= 9: return", "32 expected_output = sorted([\"da\", \"db\", \"dc\", \"ea\", \"eb\", \"ec\", \"fa\", \"fb\", \"fc\"]) test_keypad(input,", "num == 7: return \"pqrs\" elif num == 8: return \"tuv\" elif num", "7: return \"pqrs\" elif num == 8: return \"tuv\" elif num == 9:", "\"bd\", \"be\", \"bf\", \"cd\", \"ce\", \"cf\"]) test_keypad(input, expected_output) # Example case input =", "item in small_output: new_item = item + character output.append(new_item) return output def test_keypad(input,", "1''' # Recursive call to the same function with “floor” of the `num//10`", "character output.append(new_item) 
return output def test_keypad(input, expected_output): print(keypad(input)) if sorted(keypad(input)) == expected_output: print(\"Yay.", "def keypad(num): # TODO: Write your keypad solution here! out, _ = perm_key([],", "the `last_digit` keypad_string = get_characters(last_digit) # returns a string '''Permute the characters of", "2''' # Get the associated string for the `last_digit` keypad_string = get_characters(last_digit) #", "must be appended to the end of each string available in the small_output", "== expected_output: print(\"Yay. We got it right.\") else: print(\"Oops! That was incorrect.\") #", "it's great how handle digits def keypad(num): # Base case if num <=", "# Base case: list with empty string input = 0 expected_output = [\"\"]", "if not input: return output, input elif not output: key, rest = input[:1],", "\"ejg\", \"fjg\", \"dkg\", \"ekg\", \"fkg\", \"dlg\", \"elg\", \"flg\", \"djh\", \"ejh\", \"fjh\", \"dkh\", \"ekh\",", "key, rest = input[:1], input[1:] char = get_characters(int(key)) new_output = [] for str_member", "num == 6: return \"mno\" elif num == 7: return \"pqrs\" elif num", "to the end of each string available in the small_output ''' for character", "the small_output ''' for character in keypad_string: for item in small_output: new_item =", "\"ghi\" elif num == 5: return \"jkl\" elif num == 6: return \"mno\"", "for c in char: new_string = c new_output.append(new_string) final_output, final_input = perm_key(new_output, rest)", "\"fkh\", \"dlh\", \"elh\", \"flh\", \"dji\", \"eji\", \"fji\", \"dki\", \"eki\", \"fki\", \"dli\", \"eli\", \"fli\"])", "10. 
Find the unit's (last) digits of `num` last_digit = num % 10", "If `num` is single digit, get the LIST having one element - the", "key, rest = input[:1], input[1:] char = get_characters(int(key)) new_output = [] for c", "\"be\", \"bf\", \"cd\", \"ce\", \"cf\"]) test_keypad(input, expected_output) # Example case input = 32", "get the LIST having one element - the associated string elif 1 <", "new_string = c new_output.append(new_string) final_output, final_input = perm_key(new_output, rest) return final_output, final_input else:", "Base case if num <= 1: return [\"\"] # If `num` is single", "LIST of strings '''Step 2''' # Get the associated string for the `last_digit`", "elif num == 5: return \"jkl\" elif num == 6: return \"mno\" elif", "perm_key([], '23')) def keypad(num): # TODO: Write your keypad solution here! out, _", "\"eki\", \"fki\", \"dli\", \"eli\", \"fli\"]) test_keypad(input, expected_output) # Base case: list with empty", "list(get_characters(num)) # Otherwise `num` >= 10. Find the unit's (last) digits of `num`", "return output, input elif not output: key, rest = input[:1], input[1:] char =", "Solution Udacity one, way easier and it's great how handle digits def keypad(num):", "a string '''Permute the characters of result obtained from Step 1 and Step", "characters of result obtained from Step 1 and Step 2''' output = list()", "small_output = keypad(num // 10) # returns a LIST of strings '''Step 2'''", "expected_output) # Example case input = 8 expected_output = sorted([\"t\", \"u\", \"v\"]) test_keypad(input,", "case. 
def perm_key(output, input): if not input: return output, input elif not output:", "\"dlh\", \"elh\", \"flh\", \"dji\", \"eji\", \"fji\", \"dki\", \"eki\", \"fki\", \"dli\", \"eli\", \"fli\"]) test_keypad(input,", "\"fjg\", \"dkg\", \"ekg\", \"fkg\", \"dlg\", \"elg\", \"flg\", \"djh\", \"ejh\", \"fjh\", \"dkh\", \"ekh\", \"fkh\",", "\"ce\", \"cf\"]) test_keypad(input, expected_output) # Example case input = 32 expected_output = sorted([\"da\",", "if sorted(keypad(input)) == expected_output: print(\"Yay. We got it right.\") else: print(\"Oops! That was", "expected_output = sorted([\"ad\", \"ae\", \"af\", \"bd\", \"be\", \"bf\", \"cd\", \"ce\", \"cf\"]) test_keypad(input, expected_output)", "and it took me too long, better consider better all the case. def", "Idea: Each character of keypad_string must be appended to the end of each", "return \"tuv\" elif num == 9: return \"wxyz\" else: return \"\" # my", "keypad(num): # TODO: Write your keypad solution here! out, _ = perm_key([], str(num))", "return out # Recursive Solution Udacity one, way easier and it's great how", "Example case input = 8 expected_output = sorted([\"t\", \"u\", \"v\"]) test_keypad(input, expected_output) input", "num == 4: return \"ghi\" elif num == 5: return \"jkl\" elif num", "354 expected_output = sorted([\"djg\", \"ejg\", \"fjg\", \"dkg\", \"ekg\", \"fkg\", \"dlg\", \"elg\", \"flg\", \"djh\",", "# my solution, this was not correct, and it took me too long,", "output: for c in char: new_string = str_member + c new_output.append(new_string) perm_key(new_output, rest)", "<filename>data_structures/recursion/key_combinatons.py def get_characters(num): if num == 2: return \"abc\" elif num == 3:", "4: return \"ghi\" elif num == 5: return \"jkl\" elif num == 6:", "# If `num` is single digit, get the LIST having one element -", "= input[:1], input[1:] char = get_characters(int(key)) new_output = [] for c in char:", "# Recursive call to the same function with “floor” of the `num//10` small_output", 
"string available in the small_output ''' for character in keypad_string: for item in", "function with “floor” of the `num//10` small_output = keypad(num // 10) # returns", "if num == 2: return \"abc\" elif num == 3: return \"def\" elif", "str(num)) return out # Recursive Solution Udacity one, way easier and it's great", "elif num == 8: return \"tuv\" elif num == 9: return \"wxyz\" else:", "if num <= 1: return [\"\"] # If `num` is single digit, get", "Write your keypad solution here! out, _ = perm_key([], str(num)) return out #", "test_keypad(input, expected_output) # Base case: list with empty string input = 0 expected_output", "input = 8 expected_output = sorted([\"t\", \"u\", \"v\"]) test_keypad(input, expected_output) input = 354", "the end of each string available in the small_output ''' for character in", "6: return \"mno\" elif num == 7: return \"pqrs\" elif num == 8:", "strings '''Step 2''' # Get the associated string for the `last_digit` keypad_string =", "\"dji\", \"eji\", \"fji\", \"dki\", \"eki\", \"fki\", \"dli\", \"eli\", \"fli\"]) test_keypad(input, expected_output) # Base", "# Otherwise `num` >= 10. Find the unit's (last) digits of `num` last_digit", "consider better all the case. def perm_key(output, input): if not input: return output,", "LIST having one element - the associated string elif 1 < num <=", ">= 10. Find the unit's (last) digits of `num` last_digit = num %", "your keypad solution here! 
out, _ = perm_key([], str(num)) return out # Recursive", "sorted([\"t\", \"u\", \"v\"]) test_keypad(input, expected_output) input = 354 expected_output = sorted([\"djg\", \"ejg\", \"fjg\",", "\"elh\", \"flh\", \"dji\", \"eji\", \"fji\", \"dki\", \"eki\", \"fki\", \"dli\", \"eli\", \"fli\"]) test_keypad(input, expected_output)", "final_input = perm_key(new_output, rest) return final_output, final_input else: key, rest = input[:1], input[1:]", "get_characters(int(key)) new_output = [] for c in char: new_string = c new_output.append(new_string) final_output,", "digits def keypad(num): # Base case if num <= 1: return [\"\"] #", "c in char: new_string = c new_output.append(new_string) final_output, final_input = perm_key(new_output, rest) return", "final_input else: key, rest = input[:1], input[1:] char = get_characters(int(key)) new_output = []", "one, way easier and it's great how handle digits def keypad(num): # Base", "# TODO: Write your keypad solution here! out, _ = perm_key([], str(num)) return", "input = 354 expected_output = sorted([\"djg\", \"ejg\", \"fjg\", \"dkg\", \"ekg\", \"fkg\", \"dlg\", \"elg\",", "appended to the end of each string available in the small_output ''' for", "case input = 32 expected_output = sorted([\"da\", \"db\", \"dc\", \"ea\", \"eb\", \"ec\", \"fa\",", "\"dc\", \"ea\", \"eb\", \"ec\", \"fa\", \"fb\", \"fc\"]) test_keypad(input, expected_output) # Example case input", "\"fli\"]) test_keypad(input, expected_output) # Base case: list with empty string input = 0", "c new_output.append(new_string) final_output, final_input = perm_key(new_output, rest) return final_output, final_input else: key, rest", "all the case. 
def perm_key(output, input): if not input: return output, input elif", "get_characters(num): if num == 2: return \"abc\" elif num == 3: return \"def\"", "8: return \"tuv\" elif num == 9: return \"wxyz\" else: return \"\" #", "== 5: return \"jkl\" elif num == 6: return \"mno\" elif num ==", "\"tuv\" elif num == 9: return \"wxyz\" else: return \"\" # my solution,", "keypad_string must be appended to the end of each string available in the", "[] for c in char: new_string = c new_output.append(new_string) final_output, final_input = perm_key(new_output,", "small_output: new_item = item + character output.append(new_item) return output def test_keypad(input, expected_output): print(keypad(input))", "rest) return new_output, rest print('return --->', perm_key([], '23')) def keypad(num): # TODO: Write", "keypad_string = get_characters(last_digit) # returns a string '''Permute the characters of result obtained", "output def test_keypad(input, expected_output): print(keypad(input)) if sorted(keypad(input)) == expected_output: print(\"Yay. We got it", "the LIST having one element - the associated string elif 1 < num", "\"\" # my solution, this was not correct, and it took me too", "# Example case input = 8 expected_output = sorted([\"t\", \"u\", \"v\"]) test_keypad(input, expected_output)", "9: return \"wxyz\" else: return \"\" # my solution, this was not correct,", "3: return \"def\" elif num == 4: return \"ghi\" elif num == 5:", "def keypad(num): # Base case if num <= 1: return [\"\"] # If", "of each string available in the small_output ''' for character in keypad_string: for", "expected_output) # Base case: list with empty string input = 0 expected_output =", "long, better consider better all the case. def perm_key(output, input): if not input:", "new_output, rest print('return --->', perm_key([], '23')) def keypad(num): # TODO: Write your keypad", "- the associated string elif 1 < num <= 9: return list(get_characters(num)) #", "print(\"Oops! 
That was incorrect.\") # Example case input = 23 expected_output = sorted([\"ad\",", "# returns a string '''Permute the characters of result obtained from Step 1", "rest print('return --->', perm_key([], '23')) def keypad(num): # TODO: Write your keypad solution", "in char: new_string = c new_output.append(new_string) final_output, final_input = perm_key(new_output, rest) return final_output,", "\"db\", \"dc\", \"ea\", \"eb\", \"ec\", \"fa\", \"fb\", \"fc\"]) test_keypad(input, expected_output) # Example case", "keypad_string: for item in small_output: new_item = item + character output.append(new_item) return output", "--->', perm_key([], '23')) def keypad(num): # TODO: Write your keypad solution here! out,", "\"ea\", \"eb\", \"ec\", \"fa\", \"fb\", \"fc\"]) test_keypad(input, expected_output) # Example case input =", "`last_digit` keypad_string = get_characters(last_digit) # returns a string '''Permute the characters of result", "return final_output, final_input else: key, rest = input[:1], input[1:] char = get_characters(int(key)) new_output", "new_output = [] for str_member in output: for c in char: new_string =", "sorted([\"da\", \"db\", \"dc\", \"ea\", \"eb\", \"ec\", \"fa\", \"fb\", \"fc\"]) test_keypad(input, expected_output) # Example", "test_keypad(input, expected_output) input = 354 expected_output = sorted([\"djg\", \"ejg\", \"fjg\", \"dkg\", \"ekg\", \"fkg\",", "of the `num//10` small_output = keypad(num // 10) # returns a LIST of", "\"eb\", \"ec\", \"fa\", \"fb\", \"fc\"]) test_keypad(input, expected_output) # Example case input = 8", "and it's great how handle digits def keypad(num): # Base case if num", "string for the `last_digit` keypad_string = get_characters(last_digit) # returns a string '''Permute the", "obtained from Step 1 and Step 2''' output = list() ''' The Idea:", "the `num//10` small_output = keypad(num // 10) # returns a LIST of strings", "return [\"\"] # If `num` is single digit, get the LIST having one", "def perm_key(output, input): 
if not input: return output, input elif not output: key,", "Get the associated string for the `last_digit` keypad_string = get_characters(last_digit) # returns a", "num <= 1: return [\"\"] # If `num` is single digit, get the", "input = 23 expected_output = sorted([\"ad\", \"ae\", \"af\", \"bd\", \"be\", \"bf\", \"cd\", \"ce\",", "sorted([\"djg\", \"ejg\", \"fjg\", \"dkg\", \"ekg\", \"fkg\", \"dlg\", \"elg\", \"flg\", \"djh\", \"ejh\", \"fjh\", \"dkh\",", "\"v\"]) test_keypad(input, expected_output) input = 354 expected_output = sorted([\"djg\", \"ejg\", \"fjg\", \"dkg\", \"ekg\",", "elif num == 6: return \"mno\" elif num == 7: return \"pqrs\" elif", "for character in keypad_string: for item in small_output: new_item = item + character", "perm_key([], str(num)) return out # Recursive Solution Udacity one, way easier and it's", "= 23 expected_output = sorted([\"ad\", \"ae\", \"af\", \"bd\", \"be\", \"bf\", \"cd\", \"ce\", \"cf\"])", "\"fji\", \"dki\", \"eki\", \"fki\", \"dli\", \"eli\", \"fli\"]) test_keypad(input, expected_output) # Base case: list", "elif not output: key, rest = input[:1], input[1:] char = get_characters(int(key)) new_output =", "rest) return final_output, final_input else: key, rest = input[:1], input[1:] char = get_characters(int(key))", "case: list with empty string input = 0 expected_output = [\"\"] test_keypad(input, expected_output)", "= get_characters(last_digit) # returns a string '''Permute the characters of result obtained from", "be appended to the end of each string available in the small_output '''", "1 < num <= 9: return list(get_characters(num)) # Otherwise `num` >= 10. 
Find", "Step 1 and Step 2''' output = list() ''' The Idea: Each character", "for str_member in output: for c in char: new_string = str_member + c", "way easier and it's great how handle digits def keypad(num): # Base case", "\"dli\", \"eli\", \"fli\"]) test_keypad(input, expected_output) # Base case: list with empty string input", "result obtained from Step 1 and Step 2''' output = list() ''' The", "each string available in the small_output ''' for character in keypad_string: for item", "elif num == 9: return \"wxyz\" else: return \"\" # my solution, this", "for item in small_output: new_item = item + character output.append(new_item) return output def", "Find the unit's (last) digits of `num` last_digit = num % 10 '''Step", "<= 9: return list(get_characters(num)) # Otherwise `num` >= 10. Find the unit's (last)", "''' The Idea: Each character of keypad_string must be appended to the end", "\"bf\", \"cd\", \"ce\", \"cf\"]) test_keypad(input, expected_output) # Example case input = 32 expected_output", "expected_output = sorted([\"t\", \"u\", \"v\"]) test_keypad(input, expected_output) input = 354 expected_output = sorted([\"djg\",", "better consider better all the case. def perm_key(output, input): if not input: return", "+ c new_output.append(new_string) perm_key(new_output, rest) return new_output, rest print('return --->', perm_key([], '23')) def", "rest = input[:1], input[1:] char = get_characters(int(key)) new_output = [] for str_member in", "sorted(keypad(input)) == expected_output: print(\"Yay. We got it right.\") else: print(\"Oops! 
That was incorrect.\")", "\"cd\", \"ce\", \"cf\"]) test_keypad(input, expected_output) # Example case input = 32 expected_output =", "expected_output) # Example case input = 32 expected_output = sorted([\"da\", \"db\", \"dc\", \"ea\",", "\"dkh\", \"ekh\", \"fkh\", \"dlh\", \"elh\", \"flh\", \"dji\", \"eji\", \"fji\", \"dki\", \"eki\", \"fki\", \"dli\",", "having one element - the associated string elif 1 < num <= 9:", "<= 1: return [\"\"] # If `num` is single digit, get the LIST", "'23')) def keypad(num): # TODO: Write your keypad solution here! out, _ =", "case input = 23 expected_output = sorted([\"ad\", \"ae\", \"af\", \"bd\", \"be\", \"bf\", \"cd\",", "\"elg\", \"flg\", \"djh\", \"ejh\", \"fjh\", \"dkh\", \"ekh\", \"fkh\", \"dlh\", \"elh\", \"flh\", \"dji\", \"eji\",", "the case. def perm_key(output, input): if not input: return output, input elif not", "= 8 expected_output = sorted([\"t\", \"u\", \"v\"]) test_keypad(input, expected_output) input = 354 expected_output", "perm_key(output, input): if not input: return output, input elif not output: key, rest", "num == 2: return \"abc\" elif num == 3: return \"def\" elif num", "is single digit, get the LIST having one element - the associated string", "\"eji\", \"fji\", \"dki\", \"eki\", \"fki\", \"dli\", \"eli\", \"fli\"]) test_keypad(input, expected_output) # Base case:", "else: return \"\" # my solution, this was not correct, and it took", "10) # returns a LIST of strings '''Step 2''' # Get the associated", "\"fjh\", \"dkh\", \"ekh\", \"fkh\", \"dlh\", \"elh\", \"flh\", \"dji\", \"eji\", \"fji\", \"dki\", \"eki\", \"fki\",", "in small_output: new_item = item + character output.append(new_item) return output def test_keypad(input, expected_output):", "2: return \"abc\" elif num == 3: return \"def\" elif num == 4:", "end of each string available in the small_output ''' for character in keypad_string:", "expected_output: print(\"Yay. We got it right.\") else: print(\"Oops! 
That was incorrect.\") # Example", "num == 8: return \"tuv\" elif num == 9: return \"wxyz\" else: return", "`num` is single digit, get the LIST having one element - the associated", "case if num <= 1: return [\"\"] # If `num` is single digit,", "else: key, rest = input[:1], input[1:] char = get_characters(int(key)) new_output = [] for", "digits of `num` last_digit = num % 10 '''Step 1''' # Recursive call", "That was incorrect.\") # Example case input = 23 expected_output = sorted([\"ad\", \"ae\",", "return \"wxyz\" else: return \"\" # my solution, this was not correct, and", "output, input elif not output: key, rest = input[:1], input[1:] char = get_characters(int(key))", "how handle digits def keypad(num): # Base case if num <= 1: return", "final_output, final_input else: key, rest = input[:1], input[1:] char = get_characters(int(key)) new_output =", "\"def\" elif num == 4: return \"ghi\" elif num == 5: return \"jkl\"", "char: new_string = str_member + c new_output.append(new_string) perm_key(new_output, rest) return new_output, rest print('return", "the associated string for the `last_digit` keypad_string = get_characters(last_digit) # returns a string", "8 expected_output = sorted([\"t\", \"u\", \"v\"]) test_keypad(input, expected_output) input = 354 expected_output =", "'''Step 2''' # Get the associated string for the `last_digit` keypad_string = get_characters(last_digit)", "not correct, and it took me too long, better consider better all the", "num % 10 '''Step 1''' # Recursive call to the same function with", "Recursive call to the same function with “floor” of the `num//10` small_output =", "9: return list(get_characters(num)) # Otherwise `num` >= 10. 
Find the unit's (last) digits", "= [] for c in char: new_string = c new_output.append(new_string) final_output, final_input =", "my solution, this was not correct, and it took me too long, better", "'''Step 1''' # Recursive call to the same function with “floor” of the", "[\"\"] # If `num` is single digit, get the LIST having one element", "was incorrect.\") # Example case input = 23 expected_output = sorted([\"ad\", \"ae\", \"af\",", "Otherwise `num` >= 10. Find the unit's (last) digits of `num` last_digit =", "== 3: return \"def\" elif num == 4: return \"ghi\" elif num ==", "last_digit = num % 10 '''Step 1''' # Recursive call to the same", "\"ekh\", \"fkh\", \"dlh\", \"elh\", \"flh\", \"dji\", \"eji\", \"fji\", \"dki\", \"eki\", \"fki\", \"dli\", \"eli\",", "right.\") else: print(\"Oops! That was incorrect.\") # Example case input = 23 expected_output", "= sorted([\"da\", \"db\", \"dc\", \"ea\", \"eb\", \"ec\", \"fa\", \"fb\", \"fc\"]) test_keypad(input, expected_output) #", "num <= 9: return list(get_characters(num)) # Otherwise `num` >= 10. Find the unit's", "Step 2''' output = list() ''' The Idea: Each character of keypad_string must", "unit's (last) digits of `num` last_digit = num % 10 '''Step 1''' #", "new_string = str_member + c new_output.append(new_string) perm_key(new_output, rest) return new_output, rest print('return --->',", "c in char: new_string = str_member + c new_output.append(new_string) perm_key(new_output, rest) return new_output,", "# Example case input = 23 expected_output = sorted([\"ad\", \"ae\", \"af\", \"bd\", \"be\",", "\"flg\", \"djh\", \"ejh\", \"fjh\", \"dkh\", \"ekh\", \"fkh\", \"dlh\", \"elh\", \"flh\", \"dji\", \"eji\", \"fji\",", "Base case: list with empty string input = 0 expected_output = [\"\"] test_keypad(input,", "\"mno\" elif num == 7: return \"pqrs\" elif num == 8: return \"tuv\"", "character of keypad_string must be appended to the end of each string available", "`num` >= 10. 
Find the unit's (last) digits of `num` last_digit = num", "\"ec\", \"fa\", \"fb\", \"fc\"]) test_keypad(input, expected_output) # Example case input = 8 expected_output", "input[1:] char = get_characters(int(key)) new_output = [] for c in char: new_string =", "< num <= 9: return list(get_characters(num)) # Otherwise `num` >= 10. Find the", "= item + character output.append(new_item) return output def test_keypad(input, expected_output): print(keypad(input)) if sorted(keypad(input))", "associated string elif 1 < num <= 9: return list(get_characters(num)) # Otherwise `num`", "\"fki\", \"dli\", \"eli\", \"fli\"]) test_keypad(input, expected_output) # Base case: list with empty string", "to the same function with “floor” of the `num//10` small_output = keypad(num //", "perm_key(new_output, rest) return new_output, rest print('return --->', perm_key([], '23')) def keypad(num): # TODO:", "handle digits def keypad(num): # Base case if num <= 1: return [\"\"]", "in char: new_string = str_member + c new_output.append(new_string) perm_key(new_output, rest) return new_output, rest", "(last) digits of `num` last_digit = num % 10 '''Step 1''' # Recursive", "\"pqrs\" elif num == 8: return \"tuv\" elif num == 9: return \"wxyz\"", "returns a string '''Permute the characters of result obtained from Step 1 and", "was not correct, and it took me too long, better consider better all", "10 '''Step 1''' # Recursive call to the same function with “floor” of", "print('return --->', perm_key([], '23')) def keypad(num): # TODO: Write your keypad solution here!", "took me too long, better consider better all the case. def perm_key(output, input):", "new_output.append(new_string) perm_key(new_output, rest) return new_output, rest print('return --->', perm_key([], '23')) def keypad(num): #", "return list(get_characters(num)) # Otherwise `num` >= 10. 
Find the unit's (last) digits of", "_ = perm_key([], str(num)) return out # Recursive Solution Udacity one, way easier", "call to the same function with “floor” of the `num//10` small_output = keypad(num", "\"cf\"]) test_keypad(input, expected_output) # Example case input = 32 expected_output = sorted([\"da\", \"db\",", "of strings '''Step 2''' # Get the associated string for the `last_digit` keypad_string", "expected_output = sorted([\"djg\", \"ejg\", \"fjg\", \"dkg\", \"ekg\", \"fkg\", \"dlg\", \"elg\", \"flg\", \"djh\", \"ejh\",", "= str_member + c new_output.append(new_string) perm_key(new_output, rest) return new_output, rest print('return --->', perm_key([],", "\"eli\", \"fli\"]) test_keypad(input, expected_output) # Base case: list with empty string input =", "too long, better consider better all the case. def perm_key(output, input): if not", "returns a LIST of strings '''Step 2''' # Get the associated string for", "solution, this was not correct, and it took me too long, better consider", "# Recursive Solution Udacity one, way easier and it's great how handle digits", "element - the associated string elif 1 < num <= 9: return list(get_characters(num))", "input = 32 expected_output = sorted([\"da\", \"db\", \"dc\", \"ea\", \"eb\", \"ec\", \"fa\", \"fb\",", "from Step 1 and Step 2''' output = list() ''' The Idea: Each", "“floor” of the `num//10` small_output = keypad(num // 10) # returns a LIST", "# returns a LIST of strings '''Step 2''' # Get the associated string", "expected_output): print(keypad(input)) if sorted(keypad(input)) == expected_output: print(\"Yay. 
We got it right.\") else: print(\"Oops!", "digit, get the LIST having one element - the associated string elif 1", "\"ejh\", \"fjh\", \"dkh\", \"ekh\", \"fkh\", \"dlh\", \"elh\", \"flh\", \"dji\", \"eji\", \"fji\", \"dki\", \"eki\",", "the unit's (last) digits of `num` last_digit = num % 10 '''Step 1'''", "new_item = item + character output.append(new_item) return output def test_keypad(input, expected_output): print(keypad(input)) if", "of keypad_string must be appended to the end of each string available in", "\"dlg\", \"elg\", \"flg\", \"djh\", \"ejh\", \"fjh\", \"dkh\", \"ekh\", \"fkh\", \"dlh\", \"elh\", \"flh\", \"dji\",", "# Base case if num <= 1: return [\"\"] # If `num` is", "output: key, rest = input[:1], input[1:] char = get_characters(int(key)) new_output = [] for", "`num//10` small_output = keypad(num // 10) # returns a LIST of strings '''Step", "+ character output.append(new_item) return output def test_keypad(input, expected_output): print(keypad(input)) if sorted(keypad(input)) == expected_output:", "\"abc\" elif num == 3: return \"def\" elif num == 4: return \"ghi\"", "= sorted([\"djg\", \"ejg\", \"fjg\", \"dkg\", \"ekg\", \"fkg\", \"dlg\", \"elg\", \"flg\", \"djh\", \"ejh\", \"fjh\",", "== 9: return \"wxyz\" else: return \"\" # my solution, this was not", "string elif 1 < num <= 9: return list(get_characters(num)) # Otherwise `num` >=", "elif num == 4: return \"ghi\" elif num == 5: return \"jkl\" elif", "[] for str_member in output: for c in char: new_string = str_member +", "\"u\", \"v\"]) test_keypad(input, expected_output) input = 354 expected_output = sorted([\"djg\", \"ejg\", \"fjg\", \"dkg\",", "rest = input[:1], input[1:] char = get_characters(int(key)) new_output = [] for c in", "= c new_output.append(new_string) final_output, final_input = perm_key(new_output, rest) return final_output, final_input else: key,", "= 354 expected_output = sorted([\"djg\", \"ejg\", \"fjg\", \"dkg\", \"ekg\", \"fkg\", \"dlg\", \"elg\", \"flg\",", "num == 3: 
return \"def\" elif num == 4: return \"ghi\" elif num", "test_keypad(input, expected_output): print(keypad(input)) if sorted(keypad(input)) == expected_output: print(\"Yay. We got it right.\") else:", "= keypad(num // 10) # returns a LIST of strings '''Step 2''' #", "here! out, _ = perm_key([], str(num)) return out # Recursive Solution Udacity one,", "input[1:] char = get_characters(int(key)) new_output = [] for str_member in output: for c", "\"djh\", \"ejh\", \"fjh\", \"dkh\", \"ekh\", \"fkh\", \"dlh\", \"elh\", \"flh\", \"dji\", \"eji\", \"fji\", \"dki\",", "= [] for str_member in output: for c in char: new_string = str_member", "str_member in output: for c in char: new_string = str_member + c new_output.append(new_string)", "elif num == 7: return \"pqrs\" elif num == 8: return \"tuv\" elif", "new_output = [] for c in char: new_string = c new_output.append(new_string) final_output, final_input", "1: return [\"\"] # If `num` is single digit, get the LIST having", "\"af\", \"bd\", \"be\", \"bf\", \"cd\", \"ce\", \"cf\"]) test_keypad(input, expected_output) # Example case input", "item + character output.append(new_item) return output def test_keypad(input, expected_output): print(keypad(input)) if sorted(keypad(input)) ==", "got it right.\") else: print(\"Oops! That was incorrect.\") # Example case input =", "''' for character in keypad_string: for item in small_output: new_item = item +", "`num` last_digit = num % 10 '''Step 1''' # Recursive call to the", "output.append(new_item) return output def test_keypad(input, expected_output): print(keypad(input)) if sorted(keypad(input)) == expected_output: print(\"Yay. 
We", "final_output, final_input = perm_key(new_output, rest) return final_output, final_input else: key, rest = input[:1],", "in output: for c in char: new_string = str_member + c new_output.append(new_string) perm_key(new_output,", "elif 1 < num <= 9: return list(get_characters(num)) # Otherwise `num` >= 10.", "the same function with “floor” of the `num//10` small_output = keypad(num // 10)", "return \"abc\" elif num == 3: return \"def\" elif num == 4: return", "not output: key, rest = input[:1], input[1:] char = get_characters(int(key)) new_output = []", "expected_output = sorted([\"da\", \"db\", \"dc\", \"ea\", \"eb\", \"ec\", \"fa\", \"fb\", \"fc\"]) test_keypad(input, expected_output)", "= input[:1], input[1:] char = get_characters(int(key)) new_output = [] for str_member in output:", "= get_characters(int(key)) new_output = [] for c in char: new_string = c new_output.append(new_string)", "new_output.append(new_string) final_output, final_input = perm_key(new_output, rest) return final_output, final_input else: key, rest =", "char = get_characters(int(key)) new_output = [] for c in char: new_string = c", "= list() ''' The Idea: Each character of keypad_string must be appended to" ]
[ "elif answerNumber == 9: result = 'Very doubtful' return result r = random.randint(1,9)", "= 'Outlook not so good' elif answerNumber == 9: result = 'Very doubtful'", "answerNumber == 9: result = 'Very doubtful' return result r = random.randint(1,9) fortune", "answerNumber == 3: result = 'Yes' elif answerNumber == 4: result = 'Reply", "== 4: result = 'Reply hazy try again' elif answerNumber == 5: result", "answerNumber == 1: result = 'It is certain' elif answerNumber == 2: result", "= 'Yes' elif answerNumber == 4: result = 'Reply hazy try again' elif", "elif answerNumber == 6: result = 'Concentrate ans ask again' elif answerNumber ==", "again later' elif answerNumber == 6: result = 'Concentrate ans ask again' elif", "result = 'Concentrate ans ask again' elif answerNumber == 7: result = 'My", "2: result = 'It is decidedly so' elif answerNumber == 3: result =", "== 8: result = 'Outlook not so good' elif answerNumber == 9: result", "no' elif answerNumber == 8: result = 'Outlook not so good' elif answerNumber", "result = 'My reply is no' elif answerNumber == 8: result = 'Outlook", "== 5: result = 'Ask again later' elif answerNumber == 6: result =", "so good' elif answerNumber == 9: result = 'Very doubtful' return result r", "== 9: result = 'Very doubtful' return result r = random.randint(1,9) fortune =", "import random def getAnswer(answerNumber): result = '' if answerNumber == 1: result =", "result = 'Ask again later' elif answerNumber == 6: result = 'Concentrate ans", "def getAnswer(answerNumber): result = '' if answerNumber == 1: result = 'It is", "'Outlook not so good' elif answerNumber == 9: result = 'Very doubtful' return", "= 'It is decidedly so' elif answerNumber == 3: result = 'Yes' elif", "5: result = 'Ask again later' elif answerNumber == 6: result = 'Concentrate", "random def getAnswer(answerNumber): result = '' if answerNumber == 1: result = 'It", "result = '' if answerNumber == 1: result = 'It is certain' elif", "ask again' elif answerNumber == 7: result 
= 'My reply is no' elif", "answerNumber == 5: result = 'Ask again later' elif answerNumber == 6: result", "is decidedly so' elif answerNumber == 3: result = 'Yes' elif answerNumber ==", "elif answerNumber == 4: result = 'Reply hazy try again' elif answerNumber ==", "result = 'It is decidedly so' elif answerNumber == 3: result = 'Yes'", "elif answerNumber == 8: result = 'Outlook not so good' elif answerNumber ==", "'' if answerNumber == 1: result = 'It is certain' elif answerNumber ==", "result = 'Yes' elif answerNumber == 4: result = 'Reply hazy try again'", "= 'Very doubtful' return result r = random.randint(1,9) fortune = getAnswer(r) print(fortune) print(getAnswer(random.randint(1,9)))", "= 'My reply is no' elif answerNumber == 8: result = 'Outlook not", "'Ask again later' elif answerNumber == 6: result = 'Concentrate ans ask again'", "1: result = 'It is certain' elif answerNumber == 2: result = 'It", "== 3: result = 'Yes' elif answerNumber == 4: result = 'Reply hazy", "reply is no' elif answerNumber == 8: result = 'Outlook not so good'", "<filename>Part_1/ch03_func/3_2_return.py import random def getAnswer(answerNumber): result = '' if answerNumber == 1: result", "if answerNumber == 1: result = 'It is certain' elif answerNumber == 2:", "'My reply is no' elif answerNumber == 8: result = 'Outlook not so", "ans ask again' elif answerNumber == 7: result = 'My reply is no'", "elif answerNumber == 7: result = 'My reply is no' elif answerNumber ==", "so' elif answerNumber == 3: result = 'Yes' elif answerNumber == 4: result", "answerNumber == 8: result = 'Outlook not so good' elif answerNumber == 9:", "= '' if answerNumber == 1: result = 'It is certain' elif answerNumber", "again' elif answerNumber == 7: result = 'My reply is no' elif answerNumber", "'Reply hazy try again' elif answerNumber == 5: result = 'Ask again later'", "result = 'Very doubtful' return result r = random.randint(1,9) fortune = getAnswer(r) print(fortune)", "answerNumber == 7: result = 'My reply is 
no' elif answerNumber == 8:", "answerNumber == 4: result = 'Reply hazy try again' elif answerNumber == 5:", "elif answerNumber == 3: result = 'Yes' elif answerNumber == 4: result =", "elif answerNumber == 5: result = 'Ask again later' elif answerNumber == 6:", "result = 'Outlook not so good' elif answerNumber == 9: result = 'Very", "'Concentrate ans ask again' elif answerNumber == 7: result = 'My reply is", "== 1: result = 'It is certain' elif answerNumber == 2: result =", "not so good' elif answerNumber == 9: result = 'Very doubtful' return result", "4: result = 'Reply hazy try again' elif answerNumber == 5: result =", "= 'Concentrate ans ask again' elif answerNumber == 7: result = 'My reply", "later' elif answerNumber == 6: result = 'Concentrate ans ask again' elif answerNumber", "elif answerNumber == 2: result = 'It is decidedly so' elif answerNumber ==", "== 2: result = 'It is decidedly so' elif answerNumber == 3: result", "== 7: result = 'My reply is no' elif answerNumber == 8: result", "result = 'It is certain' elif answerNumber == 2: result = 'It is", "8: result = 'Outlook not so good' elif answerNumber == 9: result =", "= 'Ask again later' elif answerNumber == 6: result = 'Concentrate ans ask", "7: result = 'My reply is no' elif answerNumber == 8: result =", "hazy try again' elif answerNumber == 5: result = 'Ask again later' elif", "try again' elif answerNumber == 5: result = 'Ask again later' elif answerNumber", "answerNumber == 2: result = 'It is decidedly so' elif answerNumber == 3:", "'It is certain' elif answerNumber == 2: result = 'It is decidedly so'", "3: result = 'Yes' elif answerNumber == 4: result = 'Reply hazy try", "good' elif answerNumber == 9: result = 'Very doubtful' return result r =", "decidedly so' elif answerNumber == 3: result = 'Yes' elif answerNumber == 4:", "is no' elif answerNumber == 8: result = 'Outlook not so good' elif", "certain' elif answerNumber == 2: result = 'It is decidedly so' elif answerNumber", "'It is decidedly so' 
elif answerNumber == 3: result = 'Yes' elif answerNumber", "result = 'Reply hazy try again' elif answerNumber == 5: result = 'Ask", "== 6: result = 'Concentrate ans ask again' elif answerNumber == 7: result", "9: result = 'Very doubtful' return result r = random.randint(1,9) fortune = getAnswer(r)", "getAnswer(answerNumber): result = '' if answerNumber == 1: result = 'It is certain'", "answerNumber == 6: result = 'Concentrate ans ask again' elif answerNumber == 7:", "= 'Reply hazy try again' elif answerNumber == 5: result = 'Ask again", "'Yes' elif answerNumber == 4: result = 'Reply hazy try again' elif answerNumber", "= 'It is certain' elif answerNumber == 2: result = 'It is decidedly", "is certain' elif answerNumber == 2: result = 'It is decidedly so' elif", "again' elif answerNumber == 5: result = 'Ask again later' elif answerNumber ==", "6: result = 'Concentrate ans ask again' elif answerNumber == 7: result =" ]
[ "django.db import migrations class Migration(migrations.Migration): dependencies = [ ('core', '0023_game_max_words'), ] operations =", "Migration(migrations.Migration): dependencies = [ ('core', '0023_game_max_words'), ] operations = [ migrations.RenameField( model_name='game', old_name='max_words',", "2.2.10 on 2020-04-10 22:28 from django.db import migrations class Migration(migrations.Migration): dependencies = [", "on 2020-04-10 22:28 from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('core',", "2020-04-10 22:28 from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('core', '0023_game_max_words'),", "# Generated by Django 2.2.10 on 2020-04-10 22:28 from django.db import migrations class", "by Django 2.2.10 on 2020-04-10 22:28 from django.db import migrations class Migration(migrations.Migration): dependencies", "migrations class Migration(migrations.Migration): dependencies = [ ('core', '0023_game_max_words'), ] operations = [ migrations.RenameField(", "class Migration(migrations.Migration): dependencies = [ ('core', '0023_game_max_words'), ] operations = [ migrations.RenameField( model_name='game',", "[ ('core', '0023_game_max_words'), ] operations = [ migrations.RenameField( model_name='game', old_name='max_words', new_name='words_per_player', ), ]", "from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('core', '0023_game_max_words'), ] operations", "= [ ('core', '0023_game_max_words'), ] operations = [ migrations.RenameField( model_name='game', old_name='max_words', new_name='words_per_player', ),", "Generated by Django 2.2.10 on 2020-04-10 22:28 from django.db import migrations class Migration(migrations.Migration):", "dependencies = [ ('core', '0023_game_max_words'), ] operations = [ migrations.RenameField( model_name='game', old_name='max_words', new_name='words_per_player',", "22:28 from django.db import migrations class 
Migration(migrations.Migration): dependencies = [ ('core', '0023_game_max_words'), ]", "import migrations class Migration(migrations.Migration): dependencies = [ ('core', '0023_game_max_words'), ] operations = [", "Django 2.2.10 on 2020-04-10 22:28 from django.db import migrations class Migration(migrations.Migration): dependencies =" ]
[ "cfg.SubCommandOpt( 'sub', dest='sub', title='Sub Options', handler=register_sub_opts) ] def main(): \"\"\"Parse options and call", "= [ '/bin/bash', '-c', 'ssh -i {} -o ProxyCommand=\"ssh -q -i {} -W", "from catena.common import config from catena.common.utils import decrypt_private_rsakey from catena.common.utils import decrypt_rsakey from", "language governing permissions and # limitations under the License. from __future__ import print_function", "Unless required by applicable law or agreed to in writing, software # distributed", "parser.set_defaults(action='ssh') SUB_OPTS = [ cfg.SubCommandOpt( 'sub', dest='sub', title='Sub Options', handler=register_sub_opts) ] def main():", "= enginefacade.writer.get_engine() return models.unregister_models(context) def output_ssh_key(): context = db_api.get_context() chain = db_api.get_chain(context, CONF.sub.chain_id)", "LOG = log.getLogger(__name__) def register_models(): context = enginefacade.writer.get_engine() return models.register_models(context) def unregister_models(): context", "does not exist') print(decrypt_rsakey(node.ssh_key)) def open_ssh_connection(): context = db_api.get_context() chain = db_api.get_chain(context, CONF.sub.chain_id)", "parser.set_defaults(action_fn=register_models) parser.set_defaults(action='db_sync') parser = subparser.add_parser('db_remove') parser.set_defaults(action_fn=unregister_models) parser.set_defaults(action='db_remove') parser = subparser.add_parser('ssh_key') parser.add_argument('chain_id') parser.add_argument('node_id') parser.set_defaults(action_fn=output_ssh_key)", "this file except in compliance with the License. 
# You may obtain a", "import subprocess import sys import tempfile from oslo_config import cfg from oslo_db.sqlalchemy import", "= cfg.CONF LOG = log.getLogger(__name__) def register_models(): context = enginefacade.writer.get_engine() return models.register_models(context) def", "CONF.register_cli_opts(SUB_OPTS) config.parse_args(sys.argv[1:]) config.setup_logging() try: if CONF.sub.action.startswith('db'): return CONF.sub.action_fn() if CONF.sub.action.startswith('ssh'): return CONF.sub.action_fn() except", "register_sub_opts(subparser): parser = subparser.add_parser('db_sync') parser.set_defaults(action_fn=register_models) parser.set_defaults(action='db_sync') parser = subparser.add_parser('db_remove') parser.set_defaults(action_fn=unregister_models) parser.set_defaults(action='db_remove') parser =", "config from catena.common.utils import decrypt_private_rsakey from catena.common.utils import decrypt_rsakey from catena.db.sqlalchemy import api", "CONF.sub.action.startswith('db'): return CONF.sub.action_fn() if CONF.sub.action.startswith('ssh'): return CONF.sub.action_fn() except Exception as e: sys.exit(\"ERROR: {0}\".format(e))", "node = db_api.get_node(context, chain, CONF.sub.node_id) if node is None: return LOG.error('This node-id does", "call the appropriate class/method.\"\"\" CONF.register_cli_opts(SUB_OPTS) config.parse_args(sys.argv[1:]) config.setup_logging() try: if CONF.sub.action.startswith('db'): return CONF.sub.action_fn() if", "temp_node_ssh, tempfile.NamedTemporaryFile( dir=home) as temp_jumpbox_ssh: decrypt_private_rsakey(node.ssh_key, temp_node_ssh) decrypt_private_rsakey( chain.get_cloud_config()['jumpbox_key'], temp_jumpbox_ssh ) args =", "chain, CONF.sub.node_id) if node is None: return LOG.error('This node-id does not exist') print(decrypt_rsakey(node.ssh_key))", "parser = subparser.add_parser('ssh') parser.add_argument('chain_id') parser.add_argument('node_id') parser.set_defaults(action_fn=open_ssh_connection) 
parser.set_defaults(action='ssh') SUB_OPTS = [ cfg.SubCommandOpt( 'sub', dest='sub',", "specific language governing permissions and # limitations under the License. from __future__ import", "IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "limitations under the License. from __future__ import print_function import os import subprocess import", "parser.set_defaults(action='ssh_key') parser = subparser.add_parser('ssh') parser.add_argument('chain_id') parser.add_argument('node_id') parser.set_defaults(action_fn=open_ssh_connection) parser.set_defaults(action='ssh') SUB_OPTS = [ cfg.SubCommandOpt( 'sub',", "decrypt_private_rsakey(node.ssh_key, temp_node_ssh) decrypt_private_rsakey( chain.get_cloud_config()['jumpbox_key'], temp_jumpbox_ssh ) args = [ '/bin/bash', '-c', 'ssh -i", "SUB_OPTS = [ cfg.SubCommandOpt( 'sub', dest='sub', title='Sub Options', handler=register_sub_opts) ] def main(): \"\"\"Parse", "' 'StrictHostKeyChecking=no ubuntu@{}'.format( temp_node_ssh.name, temp_jumpbox_ssh.name, jumpbox_ip, node.ip) ] process = subprocess.Popen(args) process.wait() def", "return models.unregister_models(context) def output_ssh_key(): context = db_api.get_context() chain = db_api.get_chain(context, CONF.sub.chain_id) if chain", "args = [ '/bin/bash', '-c', 'ssh -i {} -o ProxyCommand=\"ssh -q -i {}", "not exist') home = os.path.expanduser(\"~/.ssh\") jumpbox_ip = chain.get_cloud_config()['jumpbox_ip'] with tempfile.NamedTemporaryFile( dir=home) as temp_node_ssh,", "2017 Hewlett Packard Enterprise Development LP. # # Licensed under the Apache License,", "catena.common.utils import decrypt_private_rsakey from catena.common.utils import decrypt_rsakey from catena.db.sqlalchemy import api as db_api", "'StrictHostKeyChecking=no ubuntu@{}'.format( temp_node_ssh.name, temp_jumpbox_ssh.name, jumpbox_ip, node.ip) ] process = subprocess.Popen(args) process.wait() def register_sub_opts(subparser):", "LP. 
# # Licensed under the Apache License, Version 2.0 (the \"License\"); #", "software # distributed under the License is distributed on an \"AS IS\" BASIS,", "os import subprocess import sys import tempfile from oslo_config import cfg from oslo_db.sqlalchemy", "# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to", "temp_jumpbox_ssh ) args = [ '/bin/bash', '-c', 'ssh -i {} -o ProxyCommand=\"ssh -q", "jumpbox_ip, node.ip) ] process = subprocess.Popen(args) process.wait() def register_sub_opts(subparser): parser = subparser.add_parser('db_sync') parser.set_defaults(action_fn=register_models)", "under the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES", "the License. from __future__ import print_function import os import subprocess import sys import", "the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law", "either express or # implied. # See the License for the specific language", "not exist') print(decrypt_rsakey(node.ssh_key)) def open_ssh_connection(): context = db_api.get_context() chain = db_api.get_chain(context, CONF.sub.chain_id) if", "\"\"\"Parse options and call the appropriate class/method.\"\"\" CONF.register_cli_opts(SUB_OPTS) config.parse_args(sys.argv[1:]) config.setup_logging() try: if CONF.sub.action.startswith('db'):", "config.parse_args(sys.argv[1:]) config.setup_logging() try: if CONF.sub.action.startswith('db'): return CONF.sub.action_fn() if CONF.sub.action.startswith('ssh'): return CONF.sub.action_fn() except Exception", "\"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express", "db_api from catena.db.sqlalchemy import models CONF = cfg.CONF LOG = log.getLogger(__name__) def register_models():", "] process = subprocess.Popen(args) process.wait() def register_sub_opts(subparser): parser = subparser.add_parser('db_sync') parser.set_defaults(action_fn=register_models) parser.set_defaults(action='db_sync') parser", 
"catena.common.utils import decrypt_rsakey from catena.db.sqlalchemy import api as db_api from catena.db.sqlalchemy import models", "required by applicable law or agreed to in writing, software # distributed under", "from catena.db.sqlalchemy import models CONF = cfg.CONF LOG = log.getLogger(__name__) def register_models(): context", "oslo_config import cfg from oslo_db.sqlalchemy import enginefacade from oslo_log import log from catena.common", "applicable law or agreed to in writing, software # distributed under the License", "import log from catena.common import config from catena.common.utils import decrypt_private_rsakey from catena.common.utils import", "main(): \"\"\"Parse options and call the appropriate class/method.\"\"\" CONF.register_cli_opts(SUB_OPTS) config.parse_args(sys.argv[1:]) config.setup_logging() try: if", "or agreed to in writing, software # distributed under the License is distributed", "enginefacade.writer.get_engine() return models.register_models(context) def unregister_models(): context = enginefacade.writer.get_engine() return models.unregister_models(context) def output_ssh_key(): context", "'-c', 'ssh -i {} -o ProxyCommand=\"ssh -q -i {} -W %h:%p ubuntu@{}\" -o", "CONF.sub.node_id) if node is None: return LOG.error('This node-id does not exist') print(decrypt_rsakey(node.ssh_key)) def", "node.ip) ] process = subprocess.Popen(args) process.wait() def register_sub_opts(subparser): parser = subparser.add_parser('db_sync') parser.set_defaults(action_fn=register_models) parser.set_defaults(action='db_sync')", "import models CONF = cfg.CONF LOG = log.getLogger(__name__) def register_models(): context = enginefacade.writer.get_engine()", "from catena.common.utils import decrypt_private_rsakey from catena.common.utils import decrypt_rsakey from catena.db.sqlalchemy import api as", "governing permissions and # limitations under the License. 
from __future__ import print_function import", "is None: return LOG.error('This node-id does not exist') print(decrypt_rsakey(node.ssh_key)) def open_ssh_connection(): context =", "under the Apache License, Version 2.0 (the \"License\"); # you may not use", "writing, software # distributed under the License is distributed on an \"AS IS\"", "None: return LOG.error('This node-id does not exist') print(decrypt_rsakey(node.ssh_key)) def open_ssh_connection(): context = db_api.get_context()", "chain.get_cloud_config()['jumpbox_ip'] with tempfile.NamedTemporaryFile( dir=home) as temp_node_ssh, tempfile.NamedTemporaryFile( dir=home) as temp_jumpbox_ssh: decrypt_private_rsakey(node.ssh_key, temp_node_ssh) decrypt_private_rsakey(", "not exist') node = db_api.get_node(context, chain, CONF.sub.node_id) if node is None: return LOG.error('This", "You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #", "parser.set_defaults(action='db_remove') parser = subparser.add_parser('ssh_key') parser.add_argument('chain_id') parser.add_argument('node_id') parser.set_defaults(action_fn=output_ssh_key) parser.set_defaults(action='ssh_key') parser = subparser.add_parser('ssh') parser.add_argument('chain_id') parser.add_argument('node_id')", "License. 
# You may obtain a copy of the License at # #", "'sub', dest='sub', title='Sub Options', handler=register_sub_opts) ] def main(): \"\"\"Parse options and call the", "is None: return LOG.error('This node-id does not exist') home = os.path.expanduser(\"~/.ssh\") jumpbox_ip =", "parser.add_argument('chain_id') parser.add_argument('node_id') parser.set_defaults(action_fn=output_ssh_key) parser.set_defaults(action='ssh_key') parser = subparser.add_parser('ssh') parser.add_argument('chain_id') parser.add_argument('node_id') parser.set_defaults(action_fn=open_ssh_connection) parser.set_defaults(action='ssh') SUB_OPTS =", "api as db_api from catena.db.sqlalchemy import models CONF = cfg.CONF LOG = log.getLogger(__name__)", "sys import tempfile from oslo_config import cfg from oslo_db.sqlalchemy import enginefacade from oslo_log", "-o ProxyCommand=\"ssh -q -i {} -W %h:%p ubuntu@{}\" -o ' 'StrictHostKeyChecking=no ubuntu@{}'.format( temp_node_ssh.name,", "compliance with the License. # You may obtain a copy of the License", "= [ cfg.SubCommandOpt( 'sub', dest='sub', title='Sub Options', handler=register_sub_opts) ] def main(): \"\"\"Parse options", "cfg.CONF LOG = log.getLogger(__name__) def register_models(): context = enginefacade.writer.get_engine() return models.register_models(context) def unregister_models():", "enginefacade from oslo_log import log from catena.common import config from catena.common.utils import decrypt_private_rsakey", "log.getLogger(__name__) def register_models(): context = enginefacade.writer.get_engine() return models.register_models(context) def unregister_models(): context = enginefacade.writer.get_engine()", "of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable", "dir=home) as temp_node_ssh, tempfile.NamedTemporaryFile( dir=home) as temp_jumpbox_ssh: decrypt_private_rsakey(node.ssh_key, temp_node_ssh) decrypt_private_rsakey( chain.get_cloud_config()['jumpbox_key'], temp_jumpbox_ssh )", 
"models.register_models(context) def unregister_models(): context = enginefacade.writer.get_engine() return models.unregister_models(context) def output_ssh_key(): context = db_api.get_context()", "parser = subparser.add_parser('ssh_key') parser.add_argument('chain_id') parser.add_argument('node_id') parser.set_defaults(action_fn=output_ssh_key) parser.set_defaults(action='ssh_key') parser = subparser.add_parser('ssh') parser.add_argument('chain_id') parser.add_argument('node_id') parser.set_defaults(action_fn=open_ssh_connection)", "= subparser.add_parser('db_sync') parser.set_defaults(action_fn=register_models) parser.set_defaults(action='db_sync') parser = subparser.add_parser('db_remove') parser.set_defaults(action_fn=unregister_models) parser.set_defaults(action='db_remove') parser = subparser.add_parser('ssh_key') parser.add_argument('chain_id')", "Hewlett Packard Enterprise Development LP. # # Licensed under the Apache License, Version", "not use this file except in compliance with the License. 
# You may", "'ssh -i {} -o ProxyCommand=\"ssh -q -i {} -W %h:%p ubuntu@{}\" -o '", "import os import subprocess import sys import tempfile from oslo_config import cfg from", "= subparser.add_parser('ssh_key') parser.add_argument('chain_id') parser.add_argument('node_id') parser.set_defaults(action_fn=output_ssh_key) parser.set_defaults(action='ssh_key') parser = subparser.add_parser('ssh') parser.add_argument('chain_id') parser.add_argument('node_id') parser.set_defaults(action_fn=open_ssh_connection) parser.set_defaults(action='ssh')", "License, Version 2.0 (the \"License\"); # you may not use this file except", ") args = [ '/bin/bash', '-c', 'ssh -i {} -o ProxyCommand=\"ssh -q -i", "exist') home = os.path.expanduser(\"~/.ssh\") jumpbox_ip = chain.get_cloud_config()['jumpbox_ip'] with tempfile.NamedTemporaryFile( dir=home) as temp_node_ssh, tempfile.NamedTemporaryFile(", "distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY", "parser.add_argument('chain_id') parser.add_argument('node_id') parser.set_defaults(action_fn=open_ssh_connection) parser.set_defaults(action='ssh') SUB_OPTS = [ cfg.SubCommandOpt( 'sub', dest='sub', title='Sub Options', handler=register_sub_opts)", "BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or #", "from oslo_config import cfg from oslo_db.sqlalchemy import enginefacade from oslo_log import log from", "Options', handler=register_sub_opts) ] def main(): \"\"\"Parse options and call the appropriate class/method.\"\"\" CONF.register_cli_opts(SUB_OPTS)", "# you may not use this file except in compliance with the License.", "open_ssh_connection(): context = db_api.get_context() chain = db_api.get_chain(context, CONF.sub.chain_id) if chain is None: return", "tempfile.NamedTemporaryFile( dir=home) as temp_jumpbox_ssh: decrypt_private_rsakey(node.ssh_key, temp_node_ssh) decrypt_private_rsakey( chain.get_cloud_config()['jumpbox_key'], temp_jumpbox_ssh ) args = [", "-i {} -W %h:%p ubuntu@{}\" -o ' 
'StrictHostKeyChecking=no ubuntu@{}'.format( temp_node_ssh.name, temp_jumpbox_ssh.name, jumpbox_ip, node.ip)", "node is None: return LOG.error('This node-id does not exist') print(decrypt_rsakey(node.ssh_key)) def open_ssh_connection(): context", "agreed to in writing, software # distributed under the License is distributed on", "LOG.error('This chain-id does not exist') node = db_api.get_node(context, chain, CONF.sub.node_id) if node is", "(the \"License\"); # you may not use this file except in compliance with", "# Unless required by applicable law or agreed to in writing, software #", "subparser.add_parser('ssh') parser.add_argument('chain_id') parser.add_argument('node_id') parser.set_defaults(action_fn=open_ssh_connection) parser.set_defaults(action='ssh') SUB_OPTS = [ cfg.SubCommandOpt( 'sub', dest='sub', title='Sub Options',", "by applicable law or agreed to in writing, software # distributed under the", "copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by", "oslo_db.sqlalchemy import enginefacade from oslo_log import log from catena.common import config from catena.common.utils", "os.path.expanduser(\"~/.ssh\") jumpbox_ip = chain.get_cloud_config()['jumpbox_ip'] with tempfile.NamedTemporaryFile( dir=home) as temp_node_ssh, tempfile.NamedTemporaryFile( dir=home) as temp_jumpbox_ssh:", "the appropriate class/method.\"\"\" CONF.register_cli_opts(SUB_OPTS) config.parse_args(sys.argv[1:]) config.setup_logging() try: if CONF.sub.action.startswith('db'): return CONF.sub.action_fn() if CONF.sub.action.startswith('ssh'):", "file except in compliance with the License. # You may obtain a copy", "node-id does not exist') print(decrypt_rsakey(node.ssh_key)) def open_ssh_connection(): context = db_api.get_context() chain = db_api.get_chain(context,", "License for the specific language governing permissions and # limitations under the License.", "CONDITIONS OF ANY KIND, either express or # implied. 
# See the License", "parser = subparser.add_parser('db_sync') parser.set_defaults(action_fn=register_models) parser.set_defaults(action='db_sync') parser = subparser.add_parser('db_remove') parser.set_defaults(action_fn=unregister_models) parser.set_defaults(action='db_remove') parser = subparser.add_parser('ssh_key')", "options and call the appropriate class/method.\"\"\" CONF.register_cli_opts(SUB_OPTS) config.parse_args(sys.argv[1:]) config.setup_logging() try: if CONF.sub.action.startswith('db'): return", "with tempfile.NamedTemporaryFile( dir=home) as temp_node_ssh, tempfile.NamedTemporaryFile( dir=home) as temp_jumpbox_ssh: decrypt_private_rsakey(node.ssh_key, temp_node_ssh) decrypt_private_rsakey( chain.get_cloud_config()['jumpbox_key'],", "to in writing, software # distributed under the License is distributed on an", "exist') print(decrypt_rsakey(node.ssh_key)) def open_ssh_connection(): context = db_api.get_context() chain = db_api.get_chain(context, CONF.sub.chain_id) if chain", "implied. # See the License for the specific language governing permissions and #", "parser.set_defaults(action_fn=unregister_models) parser.set_defaults(action='db_remove') parser = subparser.add_parser('ssh_key') parser.add_argument('chain_id') parser.add_argument('node_id') parser.set_defaults(action_fn=output_ssh_key) parser.set_defaults(action='ssh_key') parser = subparser.add_parser('ssh') parser.add_argument('chain_id')", "\"License\"); # you may not use this file except in compliance with the", "obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless", "return LOG.error('This node-id does not exist') print(decrypt_rsakey(node.ssh_key)) def open_ssh_connection(): context = db_api.get_context() chain", "Packard Enterprise Development LP. 
# # Licensed under the Apache License, Version 2.0", "subparser.add_parser('db_sync') parser.set_defaults(action_fn=register_models) parser.set_defaults(action='db_sync') parser = subparser.add_parser('db_remove') parser.set_defaults(action_fn=unregister_models) parser.set_defaults(action='db_remove') parser = subparser.add_parser('ssh_key') parser.add_argument('chain_id') parser.add_argument('node_id')", "-q -i {} -W %h:%p ubuntu@{}\" -o ' 'StrictHostKeyChecking=no ubuntu@{}'.format( temp_node_ssh.name, temp_jumpbox_ssh.name, jumpbox_ip,", "ProxyCommand=\"ssh -q -i {} -W %h:%p ubuntu@{}\" -o ' 'StrictHostKeyChecking=no ubuntu@{}'.format( temp_node_ssh.name, temp_jumpbox_ssh.name,", "oslo_log import log from catena.common import config from catena.common.utils import decrypt_private_rsakey from catena.common.utils", "from catena.db.sqlalchemy import api as db_api from catena.db.sqlalchemy import models CONF = cfg.CONF", "= subprocess.Popen(args) process.wait() def register_sub_opts(subparser): parser = subparser.add_parser('db_sync') parser.set_defaults(action_fn=register_models) parser.set_defaults(action='db_sync') parser = subparser.add_parser('db_remove')", "def unregister_models(): context = enginefacade.writer.get_engine() return models.unregister_models(context) def output_ssh_key(): context = db_api.get_context() chain", "Apache License, Version 2.0 (the \"License\"); # you may not use this file", "may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #", "# implied. 
# See the License for the specific language governing permissions and", "subprocess.Popen(args) process.wait() def register_sub_opts(subparser): parser = subparser.add_parser('db_sync') parser.set_defaults(action_fn=register_models) parser.set_defaults(action='db_sync') parser = subparser.add_parser('db_remove') parser.set_defaults(action_fn=unregister_models)", "[ '/bin/bash', '-c', 'ssh -i {} -o ProxyCommand=\"ssh -q -i {} -W %h:%p", "http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing,", "cfg from oslo_db.sqlalchemy import enginefacade from oslo_log import log from catena.common import config", "in writing, software # distributed under the License is distributed on an \"AS", "chain, CONF.sub.node_id) if node is None: return LOG.error('This node-id does not exist') home", "parser = subparser.add_parser('db_remove') parser.set_defaults(action_fn=unregister_models) parser.set_defaults(action='db_remove') parser = subparser.add_parser('ssh_key') parser.add_argument('chain_id') parser.add_argument('node_id') parser.set_defaults(action_fn=output_ssh_key) parser.set_defaults(action='ssh_key') parser", "or # implied. # See the License for the specific language governing permissions", "OR CONDITIONS OF ANY KIND, either express or # implied. 
# See the", "context = enginefacade.writer.get_engine() return models.unregister_models(context) def output_ssh_key(): context = db_api.get_context() chain = db_api.get_chain(context,", "# See the License for the specific language governing permissions and # limitations", "the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR", "home = os.path.expanduser(\"~/.ssh\") jumpbox_ip = chain.get_cloud_config()['jumpbox_ip'] with tempfile.NamedTemporaryFile( dir=home) as temp_node_ssh, tempfile.NamedTemporaryFile( dir=home)", "catena.db.sqlalchemy import api as db_api from catena.db.sqlalchemy import models CONF = cfg.CONF LOG", "LOG.error('This node-id does not exist') print(decrypt_rsakey(node.ssh_key)) def open_ssh_connection(): context = db_api.get_context() chain =", "parser.add_argument('node_id') parser.set_defaults(action_fn=open_ssh_connection) parser.set_defaults(action='ssh') SUB_OPTS = [ cfg.SubCommandOpt( 'sub', dest='sub', title='Sub Options', handler=register_sub_opts) ]", "the Apache License, Version 2.0 (the \"License\"); # you may not use this", "you may not use this file except in compliance with the License. 
#", "{} -o ProxyCommand=\"ssh -q -i {} -W %h:%p ubuntu@{}\" -o ' 'StrictHostKeyChecking=no ubuntu@{}'.format(", "-i {} -o ProxyCommand=\"ssh -q -i {} -W %h:%p ubuntu@{}\" -o ' 'StrictHostKeyChecking=no", "def register_models(): context = enginefacade.writer.get_engine() return models.register_models(context) def unregister_models(): context = enginefacade.writer.get_engine() return", "{} -W %h:%p ubuntu@{}\" -o ' 'StrictHostKeyChecking=no ubuntu@{}'.format( temp_node_ssh.name, temp_jumpbox_ssh.name, jumpbox_ip, node.ip) ]", "return LOG.error('This chain-id does not exist') node = db_api.get_node(context, chain, CONF.sub.node_id) if node", "-o ' 'StrictHostKeyChecking=no ubuntu@{}'.format( temp_node_ssh.name, temp_jumpbox_ssh.name, jumpbox_ip, node.ip) ] process = subprocess.Popen(args) process.wait()", "use this file except in compliance with the License. # You may obtain", "] def main(): \"\"\"Parse options and call the appropriate class/method.\"\"\" CONF.register_cli_opts(SUB_OPTS) config.parse_args(sys.argv[1:]) config.setup_logging()", "def main(): \"\"\"Parse options and call the appropriate class/method.\"\"\" CONF.register_cli_opts(SUB_OPTS) config.parse_args(sys.argv[1:]) config.setup_logging() try:", "return models.register_models(context) def unregister_models(): context = enginefacade.writer.get_engine() return models.unregister_models(context) def output_ssh_key(): context =", "ubuntu@{}\" -o ' 'StrictHostKeyChecking=no ubuntu@{}'.format( temp_node_ssh.name, temp_jumpbox_ssh.name, jumpbox_ip, node.ip) ] process = subprocess.Popen(args)", "# Licensed under the Apache License, Version 2.0 (the \"License\"); # you may", "print(decrypt_rsakey(node.ssh_key)) def open_ssh_connection(): context = db_api.get_context() chain = db_api.get_chain(context, CONF.sub.chain_id) if chain is", "= db_api.get_chain(context, CONF.sub.chain_id) if chain is None: return LOG.error('This chain-id does not exist')", "parser.set_defaults(action='db_sync') parser = 
subparser.add_parser('db_remove') parser.set_defaults(action_fn=unregister_models) parser.set_defaults(action='db_remove') parser = subparser.add_parser('ssh_key') parser.add_argument('chain_id') parser.add_argument('node_id') parser.set_defaults(action_fn=output_ssh_key) parser.set_defaults(action='ssh_key')", "import enginefacade from oslo_log import log from catena.common import config from catena.common.utils import", "temp_jumpbox_ssh.name, jumpbox_ip, node.ip) ] process = subprocess.Popen(args) process.wait() def register_sub_opts(subparser): parser = subparser.add_parser('db_sync')", "2.0 (the \"License\"); # you may not use this file except in compliance", "chain = db_api.get_chain(context, CONF.sub.chain_id) if chain is None: return LOG.error('This chain-id does not", "= chain.get_cloud_config()['jumpbox_ip'] with tempfile.NamedTemporaryFile( dir=home) as temp_node_ssh, tempfile.NamedTemporaryFile( dir=home) as temp_jumpbox_ssh: decrypt_private_rsakey(node.ssh_key, temp_node_ssh)", "from catena.common.utils import decrypt_rsakey from catena.db.sqlalchemy import api as db_api from catena.db.sqlalchemy import", "decrypt_private_rsakey from catena.common.utils import decrypt_rsakey from catena.db.sqlalchemy import api as db_api from catena.db.sqlalchemy", "temp_jumpbox_ssh: decrypt_private_rsakey(node.ssh_key, temp_node_ssh) decrypt_private_rsakey( chain.get_cloud_config()['jumpbox_key'], temp_jumpbox_ssh ) args = [ '/bin/bash', '-c', 'ssh", "unregister_models(): context = enginefacade.writer.get_engine() return models.unregister_models(context) def output_ssh_key(): context = db_api.get_context() chain =", "# # Unless required by applicable law or agreed to in writing, software", "jumpbox_ip = chain.get_cloud_config()['jumpbox_ip'] with tempfile.NamedTemporaryFile( dir=home) as temp_node_ssh, tempfile.NamedTemporaryFile( dir=home) as temp_jumpbox_ssh: decrypt_private_rsakey(node.ssh_key,", "class/method.\"\"\" CONF.register_cli_opts(SUB_OPTS) 
config.parse_args(sys.argv[1:]) config.setup_logging() try: if CONF.sub.action.startswith('db'): return CONF.sub.action_fn() if CONF.sub.action.startswith('ssh'): return CONF.sub.action_fn()", "from oslo_log import log from catena.common import config from catena.common.utils import decrypt_private_rsakey from", "= subparser.add_parser('db_remove') parser.set_defaults(action_fn=unregister_models) parser.set_defaults(action='db_remove') parser = subparser.add_parser('ssh_key') parser.add_argument('chain_id') parser.add_argument('node_id') parser.set_defaults(action_fn=output_ssh_key) parser.set_defaults(action='ssh_key') parser =", "def register_sub_opts(subparser): parser = subparser.add_parser('db_sync') parser.set_defaults(action_fn=register_models) parser.set_defaults(action='db_sync') parser = subparser.add_parser('db_remove') parser.set_defaults(action_fn=unregister_models) parser.set_defaults(action='db_remove') parser", "db_api.get_context() chain = db_api.get_chain(context, CONF.sub.chain_id) if chain is None: return LOG.error('This chain-id does", "output_ssh_key(): context = db_api.get_context() chain = db_api.get_chain(context, CONF.sub.chain_id) if chain is None: return", "'/bin/bash', '-c', 'ssh -i {} -o ProxyCommand=\"ssh -q -i {} -W %h:%p ubuntu@{}\"", "None: return LOG.error('This node-id does not exist') home = os.path.expanduser(\"~/.ssh\") jumpbox_ip = chain.get_cloud_config()['jumpbox_ip']", "Licensed under the Apache License, Version 2.0 (the \"License\"); # you may not", "an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either", "the specific language governing permissions and # limitations under the License. 
from __future__", "try: if CONF.sub.action.startswith('db'): return CONF.sub.action_fn() if CONF.sub.action.startswith('ssh'): return CONF.sub.action_fn() except Exception as e:", "parser.set_defaults(action_fn=open_ssh_connection) parser.set_defaults(action='ssh') SUB_OPTS = [ cfg.SubCommandOpt( 'sub', dest='sub', title='Sub Options', handler=register_sub_opts) ] def", "def output_ssh_key(): context = db_api.get_context() chain = db_api.get_chain(context, CONF.sub.chain_id) if chain is None:", "[ cfg.SubCommandOpt( 'sub', dest='sub', title='Sub Options', handler=register_sub_opts) ] def main(): \"\"\"Parse options and", "the License. # You may obtain a copy of the License at #", "Development LP. # # Licensed under the Apache License, Version 2.0 (the \"License\");", "# distributed under the License is distributed on an \"AS IS\" BASIS, #", "-W %h:%p ubuntu@{}\" -o ' 'StrictHostKeyChecking=no ubuntu@{}'.format( temp_node_ssh.name, temp_jumpbox_ssh.name, jumpbox_ip, node.ip) ] process", "__future__ import print_function import os import subprocess import sys import tempfile from oslo_config", "is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF", "from __future__ import print_function import os import subprocess import sys import tempfile from", "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied.", "if chain is None: return LOG.error('This chain-id does not exist') node = db_api.get_node(context,", "parser.set_defaults(action_fn=output_ssh_key) parser.set_defaults(action='ssh_key') parser = subparser.add_parser('ssh') parser.add_argument('chain_id') parser.add_argument('node_id') parser.set_defaults(action_fn=open_ssh_connection) parser.set_defaults(action='ssh') SUB_OPTS = [ cfg.SubCommandOpt(", "import config from catena.common.utils import decrypt_private_rsakey from catena.common.utils import decrypt_rsakey from catena.db.sqlalchemy import", "import decrypt_private_rsakey from catena.common.utils import 
decrypt_rsakey from catena.db.sqlalchemy import api as db_api from", "as temp_node_ssh, tempfile.NamedTemporaryFile( dir=home) as temp_jumpbox_ssh: decrypt_private_rsakey(node.ssh_key, temp_node_ssh) decrypt_private_rsakey( chain.get_cloud_config()['jumpbox_key'], temp_jumpbox_ssh ) args", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See", "= db_api.get_node(context, chain, CONF.sub.node_id) if node is None: return LOG.error('This node-id does not", "# (C) Copyright 2017 Hewlett Packard Enterprise Development LP. # # Licensed under", "with the License. # You may obtain a copy of the License at", "dir=home) as temp_jumpbox_ssh: decrypt_private_rsakey(node.ssh_key, temp_node_ssh) decrypt_private_rsakey( chain.get_cloud_config()['jumpbox_key'], temp_jumpbox_ssh ) args = [ '/bin/bash',", "# # Licensed under the Apache License, Version 2.0 (the \"License\"); # you", "License. from __future__ import print_function import os import subprocess import sys import tempfile", "db_api.get_node(context, chain, CONF.sub.node_id) if node is None: return LOG.error('This node-id does not exist')", "if node is None: return LOG.error('This node-id does not exist') home = os.path.expanduser(\"~/.ssh\")", "as db_api from catena.db.sqlalchemy import models CONF = cfg.CONF LOG = log.getLogger(__name__) def", "does not exist') home = os.path.expanduser(\"~/.ssh\") jumpbox_ip = chain.get_cloud_config()['jumpbox_ip'] with tempfile.NamedTemporaryFile( dir=home) as", "chain is None: return LOG.error('This chain-id does not exist') node = db_api.get_node(context, chain,", "tempfile.NamedTemporaryFile( dir=home) as temp_node_ssh, tempfile.NamedTemporaryFile( dir=home) as temp_jumpbox_ssh: decrypt_private_rsakey(node.ssh_key, temp_node_ssh) decrypt_private_rsakey( chain.get_cloud_config()['jumpbox_key'], temp_jumpbox_ssh", "CONF.sub.chain_id) if chain is None: return LOG.error('This chain-id does not exist') node =", "enginefacade.writer.get_engine() return 
models.unregister_models(context) def output_ssh_key(): context = db_api.get_context() chain = db_api.get_chain(context, CONF.sub.chain_id) if", "temp_node_ssh) decrypt_private_rsakey( chain.get_cloud_config()['jumpbox_key'], temp_jumpbox_ssh ) args = [ '/bin/bash', '-c', 'ssh -i {}", "law or agreed to in writing, software # distributed under the License is", "the License for the specific language governing permissions and # limitations under the", "on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,", "import api as db_api from catena.db.sqlalchemy import models CONF = cfg.CONF LOG =", "db_api.get_chain(context, CONF.sub.chain_id) if chain is None: return LOG.error('This chain-id does not exist') node", "Copyright 2017 Hewlett Packard Enterprise Development LP. # # Licensed under the Apache", "= os.path.expanduser(\"~/.ssh\") jumpbox_ip = chain.get_cloud_config()['jumpbox_ip'] with tempfile.NamedTemporaryFile( dir=home) as temp_node_ssh, tempfile.NamedTemporaryFile( dir=home) as", "and call the appropriate class/method.\"\"\" CONF.register_cli_opts(SUB_OPTS) config.parse_args(sys.argv[1:]) config.setup_logging() try: if CONF.sub.action.startswith('db'): return CONF.sub.action_fn()", "decrypt_private_rsakey( chain.get_cloud_config()['jumpbox_key'], temp_jumpbox_ssh ) args = [ '/bin/bash', '-c', 'ssh -i {} -o", "register_models(): context = enginefacade.writer.get_engine() return models.register_models(context) def unregister_models(): context = enginefacade.writer.get_engine() return models.unregister_models(context)", "ANY KIND, either express or # implied. # See the License for the", "# limitations under the License. 
from __future__ import print_function import os import subprocess", "def open_ssh_connection(): context = db_api.get_context() chain = db_api.get_chain(context, CONF.sub.chain_id) if chain is None:", "log from catena.common import config from catena.common.utils import decrypt_private_rsakey from catena.common.utils import decrypt_rsakey", "decrypt_rsakey from catena.db.sqlalchemy import api as db_api from catena.db.sqlalchemy import models CONF =", "node is None: return LOG.error('This node-id does not exist') home = os.path.expanduser(\"~/.ssh\") jumpbox_ip", "import tempfile from oslo_config import cfg from oslo_db.sqlalchemy import enginefacade from oslo_log import", "in compliance with the License. # You may obtain a copy of the", "License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "= log.getLogger(__name__) def register_models(): context = enginefacade.writer.get_engine() return models.register_models(context) def unregister_models(): context =", "at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed", "models.unregister_models(context) def output_ssh_key(): context = db_api.get_context() chain = db_api.get_chain(context, CONF.sub.chain_id) if chain is", "See the License for the specific language governing permissions and # limitations under", "temp_node_ssh.name, temp_jumpbox_ssh.name, jumpbox_ip, node.ip) ] process = subprocess.Popen(args) process.wait() def register_sub_opts(subparser): parser =", "KIND, either express or # implied. 
# See the License for the specific", "import decrypt_rsakey from catena.db.sqlalchemy import api as db_api from catena.db.sqlalchemy import models CONF", "parser.add_argument('node_id') parser.set_defaults(action_fn=output_ssh_key) parser.set_defaults(action='ssh_key') parser = subparser.add_parser('ssh') parser.add_argument('chain_id') parser.add_argument('node_id') parser.set_defaults(action_fn=open_ssh_connection) parser.set_defaults(action='ssh') SUB_OPTS = [", "and # limitations under the License. from __future__ import print_function import os import", "%h:%p ubuntu@{}\" -o ' 'StrictHostKeyChecking=no ubuntu@{}'.format( temp_node_ssh.name, temp_jumpbox_ssh.name, jumpbox_ip, node.ip) ] process =", "Enterprise Development LP. # # Licensed under the Apache License, Version 2.0 (the", "if node is None: return LOG.error('This node-id does not exist') print(decrypt_rsakey(node.ssh_key)) def open_ssh_connection():", "a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required", "# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in", "models CONF = cfg.CONF LOG = log.getLogger(__name__) def register_models(): context = enginefacade.writer.get_engine() return", "appropriate class/method.\"\"\" CONF.register_cli_opts(SUB_OPTS) config.parse_args(sys.argv[1:]) config.setup_logging() try: if CONF.sub.action.startswith('db'): return CONF.sub.action_fn() if CONF.sub.action.startswith('ssh'): return", "context = db_api.get_context() chain = db_api.get_chain(context, CONF.sub.chain_id) if chain is None: return LOG.error('This", "= enginefacade.writer.get_engine() return models.register_models(context) def unregister_models(): context = enginefacade.writer.get_engine() return models.unregister_models(context) def output_ssh_key():", "import sys import tempfile from oslo_config import cfg from oslo_db.sqlalchemy import enginefacade from", "handler=register_sub_opts) ] def main(): \"\"\"Parse options 
and call the appropriate class/method.\"\"\" CONF.register_cli_opts(SUB_OPTS) config.parse_args(sys.argv[1:])", "exist') node = db_api.get_node(context, chain, CONF.sub.node_id) if node is None: return LOG.error('This node-id", "context = enginefacade.writer.get_engine() return models.register_models(context) def unregister_models(): context = enginefacade.writer.get_engine() return models.unregister_models(context) def", "chain.get_cloud_config()['jumpbox_key'], temp_jumpbox_ssh ) args = [ '/bin/bash', '-c', 'ssh -i {} -o ProxyCommand=\"ssh", "= subparser.add_parser('ssh') parser.add_argument('chain_id') parser.add_argument('node_id') parser.set_defaults(action_fn=open_ssh_connection) parser.set_defaults(action='ssh') SUB_OPTS = [ cfg.SubCommandOpt( 'sub', dest='sub', title='Sub", "from oslo_db.sqlalchemy import enginefacade from oslo_log import log from catena.common import config from", "Version 2.0 (the \"License\"); # you may not use this file except in", "for the specific language governing permissions and # limitations under the License. from", "except in compliance with the License. # You may obtain a copy of", "express or # implied. # See the License for the specific language governing", "catena.common import config from catena.common.utils import decrypt_private_rsakey from catena.common.utils import decrypt_rsakey from catena.db.sqlalchemy", "subparser.add_parser('ssh_key') parser.add_argument('chain_id') parser.add_argument('node_id') parser.set_defaults(action_fn=output_ssh_key) parser.set_defaults(action='ssh_key') parser = subparser.add_parser('ssh') parser.add_argument('chain_id') parser.add_argument('node_id') parser.set_defaults(action_fn=open_ssh_connection) parser.set_defaults(action='ssh') SUB_OPTS", "# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0", "may not use this file except in compliance with the License. 
# You", "License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS", "ubuntu@{}'.format( temp_node_ssh.name, temp_jumpbox_ssh.name, jumpbox_ip, node.ip) ] process = subprocess.Popen(args) process.wait() def register_sub_opts(subparser): parser", "process = subprocess.Popen(args) process.wait() def register_sub_opts(subparser): parser = subparser.add_parser('db_sync') parser.set_defaults(action_fn=register_models) parser.set_defaults(action='db_sync') parser =", "is None: return LOG.error('This chain-id does not exist') node = db_api.get_node(context, chain, CONF.sub.node_id)", "return LOG.error('This node-id does not exist') home = os.path.expanduser(\"~/.ssh\") jumpbox_ip = chain.get_cloud_config()['jumpbox_ip'] with", "(C) Copyright 2017 Hewlett Packard Enterprise Development LP. # # Licensed under the", "OF ANY KIND, either express or # implied. # See the License for", "permissions and # limitations under the License. from __future__ import print_function import os", "subparser.add_parser('db_remove') parser.set_defaults(action_fn=unregister_models) parser.set_defaults(action='db_remove') parser = subparser.add_parser('ssh_key') parser.add_argument('chain_id') parser.add_argument('node_id') parser.set_defaults(action_fn=output_ssh_key) parser.set_defaults(action='ssh_key') parser = subparser.add_parser('ssh')", "= db_api.get_context() chain = db_api.get_chain(context, CONF.sub.chain_id) if chain is None: return LOG.error('This chain-id", "tempfile from oslo_config import cfg from oslo_db.sqlalchemy import enginefacade from oslo_log import log", "does not exist') node = db_api.get_node(context, chain, CONF.sub.node_id) if node is None: return", "CONF.sub.node_id) if node is None: return LOG.error('This node-id does not exist') home =", "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. 
#", "chain-id does not exist') node = db_api.get_node(context, chain, CONF.sub.node_id) if node is None:", "if CONF.sub.action.startswith('db'): return CONF.sub.action_fn() if CONF.sub.action.startswith('ssh'): return CONF.sub.action_fn() except Exception as e: sys.exit(\"ERROR:", "process.wait() def register_sub_opts(subparser): parser = subparser.add_parser('db_sync') parser.set_defaults(action_fn=register_models) parser.set_defaults(action='db_sync') parser = subparser.add_parser('db_remove') parser.set_defaults(action_fn=unregister_models) parser.set_defaults(action='db_remove')", "import cfg from oslo_db.sqlalchemy import enginefacade from oslo_log import log from catena.common import", "title='Sub Options', handler=register_sub_opts) ] def main(): \"\"\"Parse options and call the appropriate class/method.\"\"\"", "print_function import os import subprocess import sys import tempfile from oslo_config import cfg", "None: return LOG.error('This chain-id does not exist') node = db_api.get_node(context, chain, CONF.sub.node_id) if", "LOG.error('This node-id does not exist') home = os.path.expanduser(\"~/.ssh\") jumpbox_ip = chain.get_cloud_config()['jumpbox_ip'] with tempfile.NamedTemporaryFile(", "distributed under the License is distributed on an \"AS IS\" BASIS, # WITHOUT", "CONF = cfg.CONF LOG = log.getLogger(__name__) def register_models(): context = enginefacade.writer.get_engine() return models.register_models(context)", "import print_function import os import subprocess import sys import tempfile from oslo_config import", "config.setup_logging() try: if CONF.sub.action.startswith('db'): return CONF.sub.action_fn() if CONF.sub.action.startswith('ssh'): return CONF.sub.action_fn() except Exception as", "subprocess import sys import tempfile from oslo_config import cfg from oslo_db.sqlalchemy import enginefacade", "as temp_jumpbox_ssh: decrypt_private_rsakey(node.ssh_key, temp_node_ssh) decrypt_private_rsakey( chain.get_cloud_config()['jumpbox_key'], 
temp_jumpbox_ssh ) args = [ '/bin/bash', '-c',", "under the License. from __future__ import print_function import os import subprocess import sys", "node-id does not exist') home = os.path.expanduser(\"~/.ssh\") jumpbox_ip = chain.get_cloud_config()['jumpbox_ip'] with tempfile.NamedTemporaryFile( dir=home)", "dest='sub', title='Sub Options', handler=register_sub_opts) ] def main(): \"\"\"Parse options and call the appropriate", "catena.db.sqlalchemy import models CONF = cfg.CONF LOG = log.getLogger(__name__) def register_models(): context =" ]
[ "[<PASSWORD> , <PASSWORD> ] numero_de_cuenta= [ 3115996681 , 32221822 ] tipo_de_cuenta= [ 'corriente',", "print(\"Saldo en cuenta:\",dinero[indice] ) if opcion==3: print(\"Digite el valor de la trasferencia:\") valor_trasferencia=input()", ", <PASSWORD> ] numero_de_cuenta= [ 3115996681 , 32221822 ] tipo_de_cuenta= [ 'corriente', 'ahorros'", "cerrada\") sesion_terminada=True if opcion==5: print(\"\\n\") print(\"Estado cuentas banco\") for i in list(range(0,len(persona))): print(\"\\n\")", "sesion_terminada=True if opcion==5: print(\"\\n\") print(\"Estado cuentas banco\") for i in list(range(0,len(persona))): print(\"\\n\") print(\"", "while(True): sesion=False while(sesion==False): print(\"\\n\") print(\"INICIO DE SESIÓN\") print(\"ingrese usuario:\") usuario_ingresado=input() print(\"ingrese contraseña:\") contraseña_ingresada=input()", "1000.00 , 1500.00 ] rta_pregunta_de_seguridad= ['perro' , 'murcielago' ] estado_sesion= ['cerrada' , 'cerrada'", "rta_pregunta_de_seguridad:\",rta_pregunta_de_seguridad[i]) print(\" estado_sesion:\",estado_sesion[i]) print(\"\\n\") else: print(\"sesion no activa, tramite invalido\") else: print(\"Opción invalida\")", "SESIÓN\") print(\"ingrese usuario:\") usuario_ingresado=input() print(\"ingrese contraseña:\") contraseña_ingresada=input() print(contraseña_ingresada) if usuario_ingresado in usuario: indice=usuario.index(usuario_ingresado)", "inicial en el banco\") for i in list(range(0,len(persona))): print(\"\\n\") print(\" persona:\",persona[i]) print(\" usuario:\",usuario[i])", ") print(\"Saldo en cuenta:\",dinero[indice] ) if opcion==3: print(\"Digite el valor de la trasferencia:\")", "usuario:\") usuario_ingresado=input() print(\"ingrese contraseña:\") contraseña_ingresada=input() print(contraseña_ingresada) if usuario_ingresado in usuario: indice=usuario.index(usuario_ingresado) if contraseña[indice]", "existe\") sesion_terminada=False while(sesion_terminada==False): print(\"\\n\") print(\"ELIJA ALGUNA TRANSACCION POR EL 
NUMERO:\") print(\"1: Consultar saldo\")", "indice_cta=numero_de_cuenta.index(numero_de_cuenta_final) dinero[indice]=dinero[indice]-valor_trasferencia dinero[indice_cta]=dinero[indice_cta]+valor_trasferencia print(\"trasferencia exitosa al numero de cuenta:\",numero_de_cuenta_final ) print(\"valor trasferencia:\",valor_trasferencia) print(\"saldo", "> dinero[indice]: print(\"Fondos insuficientes\") else: dinero[indice]=dinero[indice]-valor_retiro print(\"Valor retiro:\",valor_retiro ) print(\"Saldo en cuenta:\",dinero[indice] )", "[1,2,3,4,5]: indice=usuario.index(usuario_ingresado) if estado_sesion[indice] =='activa': if opcion==1: print(\"Saldo en cuenta\",numero_de_cuenta[indice] ,\":\",dinero[indice] ) if", "[ 3115996681 , 32221822 ] tipo_de_cuenta= [ 'corriente', 'ahorros' ] dinero= [ 1000.00", "3115996681 , 32221822 ] tipo_de_cuenta= [ 'corriente', 'ahorros' ] dinero= [ 1000.00 ,", "['perro' , 'murcielago' ] estado_sesion= ['cerrada' , 'cerrada' ] ''' print(\"Estado inicial en", "ALGUNA TRANSACCION POR EL NUMERO:\") print(\"1: Consultar saldo\") print(\"2: Hacer retiro\") print(\"3: Transferir", "saldo\") print(\"2: Hacer retiro\") print(\"3: Transferir dinero\") print(\"4: Cerrar sesión\") print(\"5: Estado todas", "print(\"Digite el valor a retirar:\") valor_retiro=input() valor_retiro=int(valor_retiro) if valor_retiro > dinero[indice]: print(\"Fondos insuficientes\")", "if opcion==4: if estado_sesion[indice] =='activa': estado_sesion[indice] ='cerrada' print(\"sesion cerrada\") else: print(\"sesion ya está", "rta_pregunta_de_seguridad:\",rta_pregunta_de_seguridad[i]) print(\" estado_sesion:\",estado_sesion[i]) print(\"\\n\") ''' while(True): sesion=False while(sesion==False): print(\"\\n\") print(\"INICIO DE SESIÓN\") print(\"ingrese", "'ahorros' ] dinero= [ 1000.00 , 1500.00 ] rta_pregunta_de_seguridad= ['perro' , 'murcielago' ]", "Consultar saldo\") print(\"2: Hacer retiro\") print(\"3: Transferir dinero\") print(\"4: Cerrar sesión\") print(\"5: Estado", 
"print(\"4: Cerrar sesión\") print(\"5: Estado todas las cuentas del banco\") opcion=input() opcion=int(opcion) if", "opcion==2: print(\"Digite el valor a retirar:\") valor_retiro=input() valor_retiro=int(valor_retiro) if valor_retiro > dinero[indice]: print(\"Fondos", "TRANSACCION POR EL NUMERO:\") print(\"1: Consultar saldo\") print(\"2: Hacer retiro\") print(\"3: Transferir dinero\")", "print(\"Fondos insuficientes\") else: dinero[indice]=dinero[indice]-valor_retiro print(\"Valor retiro:\",valor_retiro ) print(\"Saldo en cuenta:\",dinero[indice] ) if opcion==3:", "contraseña_ingresada=input() print(contraseña_ingresada) if usuario_ingresado in usuario: indice=usuario.index(usuario_ingresado) if contraseña[indice] == int(contraseña_ingresada): estado_sesion[indice] ='activa'", "] ''' print(\"Estado inicial en el banco\") for i in list(range(0,len(persona))): print(\"\\n\") print(\"", "en cuenta\",numero_de_cuenta[indice] ,\":\",dinero[indice] ) if opcion==2: print(\"Digite el valor a retirar:\") valor_retiro=input() valor_retiro=int(valor_retiro)", "logueado\") sesion=True else: print(\"contraseña invalida\") else: print(\"usuario no existe\") sesion_terminada=False while(sesion_terminada==False): print(\"\\n\") print(\"ELIJA", "insuficientes\") else: if numero_de_cuenta_final in numero_de_cuenta: indice_cta=numero_de_cuenta.index(numero_de_cuenta_final) dinero[indice]=dinero[indice]-valor_trasferencia dinero[indice_cta]=dinero[indice_cta]+valor_trasferencia print(\"trasferencia exitosa al numero", ", 1500.00 ] rta_pregunta_de_seguridad= ['perro' , 'murcielago' ] estado_sesion= ['cerrada' , 'cerrada' ]", "Estado todas las cuentas del banco\") opcion=input() opcion=int(opcion) if opcion in [1,2,3,4,5]: indice=usuario.index(usuario_ingresado)", "numero_de_cuenta:\",numero_de_cuenta[i]) print(\" tipo_de_cuenta:\",tipo_de_cuenta[i]) print(\" dinero:\",dinero[i]) print(\" rta_pregunta_de_seguridad:\",rta_pregunta_de_seguridad[i]) print(\" 
estado_sesion:\",estado_sesion[i]) print(\"\\n\") else: print(\"sesion no", "en cuenta\",numero_de_cuenta[indice] ,\":\",dinero[indice] ) else: print(\"la cuenta de destino no existe\") if opcion==4:", "dinero[indice]=dinero[indice]-valor_trasferencia dinero[indice_cta]=dinero[indice_cta]+valor_trasferencia print(\"trasferencia exitosa al numero de cuenta:\",numero_de_cuenta_final ) print(\"valor trasferencia:\",valor_trasferencia) print(\"saldo en", "print(\"Saldo en cuenta\",numero_de_cuenta[indice] ,\":\",dinero[indice] ) if opcion==2: print(\"Digite el valor a retirar:\") valor_retiro=input()", "valor_trasferencia=int(valor_trasferencia) print(\"Digite la cuenta de destino:\") numero_de_cuenta_final=input() numero_de_cuenta_final=int(numero_de_cuenta_final) if valor_trasferencia > dinero[indice]: print(\"fondos", "print(\"trasferencia exitosa al numero de cuenta:\",numero_de_cuenta_final ) print(\"valor trasferencia:\",valor_trasferencia) print(\"saldo en cuenta\",numero_de_cuenta[indice] ,\":\",dinero[indice]", "else: print(\"contraseña invalida\") else: print(\"usuario no existe\") sesion_terminada=False while(sesion_terminada==False): print(\"\\n\") print(\"ELIJA ALGUNA TRANSACCION", "in usuario: indice=usuario.index(usuario_ingresado) if contraseña[indice] == int(contraseña_ingresada): estado_sesion[indice] ='activa' print(\"usuario logueado\") sesion=True else:", "print(\"la cuenta de destino no existe\") if opcion==4: if estado_sesion[indice] =='activa': estado_sesion[indice] ='cerrada'", "else: print(\"usuario no existe\") sesion_terminada=False while(sesion_terminada==False): print(\"\\n\") print(\"ELIJA ALGUNA TRANSACCION POR EL NUMERO:\")", "no existe\") sesion_terminada=False while(sesion_terminada==False): print(\"\\n\") print(\"ELIJA ALGUNA TRANSACCION POR EL NUMERO:\") print(\"1: Consultar", "destino no existe\") if opcion==4: if estado_sesion[indice] =='activa': estado_sesion[indice] ='cerrada' print(\"sesion cerrada\") else:", "print(\"\\n\") 
print(\"Estado cuentas banco\") for i in list(range(0,len(persona))): print(\"\\n\") print(\" persona:\",persona[i]) print(\" usuario:\",usuario[i])", "print(\"ingrese contraseña:\") contraseña_ingresada=input() print(contraseña_ingresada) if usuario_ingresado in usuario: indice=usuario.index(usuario_ingresado) if contraseña[indice] == int(contraseña_ingresada):", "['<NAME>', '<NAME>' ] usuario= ['superman' , 'batman' ] contraseña= [<PASSWORD> , <PASSWORD> ]", "] usuario= ['superman' , 'batman' ] contraseña= [<PASSWORD> , <PASSWORD> ] numero_de_cuenta= [", "if opcion==5: print(\"\\n\") print(\"Estado cuentas banco\") for i in list(range(0,len(persona))): print(\"\\n\") print(\" persona:\",persona[i])", "print(\"saldo en cuenta\",numero_de_cuenta[indice] ,\":\",dinero[indice] ) else: print(\"la cuenta de destino no existe\") if", "print(\"fondos insuficientes\") else: if numero_de_cuenta_final in numero_de_cuenta: indice_cta=numero_de_cuenta.index(numero_de_cuenta_final) dinero[indice]=dinero[indice]-valor_trasferencia dinero[indice_cta]=dinero[indice_cta]+valor_trasferencia print(\"trasferencia exitosa al", "if numero_de_cuenta_final in numero_de_cuenta: indice_cta=numero_de_cuenta.index(numero_de_cuenta_final) dinero[indice]=dinero[indice]-valor_trasferencia dinero[indice_cta]=dinero[indice_cta]+valor_trasferencia print(\"trasferencia exitosa al numero de cuenta:\",numero_de_cuenta_final", ") if opcion==3: print(\"Digite el valor de la trasferencia:\") valor_trasferencia=input() valor_trasferencia=int(valor_trasferencia) print(\"Digite la", "cerrada\") else: print(\"sesion ya está cerrada\") sesion_terminada=True if opcion==5: print(\"\\n\") print(\"Estado cuentas banco\")", "destino:\") numero_de_cuenta_final=input() numero_de_cuenta_final=int(numero_de_cuenta_final) if valor_trasferencia > dinero[indice]: print(\"fondos insuficientes\") else: if numero_de_cuenta_final in", "] rta_pregunta_de_seguridad= ['perro' , 'murcielago' ] estado_sesion= ['cerrada' , 
'cerrada' ] ''' print(\"Estado", "print(\" rta_pregunta_de_seguridad:\",rta_pregunta_de_seguridad[i]) print(\" estado_sesion:\",estado_sesion[i]) print(\"\\n\") ''' while(True): sesion=False while(sesion==False): print(\"\\n\") print(\"INICIO DE SESIÓN\")", "persona:\",persona[i]) print(\" usuario:\",usuario[i]) print(\" contraseña:\",contraseña[i]) print(\" numero_de_cuenta:\",numero_de_cuenta[i]) print(\" tipo_de_cuenta:\",tipo_de_cuenta[i]) print(\" dinero:\",dinero[i]) print(\" rta_pregunta_de_seguridad:\",rta_pregunta_de_seguridad[i])", "trasferencia:\",valor_trasferencia) print(\"saldo en cuenta\",numero_de_cuenta[indice] ,\":\",dinero[indice] ) else: print(\"la cuenta de destino no existe\")", ", 'cerrada' ] ''' print(\"Estado inicial en el banco\") for i in list(range(0,len(persona))):", "valor de la trasferencia:\") valor_trasferencia=input() valor_trasferencia=int(valor_trasferencia) print(\"Digite la cuenta de destino:\") numero_de_cuenta_final=input() numero_de_cuenta_final=int(numero_de_cuenta_final)", "opcion=input() opcion=int(opcion) if opcion in [1,2,3,4,5]: indice=usuario.index(usuario_ingresado) if estado_sesion[indice] =='activa': if opcion==1: print(\"Saldo", "if valor_retiro > dinero[indice]: print(\"Fondos insuficientes\") else: dinero[indice]=dinero[indice]-valor_retiro print(\"Valor retiro:\",valor_retiro ) print(\"Saldo en", "estado_sesion[indice] =='activa': if opcion==1: print(\"Saldo en cuenta\",numero_de_cuenta[indice] ,\":\",dinero[indice] ) if opcion==2: print(\"Digite el", "contraseña= [<PASSWORD> , <PASSWORD> ] numero_de_cuenta= [ 3115996681 , 32221822 ] tipo_de_cuenta= [", "] tipo_de_cuenta= [ 'corriente', 'ahorros' ] dinero= [ 1000.00 , 1500.00 ] rta_pregunta_de_seguridad=", "DE SESIÓN\") print(\"ingrese usuario:\") usuario_ingresado=input() print(\"ingrese contraseña:\") contraseña_ingresada=input() print(contraseña_ingresada) if usuario_ingresado in usuario:", "for i in list(range(0,len(persona))): print(\"\\n\") print(\" 
persona:\",persona[i]) print(\" usuario:\",usuario[i]) print(\" contraseña:\",contraseña[i]) print(\" numero_de_cuenta:\",numero_de_cuenta[i])", "en cuenta:\",dinero[indice] ) if opcion==3: print(\"Digite el valor de la trasferencia:\") valor_trasferencia=input() valor_trasferencia=int(valor_trasferencia)", "numero_de_cuenta_final=int(numero_de_cuenta_final) if valor_trasferencia > dinero[indice]: print(\"fondos insuficientes\") else: if numero_de_cuenta_final in numero_de_cuenta: indice_cta=numero_de_cuenta.index(numero_de_cuenta_final)", "cuenta de destino no existe\") if opcion==4: if estado_sesion[indice] =='activa': estado_sesion[indice] ='cerrada' print(\"sesion", "valor_retiro > dinero[indice]: print(\"Fondos insuficientes\") else: dinero[indice]=dinero[indice]-valor_retiro print(\"Valor retiro:\",valor_retiro ) print(\"Saldo en cuenta:\",dinero[indice]", "else: if numero_de_cuenta_final in numero_de_cuenta: indice_cta=numero_de_cuenta.index(numero_de_cuenta_final) dinero[indice]=dinero[indice]-valor_trasferencia dinero[indice_cta]=dinero[indice_cta]+valor_trasferencia print(\"trasferencia exitosa al numero de", "valor a retirar:\") valor_retiro=input() valor_retiro=int(valor_retiro) if valor_retiro > dinero[indice]: print(\"Fondos insuficientes\") else: dinero[indice]=dinero[indice]-valor_retiro", "> dinero[indice]: print(\"fondos insuficientes\") else: if numero_de_cuenta_final in numero_de_cuenta: indice_cta=numero_de_cuenta.index(numero_de_cuenta_final) dinero[indice]=dinero[indice]-valor_trasferencia dinero[indice_cta]=dinero[indice_cta]+valor_trasferencia print(\"trasferencia", "el valor de la trasferencia:\") valor_trasferencia=input() valor_trasferencia=int(valor_trasferencia) print(\"Digite la cuenta de destino:\") numero_de_cuenta_final=input()", "print(\" rta_pregunta_de_seguridad:\",rta_pregunta_de_seguridad[i]) print(\" estado_sesion:\",estado_sesion[i]) print(\"\\n\") else: print(\"sesion no activa, tramite invalido\") else: print(\"Opción", 
"dinero:\",dinero[i]) print(\" rta_pregunta_de_seguridad:\",rta_pregunta_de_seguridad[i]) print(\" estado_sesion:\",estado_sesion[i]) print(\"\\n\") ''' while(True): sesion=False while(sesion==False): print(\"\\n\") print(\"INICIO DE", ", 32221822 ] tipo_de_cuenta= [ 'corriente', 'ahorros' ] dinero= [ 1000.00 , 1500.00", "='activa' print(\"usuario logueado\") sesion=True else: print(\"contraseña invalida\") else: print(\"usuario no existe\") sesion_terminada=False while(sesion_terminada==False):", "contraseña[indice] == int(contraseña_ingresada): estado_sesion[indice] ='activa' print(\"usuario logueado\") sesion=True else: print(\"contraseña invalida\") else: print(\"usuario", "\"\"\" Juandabu \"\"\" persona= ['<NAME>', '<NAME>' ] usuario= ['superman' , 'batman' ] contraseña=", "print(\"\\n\") ''' while(True): sesion=False while(sesion==False): print(\"\\n\") print(\"INICIO DE SESIÓN\") print(\"ingrese usuario:\") usuario_ingresado=input() print(\"ingrese", "if usuario_ingresado in usuario: indice=usuario.index(usuario_ingresado) if contraseña[indice] == int(contraseña_ingresada): estado_sesion[indice] ='activa' print(\"usuario logueado\")", "invalida\") else: print(\"usuario no existe\") sesion_terminada=False while(sesion_terminada==False): print(\"\\n\") print(\"ELIJA ALGUNA TRANSACCION POR EL", "opcion=int(opcion) if opcion in [1,2,3,4,5]: indice=usuario.index(usuario_ingresado) if estado_sesion[indice] =='activa': if opcion==1: print(\"Saldo en", "numero_de_cuenta: indice_cta=numero_de_cuenta.index(numero_de_cuenta_final) dinero[indice]=dinero[indice]-valor_trasferencia dinero[indice_cta]=dinero[indice_cta]+valor_trasferencia print(\"trasferencia exitosa al numero de cuenta:\",numero_de_cuenta_final ) print(\"valor trasferencia:\",valor_trasferencia)", "tipo_de_cuenta:\",tipo_de_cuenta[i]) print(\" dinero:\",dinero[i]) print(\" rta_pregunta_de_seguridad:\",rta_pregunta_de_seguridad[i]) print(\" estado_sesion:\",estado_sesion[i]) print(\"\\n\") else: 
print(\"sesion no activa, tramite", "'corriente', 'ahorros' ] dinero= [ 1000.00 , 1500.00 ] rta_pregunta_de_seguridad= ['perro' , 'murcielago'", "] numero_de_cuenta= [ 3115996681 , 32221822 ] tipo_de_cuenta= [ 'corriente', 'ahorros' ] dinero=", "if opcion==3: print(\"Digite el valor de la trasferencia:\") valor_trasferencia=input() valor_trasferencia=int(valor_trasferencia) print(\"Digite la cuenta", "'murcielago' ] estado_sesion= ['cerrada' , 'cerrada' ] ''' print(\"Estado inicial en el banco\")", "numero_de_cuenta:\",numero_de_cuenta[i]) print(\" tipo_de_cuenta:\",tipo_de_cuenta[i]) print(\" dinero:\",dinero[i]) print(\" rta_pregunta_de_seguridad:\",rta_pregunta_de_seguridad[i]) print(\" estado_sesion:\",estado_sesion[i]) print(\"\\n\") ''' while(True): sesion=False", "estado_sesion[indice] =='activa': estado_sesion[indice] ='cerrada' print(\"sesion cerrada\") else: print(\"sesion ya está cerrada\") sesion_terminada=True if", "in numero_de_cuenta: indice_cta=numero_de_cuenta.index(numero_de_cuenta_final) dinero[indice]=dinero[indice]-valor_trasferencia dinero[indice_cta]=dinero[indice_cta]+valor_trasferencia print(\"trasferencia exitosa al numero de cuenta:\",numero_de_cuenta_final ) print(\"valor", "print(\"Valor retiro:\",valor_retiro ) print(\"Saldo en cuenta:\",dinero[indice] ) if opcion==3: print(\"Digite el valor de", "print(\" dinero:\",dinero[i]) print(\" rta_pregunta_de_seguridad:\",rta_pregunta_de_seguridad[i]) print(\" estado_sesion:\",estado_sesion[i]) print(\"\\n\") else: print(\"sesion no activa, tramite invalido\")", "[ 'corriente', 'ahorros' ] dinero= [ 1000.00 , 1500.00 ] rta_pregunta_de_seguridad= ['perro' ,", "while(sesion==False): print(\"\\n\") print(\"INICIO DE SESIÓN\") print(\"ingrese usuario:\") usuario_ingresado=input() print(\"ingrese contraseña:\") contraseña_ingresada=input() print(contraseña_ingresada) if", "Hacer retiro\") print(\"3: Transferir dinero\") print(\"4: Cerrar sesión\") print(\"5: Estado todas las cuentas", "de 
cuenta:\",numero_de_cuenta_final ) print(\"valor trasferencia:\",valor_trasferencia) print(\"saldo en cuenta\",numero_de_cuenta[indice] ,\":\",dinero[indice] ) else: print(\"la cuenta", ", 'murcielago' ] estado_sesion= ['cerrada' , 'cerrada' ] ''' print(\"Estado inicial en el", "print(\" tipo_de_cuenta:\",tipo_de_cuenta[i]) print(\" dinero:\",dinero[i]) print(\" rta_pregunta_de_seguridad:\",rta_pregunta_de_seguridad[i]) print(\" estado_sesion:\",estado_sesion[i]) print(\"\\n\") ''' while(True): sesion=False while(sesion==False):", "=='activa': if opcion==1: print(\"Saldo en cuenta\",numero_de_cuenta[indice] ,\":\",dinero[indice] ) if opcion==2: print(\"Digite el valor", "retirar:\") valor_retiro=input() valor_retiro=int(valor_retiro) if valor_retiro > dinero[indice]: print(\"Fondos insuficientes\") else: dinero[indice]=dinero[indice]-valor_retiro print(\"Valor retiro:\",valor_retiro", "else: print(\"sesion ya está cerrada\") sesion_terminada=True if opcion==5: print(\"\\n\") print(\"Estado cuentas banco\") for", "i in list(range(0,len(persona))): print(\"\\n\") print(\" persona:\",persona[i]) print(\" usuario:\",usuario[i]) print(\" contraseña:\",contraseña[i]) print(\" numero_de_cuenta:\",numero_de_cuenta[i]) print(\"", "rta_pregunta_de_seguridad= ['perro' , 'murcielago' ] estado_sesion= ['cerrada' , 'cerrada' ] ''' print(\"Estado inicial", "] estado_sesion= ['cerrada' , 'cerrada' ] ''' print(\"Estado inicial en el banco\") for", "else: print(\"la cuenta de destino no existe\") if opcion==4: if estado_sesion[indice] =='activa': estado_sesion[indice]", "cuentas banco\") for i in list(range(0,len(persona))): print(\"\\n\") print(\" persona:\",persona[i]) print(\" usuario:\",usuario[i]) print(\" contraseña:\",contraseña[i])", "NUMERO:\") print(\"1: Consultar saldo\") print(\"2: Hacer retiro\") print(\"3: Transferir dinero\") print(\"4: Cerrar sesión\")", "''' print(\"Estado inicial en el banco\") for i in list(range(0,len(persona))): print(\"\\n\") print(\" 
persona:\",persona[i])", "while(sesion_terminada==False): print(\"\\n\") print(\"ELIJA ALGUNA TRANSACCION POR EL NUMERO:\") print(\"1: Consultar saldo\") print(\"2: Hacer", "print(\"\\n\") print(\"ELIJA ALGUNA TRANSACCION POR EL NUMERO:\") print(\"1: Consultar saldo\") print(\"2: Hacer retiro\")", "print(\"sesion cerrada\") else: print(\"sesion ya está cerrada\") sesion_terminada=True if opcion==5: print(\"\\n\") print(\"Estado cuentas", "valor_trasferencia=input() valor_trasferencia=int(valor_trasferencia) print(\"Digite la cuenta de destino:\") numero_de_cuenta_final=input() numero_de_cuenta_final=int(numero_de_cuenta_final) if valor_trasferencia > dinero[indice]:", "print(\" contraseña:\",contraseña[i]) print(\" numero_de_cuenta:\",numero_de_cuenta[i]) print(\" tipo_de_cuenta:\",tipo_de_cuenta[i]) print(\" dinero:\",dinero[i]) print(\" rta_pregunta_de_seguridad:\",rta_pregunta_de_seguridad[i]) print(\" estado_sesion:\",estado_sesion[i]) print(\"\\n\")", "'cerrada' ] ''' print(\"Estado inicial en el banco\") for i in list(range(0,len(persona))): print(\"\\n\")", "print(\"1: Consultar saldo\") print(\"2: Hacer retiro\") print(\"3: Transferir dinero\") print(\"4: Cerrar sesión\") print(\"5:", ") else: print(\"la cuenta de destino no existe\") if opcion==4: if estado_sesion[indice] =='activa':", "print(\"contraseña invalida\") else: print(\"usuario no existe\") sesion_terminada=False while(sesion_terminada==False): print(\"\\n\") print(\"ELIJA ALGUNA TRANSACCION POR", "del banco\") opcion=input() opcion=int(opcion) if opcion in [1,2,3,4,5]: indice=usuario.index(usuario_ingresado) if estado_sesion[indice] =='activa': if", "print(\"Estado inicial en el banco\") for i in list(range(0,len(persona))): print(\"\\n\") print(\" persona:\",persona[i]) print(\"", "contraseña:\",contraseña[i]) print(\" numero_de_cuenta:\",numero_de_cuenta[i]) print(\" tipo_de_cuenta:\",tipo_de_cuenta[i]) print(\" dinero:\",dinero[i]) print(\" 
rta_pregunta_de_seguridad:\",rta_pregunta_de_seguridad[i]) print(\" estado_sesion:\",estado_sesion[i]) print(\"\\n\") '''", "if contraseña[indice] == int(contraseña_ingresada): estado_sesion[indice] ='activa' print(\"usuario logueado\") sesion=True else: print(\"contraseña invalida\") else:", "\"\"\" persona= ['<NAME>', '<NAME>' ] usuario= ['superman' , 'batman' ] contraseña= [<PASSWORD> ,", "banco\") for i in list(range(0,len(persona))): print(\"\\n\") print(\" persona:\",persona[i]) print(\" usuario:\",usuario[i]) print(\" contraseña:\",contraseña[i]) print(\"", "print(\"3: Transferir dinero\") print(\"4: Cerrar sesión\") print(\"5: Estado todas las cuentas del banco\")", "sesion=True else: print(\"contraseña invalida\") else: print(\"usuario no existe\") sesion_terminada=False while(sesion_terminada==False): print(\"\\n\") print(\"ELIJA ALGUNA", "de destino:\") numero_de_cuenta_final=input() numero_de_cuenta_final=int(numero_de_cuenta_final) if valor_trasferencia > dinero[indice]: print(\"fondos insuficientes\") else: if numero_de_cuenta_final", "no existe\") if opcion==4: if estado_sesion[indice] =='activa': estado_sesion[indice] ='cerrada' print(\"sesion cerrada\") else: print(\"sesion", "Cerrar sesión\") print(\"5: Estado todas las cuentas del banco\") opcion=input() opcion=int(opcion) if opcion", "opcion in [1,2,3,4,5]: indice=usuario.index(usuario_ingresado) if estado_sesion[indice] =='activa': if opcion==1: print(\"Saldo en cuenta\",numero_de_cuenta[indice] ,\":\",dinero[indice]", "print(\"Estado cuentas banco\") for i in list(range(0,len(persona))): print(\"\\n\") print(\" persona:\",persona[i]) print(\" usuario:\",usuario[i]) print(\"", "Transferir dinero\") print(\"4: Cerrar sesión\") print(\"5: Estado todas las cuentas del banco\") opcion=input()", "estado_sesion= ['cerrada' , 'cerrada' ] ''' print(\"Estado inicial en el banco\") for i", "estado_sesion[indice] ='activa' print(\"usuario logueado\") sesion=True else: print(\"contraseña invalida\") else: 
print(\"usuario no existe\") sesion_terminada=False", "cuenta de destino:\") numero_de_cuenta_final=input() numero_de_cuenta_final=int(numero_de_cuenta_final) if valor_trasferencia > dinero[indice]: print(\"fondos insuficientes\") else: if", "estado_sesion[indice] ='cerrada' print(\"sesion cerrada\") else: print(\"sesion ya está cerrada\") sesion_terminada=True if opcion==5: print(\"\\n\")", "='cerrada' print(\"sesion cerrada\") else: print(\"sesion ya está cerrada\") sesion_terminada=True if opcion==5: print(\"\\n\") print(\"Estado", "valor_retiro=input() valor_retiro=int(valor_retiro) if valor_retiro > dinero[indice]: print(\"Fondos insuficientes\") else: dinero[indice]=dinero[indice]-valor_retiro print(\"Valor retiro:\",valor_retiro )", "dinero:\",dinero[i]) print(\" rta_pregunta_de_seguridad:\",rta_pregunta_de_seguridad[i]) print(\" estado_sesion:\",estado_sesion[i]) print(\"\\n\") else: print(\"sesion no activa, tramite invalido\") else:", "estado_sesion:\",estado_sesion[i]) print(\"\\n\") ''' while(True): sesion=False while(sesion==False): print(\"\\n\") print(\"INICIO DE SESIÓN\") print(\"ingrese usuario:\") usuario_ingresado=input()", "opcion==3: print(\"Digite el valor de la trasferencia:\") valor_trasferencia=input() valor_trasferencia=int(valor_trasferencia) print(\"Digite la cuenta de", "indice=usuario.index(usuario_ingresado) if contraseña[indice] == int(contraseña_ingresada): estado_sesion[indice] ='activa' print(\"usuario logueado\") sesion=True else: print(\"contraseña invalida\")", "in list(range(0,len(persona))): print(\"\\n\") print(\" persona:\",persona[i]) print(\" usuario:\",usuario[i]) print(\" contraseña:\",contraseña[i]) print(\" numero_de_cuenta:\",numero_de_cuenta[i]) print(\" tipo_de_cuenta:\",tipo_de_cuenta[i])", "a retirar:\") valor_retiro=input() valor_retiro=int(valor_retiro) if valor_retiro > dinero[indice]: print(\"Fondos insuficientes\") else: dinero[indice]=dinero[indice]-valor_retiro print(\"Valor", "print(\" 
persona:\",persona[i]) print(\" usuario:\",usuario[i]) print(\" contraseña:\",contraseña[i]) print(\" numero_de_cuenta:\",numero_de_cuenta[i]) print(\" tipo_de_cuenta:\",tipo_de_cuenta[i]) print(\" dinero:\",dinero[i]) print(\"", "print(\"Digite el valor de la trasferencia:\") valor_trasferencia=input() valor_trasferencia=int(valor_trasferencia) print(\"Digite la cuenta de destino:\")", "cuenta\",numero_de_cuenta[indice] ,\":\",dinero[indice] ) if opcion==2: print(\"Digite el valor a retirar:\") valor_retiro=input() valor_retiro=int(valor_retiro) if", "opcion==1: print(\"Saldo en cuenta\",numero_de_cuenta[indice] ,\":\",dinero[indice] ) if opcion==2: print(\"Digite el valor a retirar:\")", "trasferencia:\") valor_trasferencia=input() valor_trasferencia=int(valor_trasferencia) print(\"Digite la cuenta de destino:\") numero_de_cuenta_final=input() numero_de_cuenta_final=int(numero_de_cuenta_final) if valor_trasferencia >", "numero de cuenta:\",numero_de_cuenta_final ) print(\"valor trasferencia:\",valor_trasferencia) print(\"saldo en cuenta\",numero_de_cuenta[indice] ,\":\",dinero[indice] ) else: print(\"la", "32221822 ] tipo_de_cuenta= [ 'corriente', 'ahorros' ] dinero= [ 1000.00 , 1500.00 ]", "print(\" tipo_de_cuenta:\",tipo_de_cuenta[i]) print(\" dinero:\",dinero[i]) print(\" rta_pregunta_de_seguridad:\",rta_pregunta_de_seguridad[i]) print(\" estado_sesion:\",estado_sesion[i]) print(\"\\n\") else: print(\"sesion no activa,", "['cerrada' , 'cerrada' ] ''' print(\"Estado inicial en el banco\") for i in", "dinero= [ 1000.00 , 1500.00 ] rta_pregunta_de_seguridad= ['perro' , 'murcielago' ] estado_sesion= ['cerrada'", "print(\" estado_sesion:\",estado_sesion[i]) print(\"\\n\") ''' while(True): sesion=False while(sesion==False): print(\"\\n\") print(\"INICIO DE SESIÓN\") print(\"ingrese usuario:\")", "la cuenta de destino:\") numero_de_cuenta_final=input() numero_de_cuenta_final=int(numero_de_cuenta_final) if valor_trasferencia > dinero[indice]: print(\"fondos 
insuficientes\") else:", "está cerrada\") sesion_terminada=True if opcion==5: print(\"\\n\") print(\"Estado cuentas banco\") for i in list(range(0,len(persona))):", "sesión\") print(\"5: Estado todas las cuentas del banco\") opcion=input() opcion=int(opcion) if opcion in", "int(contraseña_ingresada): estado_sesion[indice] ='activa' print(\"usuario logueado\") sesion=True else: print(\"contraseña invalida\") else: print(\"usuario no existe\")", "1500.00 ] rta_pregunta_de_seguridad= ['perro' , 'murcielago' ] estado_sesion= ['cerrada' , 'cerrada' ] '''", "list(range(0,len(persona))): print(\"\\n\") print(\" persona:\",persona[i]) print(\" usuario:\",usuario[i]) print(\" contraseña:\",contraseña[i]) print(\" numero_de_cuenta:\",numero_de_cuenta[i]) print(\" tipo_de_cuenta:\",tipo_de_cuenta[i]) print(\"", "EL NUMERO:\") print(\"1: Consultar saldo\") print(\"2: Hacer retiro\") print(\"3: Transferir dinero\") print(\"4: Cerrar", "dinero[indice_cta]=dinero[indice_cta]+valor_trasferencia print(\"trasferencia exitosa al numero de cuenta:\",numero_de_cuenta_final ) print(\"valor trasferencia:\",valor_trasferencia) print(\"saldo en cuenta\",numero_de_cuenta[indice]", "''' while(True): sesion=False while(sesion==False): print(\"\\n\") print(\"INICIO DE SESIÓN\") print(\"ingrese usuario:\") usuario_ingresado=input() print(\"ingrese contraseña:\")", "] contraseña= [<PASSWORD> , <PASSWORD> ] numero_de_cuenta= [ 3115996681 , 32221822 ] tipo_de_cuenta=", "print(\"ELIJA ALGUNA TRANSACCION POR EL NUMERO:\") print(\"1: Consultar saldo\") print(\"2: Hacer retiro\") print(\"3:", "usuario= ['superman' , 'batman' ] contraseña= [<PASSWORD> , <PASSWORD> ] numero_de_cuenta= [ 3115996681", "todas las cuentas del banco\") opcion=input() opcion=int(opcion) if opcion in [1,2,3,4,5]: indice=usuario.index(usuario_ingresado) if", ") print(\"valor trasferencia:\",valor_trasferencia) print(\"saldo en cuenta\",numero_de_cuenta[indice] ,\":\",dinero[indice] ) else: print(\"la cuenta de destino", "en el 
banco\") for i in list(range(0,len(persona))): print(\"\\n\") print(\" persona:\",persona[i]) print(\" usuario:\",usuario[i]) print(\"", "cuenta:\",dinero[indice] ) if opcion==3: print(\"Digite el valor de la trasferencia:\") valor_trasferencia=input() valor_trasferencia=int(valor_trasferencia) print(\"Digite", "print(\" numero_de_cuenta:\",numero_de_cuenta[i]) print(\" tipo_de_cuenta:\",tipo_de_cuenta[i]) print(\" dinero:\",dinero[i]) print(\" rta_pregunta_de_seguridad:\",rta_pregunta_de_seguridad[i]) print(\" estado_sesion:\",estado_sesion[i]) print(\"\\n\") ''' while(True):", "in [1,2,3,4,5]: indice=usuario.index(usuario_ingresado) if estado_sesion[indice] =='activa': if opcion==1: print(\"Saldo en cuenta\",numero_de_cuenta[indice] ,\":\",dinero[indice] )", "dinero[indice]: print(\"fondos insuficientes\") else: if numero_de_cuenta_final in numero_de_cuenta: indice_cta=numero_de_cuenta.index(numero_de_cuenta_final) dinero[indice]=dinero[indice]-valor_trasferencia dinero[indice_cta]=dinero[indice_cta]+valor_trasferencia print(\"trasferencia exitosa", "valor_trasferencia > dinero[indice]: print(\"fondos insuficientes\") else: if numero_de_cuenta_final in numero_de_cuenta: indice_cta=numero_de_cuenta.index(numero_de_cuenta_final) dinero[indice]=dinero[indice]-valor_trasferencia dinero[indice_cta]=dinero[indice_cta]+valor_trasferencia", ", 'batman' ] contraseña= [<PASSWORD> , <PASSWORD> ] numero_de_cuenta= [ 3115996681 , 32221822", "de destino no existe\") if opcion==4: if estado_sesion[indice] =='activa': estado_sesion[indice] ='cerrada' print(\"sesion cerrada\")", "'<NAME>' ] usuario= ['superman' , 'batman' ] contraseña= [<PASSWORD> , <PASSWORD> ] numero_de_cuenta=", "cuenta\",numero_de_cuenta[indice] ,\":\",dinero[indice] ) else: print(\"la cuenta de destino no existe\") if opcion==4: if", "print(\"\\n\") print(\"INICIO DE SESIÓN\") print(\"ingrese usuario:\") usuario_ingresado=input() print(\"ingrese contraseña:\") contraseña_ingresada=input() 
print(contraseña_ingresada) if usuario_ingresado", "else: dinero[indice]=dinero[indice]-valor_retiro print(\"Valor retiro:\",valor_retiro ) print(\"Saldo en cuenta:\",dinero[indice] ) if opcion==3: print(\"Digite el", "de la trasferencia:\") valor_trasferencia=input() valor_trasferencia=int(valor_trasferencia) print(\"Digite la cuenta de destino:\") numero_de_cuenta_final=input() numero_de_cuenta_final=int(numero_de_cuenta_final) if", "print(\" dinero:\",dinero[i]) print(\" rta_pregunta_de_seguridad:\",rta_pregunta_de_seguridad[i]) print(\" estado_sesion:\",estado_sesion[i]) print(\"\\n\") ''' while(True): sesion=False while(sesion==False): print(\"\\n\") print(\"INICIO", "numero_de_cuenta_final=input() numero_de_cuenta_final=int(numero_de_cuenta_final) if valor_trasferencia > dinero[indice]: print(\"fondos insuficientes\") else: if numero_de_cuenta_final in numero_de_cuenta:", "persona= ['<NAME>', '<NAME>' ] usuario= ['superman' , 'batman' ] contraseña= [<PASSWORD> , <PASSWORD>", ") if opcion==2: print(\"Digite el valor a retirar:\") valor_retiro=input() valor_retiro=int(valor_retiro) if valor_retiro >", "numero_de_cuenta= [ 3115996681 , 32221822 ] tipo_de_cuenta= [ 'corriente', 'ahorros' ] dinero= [", "=='activa': estado_sesion[indice] ='cerrada' print(\"sesion cerrada\") else: print(\"sesion ya está cerrada\") sesion_terminada=True if opcion==5:", "print(\"ingrese usuario:\") usuario_ingresado=input() print(\"ingrese contraseña:\") contraseña_ingresada=input() print(contraseña_ingresada) if usuario_ingresado in usuario: indice=usuario.index(usuario_ingresado) if", "dinero[indice]: print(\"Fondos insuficientes\") else: dinero[indice]=dinero[indice]-valor_retiro print(\"Valor retiro:\",valor_retiro ) print(\"Saldo en cuenta:\",dinero[indice] ) if", "tipo_de_cuenta= [ 'corriente', 'ahorros' ] dinero= [ 1000.00 , 1500.00 ] rta_pregunta_de_seguridad= ['perro'", "print(\"Digite la cuenta de destino:\") numero_de_cuenta_final=input() 
numero_de_cuenta_final=int(numero_de_cuenta_final) if valor_trasferencia > dinero[indice]: print(\"fondos insuficientes\")", ",\":\",dinero[indice] ) else: print(\"la cuenta de destino no existe\") if opcion==4: if estado_sesion[indice]", "Juandabu \"\"\" persona= ['<NAME>', '<NAME>' ] usuario= ['superman' , 'batman' ] contraseña= [<PASSWORD>", "print(\"usuario logueado\") sesion=True else: print(\"contraseña invalida\") else: print(\"usuario no existe\") sesion_terminada=False while(sesion_terminada==False): print(\"\\n\")", "existe\") if opcion==4: if estado_sesion[indice] =='activa': estado_sesion[indice] ='cerrada' print(\"sesion cerrada\") else: print(\"sesion ya", "if opcion in [1,2,3,4,5]: indice=usuario.index(usuario_ingresado) if estado_sesion[indice] =='activa': if opcion==1: print(\"Saldo en cuenta\",numero_de_cuenta[indice]", "opcion==4: if estado_sesion[indice] =='activa': estado_sesion[indice] ='cerrada' print(\"sesion cerrada\") else: print(\"sesion ya está cerrada\")", "sesion=False while(sesion==False): print(\"\\n\") print(\"INICIO DE SESIÓN\") print(\"ingrese usuario:\") usuario_ingresado=input() print(\"ingrese contraseña:\") contraseña_ingresada=input() print(contraseña_ingresada)", "insuficientes\") else: dinero[indice]=dinero[indice]-valor_retiro print(\"Valor retiro:\",valor_retiro ) print(\"Saldo en cuenta:\",dinero[indice] ) if opcion==3: print(\"Digite", "<PASSWORD> ] numero_de_cuenta= [ 3115996681 , 32221822 ] tipo_de_cuenta= [ 'corriente', 'ahorros' ]", "] dinero= [ 1000.00 , 1500.00 ] rta_pregunta_de_seguridad= ['perro' , 'murcielago' ] estado_sesion=", "cuentas del banco\") opcion=input() opcion=int(opcion) if opcion in [1,2,3,4,5]: indice=usuario.index(usuario_ingresado) if estado_sesion[indice] =='activa':", "numero_de_cuenta_final in numero_de_cuenta: indice_cta=numero_de_cuenta.index(numero_de_cuenta_final) dinero[indice]=dinero[indice]-valor_trasferencia dinero[indice_cta]=dinero[indice_cta]+valor_trasferencia 
print(\"trasferencia exitosa al numero de cuenta:\",numero_de_cuenta_final )", "las cuentas del banco\") opcion=input() opcion=int(opcion) if opcion in [1,2,3,4,5]: indice=usuario.index(usuario_ingresado) if estado_sesion[indice]", "print(\"2: Hacer retiro\") print(\"3: Transferir dinero\") print(\"4: Cerrar sesión\") print(\"5: Estado todas las", "al numero de cuenta:\",numero_de_cuenta_final ) print(\"valor trasferencia:\",valor_trasferencia) print(\"saldo en cuenta\",numero_de_cuenta[indice] ,\":\",dinero[indice] ) else:", "print(\"INICIO DE SESIÓN\") print(\"ingrese usuario:\") usuario_ingresado=input() print(\"ingrese contraseña:\") contraseña_ingresada=input() print(contraseña_ingresada) if usuario_ingresado in", "retiro\") print(\"3: Transferir dinero\") print(\"4: Cerrar sesión\") print(\"5: Estado todas las cuentas del", "[ 1000.00 , 1500.00 ] rta_pregunta_de_seguridad= ['perro' , 'murcielago' ] estado_sesion= ['cerrada' ,", "if opcion==2: print(\"Digite el valor a retirar:\") valor_retiro=input() valor_retiro=int(valor_retiro) if valor_retiro > dinero[indice]:", "el banco\") for i in list(range(0,len(persona))): print(\"\\n\") print(\" persona:\",persona[i]) print(\" usuario:\",usuario[i]) print(\" contraseña:\",contraseña[i])", "print(\"\\n\") print(\" persona:\",persona[i]) print(\" usuario:\",usuario[i]) print(\" contraseña:\",contraseña[i]) print(\" numero_de_cuenta:\",numero_de_cuenta[i]) print(\" tipo_de_cuenta:\",tipo_de_cuenta[i]) print(\" dinero:\",dinero[i])", "cuenta:\",numero_de_cuenta_final ) print(\"valor trasferencia:\",valor_trasferencia) print(\"saldo en cuenta\",numero_de_cuenta[indice] ,\":\",dinero[indice] ) else: print(\"la cuenta de", "if opcion==1: print(\"Saldo en cuenta\",numero_de_cuenta[indice] ,\":\",dinero[indice] ) if opcion==2: print(\"Digite el valor a", "banco\") opcion=input() opcion=int(opcion) if opcion in [1,2,3,4,5]: indice=usuario.index(usuario_ingresado) if estado_sesion[indice] =='activa': if opcion==1:", 
"contraseña:\",contraseña[i]) print(\" numero_de_cuenta:\",numero_de_cuenta[i]) print(\" tipo_de_cuenta:\",tipo_de_cuenta[i]) print(\" dinero:\",dinero[i]) print(\" rta_pregunta_de_seguridad:\",rta_pregunta_de_seguridad[i]) print(\" estado_sesion:\",estado_sesion[i]) print(\"\\n\") else:", "la trasferencia:\") valor_trasferencia=input() valor_trasferencia=int(valor_trasferencia) print(\"Digite la cuenta de destino:\") numero_de_cuenta_final=input() numero_de_cuenta_final=int(numero_de_cuenta_final) if valor_trasferencia", ",\":\",dinero[indice] ) if opcion==2: print(\"Digite el valor a retirar:\") valor_retiro=input() valor_retiro=int(valor_retiro) if valor_retiro", "print(\"sesion ya está cerrada\") sesion_terminada=True if opcion==5: print(\"\\n\") print(\"Estado cuentas banco\") for i", "usuario:\",usuario[i]) print(\" contraseña:\",contraseña[i]) print(\" numero_de_cuenta:\",numero_de_cuenta[i]) print(\" tipo_de_cuenta:\",tipo_de_cuenta[i]) print(\" dinero:\",dinero[i]) print(\" rta_pregunta_de_seguridad:\",rta_pregunta_de_seguridad[i]) print(\" estado_sesion:\",estado_sesion[i])", "indice=usuario.index(usuario_ingresado) if estado_sesion[indice] =='activa': if opcion==1: print(\"Saldo en cuenta\",numero_de_cuenta[indice] ,\":\",dinero[indice] ) if opcion==2:", "if valor_trasferencia > dinero[indice]: print(\"fondos insuficientes\") else: if numero_de_cuenta_final in numero_de_cuenta: indice_cta=numero_de_cuenta.index(numero_de_cuenta_final) dinero[indice]=dinero[indice]-valor_trasferencia", "usuario: indice=usuario.index(usuario_ingresado) if contraseña[indice] == int(contraseña_ingresada): estado_sesion[indice] ='activa' print(\"usuario logueado\") sesion=True else: print(\"contraseña", "print(\" usuario:\",usuario[i]) print(\" contraseña:\",contraseña[i]) print(\" numero_de_cuenta:\",numero_de_cuenta[i]) print(\" tipo_de_cuenta:\",tipo_de_cuenta[i]) print(\" dinero:\",dinero[i]) print(\" rta_pregunta_de_seguridad:\",rta_pregunta_de_seguridad[i]) print(\"", 
"print(\"5: Estado todas las cuentas del banco\") opcion=input() opcion=int(opcion) if opcion in [1,2,3,4,5]:", "print(contraseña_ingresada) if usuario_ingresado in usuario: indice=usuario.index(usuario_ingresado) if contraseña[indice] == int(contraseña_ingresada): estado_sesion[indice] ='activa' print(\"usuario", "retiro:\",valor_retiro ) print(\"Saldo en cuenta:\",dinero[indice] ) if opcion==3: print(\"Digite el valor de la", "dinero[indice]=dinero[indice]-valor_retiro print(\"Valor retiro:\",valor_retiro ) print(\"Saldo en cuenta:\",dinero[indice] ) if opcion==3: print(\"Digite el valor", "POR EL NUMERO:\") print(\"1: Consultar saldo\") print(\"2: Hacer retiro\") print(\"3: Transferir dinero\") print(\"4:", "exitosa al numero de cuenta:\",numero_de_cuenta_final ) print(\"valor trasferencia:\",valor_trasferencia) print(\"saldo en cuenta\",numero_de_cuenta[indice] ,\":\",dinero[indice] )", "print(\" numero_de_cuenta:\",numero_de_cuenta[i]) print(\" tipo_de_cuenta:\",tipo_de_cuenta[i]) print(\" dinero:\",dinero[i]) print(\" rta_pregunta_de_seguridad:\",rta_pregunta_de_seguridad[i]) print(\" estado_sesion:\",estado_sesion[i]) print(\"\\n\") else: print(\"sesion", "sesion_terminada=False while(sesion_terminada==False): print(\"\\n\") print(\"ELIJA ALGUNA TRANSACCION POR EL NUMERO:\") print(\"1: Consultar saldo\") print(\"2:", "print(\"valor trasferencia:\",valor_trasferencia) print(\"saldo en cuenta\",numero_de_cuenta[indice] ,\":\",dinero[indice] ) else: print(\"la cuenta de destino no", "['superman' , 'batman' ] contraseña= [<PASSWORD> , <PASSWORD> ] numero_de_cuenta= [ 3115996681 ,", "'batman' ] contraseña= [<PASSWORD> , <PASSWORD> ] numero_de_cuenta= [ 3115996681 , 32221822 ]", "usuario_ingresado in usuario: indice=usuario.index(usuario_ingresado) if contraseña[indice] == int(contraseña_ingresada): estado_sesion[indice] ='activa' print(\"usuario logueado\") sesion=True", "if estado_sesion[indice] =='activa': if opcion==1: print(\"Saldo en 
cuenta\",numero_de_cuenta[indice] ,\":\",dinero[indice] ) if opcion==2: print(\"Digite", "valor_retiro=int(valor_retiro) if valor_retiro > dinero[indice]: print(\"Fondos insuficientes\") else: dinero[indice]=dinero[indice]-valor_retiro print(\"Valor retiro:\",valor_retiro ) print(\"Saldo", "ya está cerrada\") sesion_terminada=True if opcion==5: print(\"\\n\") print(\"Estado cuentas banco\") for i in", "dinero\") print(\"4: Cerrar sesión\") print(\"5: Estado todas las cuentas del banco\") opcion=input() opcion=int(opcion)", "tipo_de_cuenta:\",tipo_de_cuenta[i]) print(\" dinero:\",dinero[i]) print(\" rta_pregunta_de_seguridad:\",rta_pregunta_de_seguridad[i]) print(\" estado_sesion:\",estado_sesion[i]) print(\"\\n\") ''' while(True): sesion=False while(sesion==False): print(\"\\n\")", "el valor a retirar:\") valor_retiro=input() valor_retiro=int(valor_retiro) if valor_retiro > dinero[indice]: print(\"Fondos insuficientes\") else:", "contraseña:\") contraseña_ingresada=input() print(contraseña_ingresada) if usuario_ingresado in usuario: indice=usuario.index(usuario_ingresado) if contraseña[indice] == int(contraseña_ingresada): estado_sesion[indice]", "usuario_ingresado=input() print(\"ingrese contraseña:\") contraseña_ingresada=input() print(contraseña_ingresada) if usuario_ingresado in usuario: indice=usuario.index(usuario_ingresado) if contraseña[indice] ==", "if estado_sesion[indice] =='activa': estado_sesion[indice] ='cerrada' print(\"sesion cerrada\") else: print(\"sesion ya está cerrada\") sesion_terminada=True", "== int(contraseña_ingresada): estado_sesion[indice] ='activa' print(\"usuario logueado\") sesion=True else: print(\"contraseña invalida\") else: print(\"usuario no", "print(\"usuario no existe\") sesion_terminada=False while(sesion_terminada==False): print(\"\\n\") print(\"ELIJA ALGUNA TRANSACCION POR EL NUMERO:\") print(\"1:", "opcion==5: print(\"\\n\") print(\"Estado cuentas banco\") for i in list(range(0,len(persona))): print(\"\\n\") print(\" 
persona:\",persona[i]) print(\"" ]
[ "TOKEN = sys.argv[1] if __name__ == \"__main__\": init() # create config and line", "not os.path.exists(\"line\"): os.mkdir(\"line\") if not os.path.exists(\"emoji\"): os.mkdir(\"emoji\") while True: try: cli = discord.Client()", "debug mode, only serve messages from test return if not DEBUG and msg.channel.name", "import discord from bot import init_cfg, init_bot from template import handle, Context DEBUG", "tts_bot = init_bot() online = {} async def helper(msg: discord.Message): if DEBUG and", "def on_message(msg: discord.Message): await helper(msg) @cli.event async def on_message_edit(before: discord.Message, after: discord.Message): await", "from test return if not DEBUG and msg.channel.name == \"test\": # not in", "in debug mode, only serve messages from test return if not DEBUG and", "not DEBUG and msg.channel.name == \"test\": # not in debug mode, skip messages", "os.path.exists(\"line\"): os.mkdir(\"line\") if not os.path.exists(\"emoji\"): os.mkdir(\"emoji\") while True: try: cli = discord.Client() tts_bot", "= init_cfg(guild_id) await handle(Context(tts_bot, cli, online[guild_id], msg)) @cli.event async def on_message(msg: discord.Message): await", "on_message(msg: discord.Message): await helper(msg) @cli.event async def on_message_edit(before: discord.Message, after: discord.Message): await helper(after)", "@cli.event async def on_message_edit(before: discord.Message, after: discord.Message): await helper(after) cli.run(TOKEN) except Exception as", "def helper(msg: discord.Message): if DEBUG and msg.channel.name != \"test\": # in debug mode,", "# create config and line dir if not os.path.exists(\"cfg\"): os.mkdir(\"cfg\") if not os.path.exists(\"line\"):", "os import sys import discord from bot import init_cfg, init_bot from template import", "online[guild_id] = init_cfg(guild_id) await handle(Context(tts_bot, cli, online[guild_id], msg)) @cli.event async def on_message(msg: discord.Message):", "= \"\" def init(): if len(sys.argv) <= 1: 
sys.exit(\"start template: python main.py <TOKEN>\")", "len(sys.argv) <= 1: sys.exit(\"start template: python main.py <TOKEN>\") global TOKEN TOKEN = sys.argv[1]", "async def on_message(msg: discord.Message): await helper(msg) @cli.event async def on_message_edit(before: discord.Message, after: discord.Message):", "await handle(Context(tts_bot, cli, online[guild_id], msg)) @cli.event async def on_message(msg: discord.Message): await helper(msg) @cli.event", "1: sys.exit(\"start template: python main.py <TOKEN>\") global TOKEN TOKEN = sys.argv[1] if __name__", "if not DEBUG and msg.channel.name == \"test\": # not in debug mode, skip", "online[guild_id], msg)) @cli.event async def on_message(msg: discord.Message): await helper(msg) @cli.event async def on_message_edit(before:", "async def helper(msg: discord.Message): if DEBUG and msg.channel.name != \"test\": # in debug", "<= 1: sys.exit(\"start template: python main.py <TOKEN>\") global TOKEN TOKEN = sys.argv[1] if", "config and line dir if not os.path.exists(\"cfg\"): os.mkdir(\"cfg\") if not os.path.exists(\"line\"): os.mkdir(\"line\") if", "os.path.exists(\"emoji\"): os.mkdir(\"emoji\") while True: try: cli = discord.Client() tts_bot = init_bot() online =", "not in debug mode, skip messages from test return guild_id = str(msg.guild.id) if", "if not os.path.exists(\"cfg\"): os.mkdir(\"cfg\") if not os.path.exists(\"line\"): os.mkdir(\"line\") if not os.path.exists(\"emoji\"): os.mkdir(\"emoji\") while", "cli, online[guild_id], msg)) @cli.event async def on_message(msg: discord.Message): await helper(msg) @cli.event async def", "skip messages from test return guild_id = str(msg.guild.id) if guild_id not in online:", "TOKEN = \"\" def init(): if len(sys.argv) <= 1: sys.exit(\"start template: python main.py", "# not in debug mode, skip messages from test return guild_id = str(msg.guild.id)", "create config and line dir if not os.path.exists(\"cfg\"): os.mkdir(\"cfg\") if not os.path.exists(\"line\"): 
os.mkdir(\"line\")", "== \"__main__\": init() # create config and line dir if not os.path.exists(\"cfg\"): os.mkdir(\"cfg\")", "debug mode, skip messages from test return guild_id = str(msg.guild.id) if guild_id not", "init() # create config and line dir if not os.path.exists(\"cfg\"): os.mkdir(\"cfg\") if not", "if __name__ == \"__main__\": init() # create config and line dir if not", "and msg.channel.name == \"test\": # not in debug mode, skip messages from test", "msg)) @cli.event async def on_message(msg: discord.Message): await helper(msg) @cli.event async def on_message_edit(before: discord.Message,", "= True TOKEN = \"\" def init(): if len(sys.argv) <= 1: sys.exit(\"start template:", "try: cli = discord.Client() tts_bot = init_bot() online = {} async def helper(msg:", "@cli.event async def on_message(msg: discord.Message): await helper(msg) @cli.event async def on_message_edit(before: discord.Message, after:", "if guild_id not in online: online[guild_id] = init_cfg(guild_id) await handle(Context(tts_bot, cli, online[guild_id], msg))", "import os import sys import discord from bot import init_cfg, init_bot from template", "= {} async def helper(msg: discord.Message): if DEBUG and msg.channel.name != \"test\": #", "line dir if not os.path.exists(\"cfg\"): os.mkdir(\"cfg\") if not os.path.exists(\"line\"): os.mkdir(\"line\") if not os.path.exists(\"emoji\"):", "\"\" def init(): if len(sys.argv) <= 1: sys.exit(\"start template: python main.py <TOKEN>\") global", "TOKEN TOKEN = sys.argv[1] if __name__ == \"__main__\": init() # create config and", "and msg.channel.name != \"test\": # in debug mode, only serve messages from test", "dir if not os.path.exists(\"cfg\"): os.mkdir(\"cfg\") if not os.path.exists(\"line\"): os.mkdir(\"line\") if not os.path.exists(\"emoji\"): os.mkdir(\"emoji\")", "True TOKEN = \"\" def init(): if len(sys.argv) <= 1: sys.exit(\"start template: python", "sys.argv[1] if __name__ == \"__main__\": init() # create config and line dir if", 
"<TOKEN>\") global TOKEN TOKEN = sys.argv[1] if __name__ == \"__main__\": init() # create", "from test return guild_id = str(msg.guild.id) if guild_id not in online: online[guild_id] =", "msg.channel.name == \"test\": # not in debug mode, skip messages from test return", "in online: online[guild_id] = init_cfg(guild_id) await handle(Context(tts_bot, cli, online[guild_id], msg)) @cli.event async def", "return guild_id = str(msg.guild.id) if guild_id not in online: online[guild_id] = init_cfg(guild_id) await", "template: python main.py <TOKEN>\") global TOKEN TOKEN = sys.argv[1] if __name__ == \"__main__\":", "handle, Context DEBUG = True TOKEN = \"\" def init(): if len(sys.argv) <=", "discord from bot import init_cfg, init_bot from template import handle, Context DEBUG =", "if len(sys.argv) <= 1: sys.exit(\"start template: python main.py <TOKEN>\") global TOKEN TOKEN =", "online: online[guild_id] = init_cfg(guild_id) await handle(Context(tts_bot, cli, online[guild_id], msg)) @cli.event async def on_message(msg:", "test return guild_id = str(msg.guild.id) if guild_id not in online: online[guild_id] = init_cfg(guild_id)", "os.mkdir(\"emoji\") while True: try: cli = discord.Client() tts_bot = init_bot() online = {}", "\"test\": # in debug mode, only serve messages from test return if not", "= init_bot() online = {} async def helper(msg: discord.Message): if DEBUG and msg.channel.name", "DEBUG and msg.channel.name != \"test\": # in debug mode, only serve messages from", "__name__ == \"__main__\": init() # create config and line dir if not os.path.exists(\"cfg\"):", "guild_id not in online: online[guild_id] = init_cfg(guild_id) await handle(Context(tts_bot, cli, online[guild_id], msg)) @cli.event", "messages from test return if not DEBUG and msg.channel.name == \"test\": # not", "not os.path.exists(\"emoji\"): os.mkdir(\"emoji\") while True: try: cli = discord.Client() tts_bot = init_bot() online", "discord.Message): await helper(msg) @cli.event async def 
on_message_edit(before: discord.Message, after: discord.Message): await helper(after) cli.run(TOKEN)", "init_bot() online = {} async def helper(msg: discord.Message): if DEBUG and msg.channel.name !=", "init_cfg, init_bot from template import handle, Context DEBUG = True TOKEN = \"\"", "DEBUG = True TOKEN = \"\" def init(): if len(sys.argv) <= 1: sys.exit(\"start", "async def on_message_edit(before: discord.Message, after: discord.Message): await helper(after) cli.run(TOKEN) except Exception as e:", "sys.exit(\"start template: python main.py <TOKEN>\") global TOKEN TOKEN = sys.argv[1] if __name__ ==", "on_message_edit(before: discord.Message, after: discord.Message): await helper(after) cli.run(TOKEN) except Exception as e: print(f\"ERROR: {e}\")", "!= \"test\": # in debug mode, only serve messages from test return if", "test return if not DEBUG and msg.channel.name == \"test\": # not in debug", "template import handle, Context DEBUG = True TOKEN = \"\" def init(): if", "if not os.path.exists(\"line\"): os.mkdir(\"line\") if not os.path.exists(\"emoji\"): os.mkdir(\"emoji\") while True: try: cli =", "msg.channel.name != \"test\": # in debug mode, only serve messages from test return", "init_cfg(guild_id) await handle(Context(tts_bot, cli, online[guild_id], msg)) @cli.event async def on_message(msg: discord.Message): await helper(msg)", "\"__main__\": init() # create config and line dir if not os.path.exists(\"cfg\"): os.mkdir(\"cfg\") if", "helper(msg: discord.Message): if DEBUG and msg.channel.name != \"test\": # in debug mode, only", "cli = discord.Client() tts_bot = init_bot() online = {} async def helper(msg: discord.Message):", "in debug mode, skip messages from test return guild_id = str(msg.guild.id) if guild_id", "init(): if len(sys.argv) <= 1: sys.exit(\"start template: python main.py <TOKEN>\") global TOKEN TOKEN", "\"test\": # not in debug mode, skip messages from test return guild_id =", "bot import init_cfg, init_bot from template import handle, 
Context DEBUG = True TOKEN", "discord.Message): if DEBUG and msg.channel.name != \"test\": # in debug mode, only serve", "python main.py <TOKEN>\") global TOKEN TOKEN = sys.argv[1] if __name__ == \"__main__\": init()", "not os.path.exists(\"cfg\"): os.mkdir(\"cfg\") if not os.path.exists(\"line\"): os.mkdir(\"line\") if not os.path.exists(\"emoji\"): os.mkdir(\"emoji\") while True:", "= discord.Client() tts_bot = init_bot() online = {} async def helper(msg: discord.Message): if", "import sys import discord from bot import init_cfg, init_bot from template import handle,", "main.py <TOKEN>\") global TOKEN TOKEN = sys.argv[1] if __name__ == \"__main__\": init() #", "= sys.argv[1] if __name__ == \"__main__\": init() # create config and line dir", "= str(msg.guild.id) if guild_id not in online: online[guild_id] = init_cfg(guild_id) await handle(Context(tts_bot, cli,", "True: try: cli = discord.Client() tts_bot = init_bot() online = {} async def", "import handle, Context DEBUG = True TOKEN = \"\" def init(): if len(sys.argv)", "messages from test return guild_id = str(msg.guild.id) if guild_id not in online: online[guild_id]", "while True: try: cli = discord.Client() tts_bot = init_bot() online = {} async", "mode, skip messages from test return guild_id = str(msg.guild.id) if guild_id not in", "helper(msg) @cli.event async def on_message_edit(before: discord.Message, after: discord.Message): await helper(after) cli.run(TOKEN) except Exception", "def init(): if len(sys.argv) <= 1: sys.exit(\"start template: python main.py <TOKEN>\") global TOKEN", "str(msg.guild.id) if guild_id not in online: online[guild_id] = init_cfg(guild_id) await handle(Context(tts_bot, cli, online[guild_id],", "init_bot from template import handle, Context DEBUG = True TOKEN = \"\" def", "handle(Context(tts_bot, cli, online[guild_id], msg)) @cli.event async def on_message(msg: discord.Message): await helper(msg) @cli.event async", "def on_message_edit(before: discord.Message, after: 
discord.Message): await helper(after) cli.run(TOKEN) except Exception as e: print(f\"ERROR:", "and line dir if not os.path.exists(\"cfg\"): os.mkdir(\"cfg\") if not os.path.exists(\"line\"): os.mkdir(\"line\") if not", "serve messages from test return if not DEBUG and msg.channel.name == \"test\": #", "discord.Client() tts_bot = init_bot() online = {} async def helper(msg: discord.Message): if DEBUG", "from template import handle, Context DEBUG = True TOKEN = \"\" def init():", "global TOKEN TOKEN = sys.argv[1] if __name__ == \"__main__\": init() # create config", "return if not DEBUG and msg.channel.name == \"test\": # not in debug mode,", "== \"test\": # not in debug mode, skip messages from test return guild_id", "if not os.path.exists(\"emoji\"): os.mkdir(\"emoji\") while True: try: cli = discord.Client() tts_bot = init_bot()", "guild_id = str(msg.guild.id) if guild_id not in online: online[guild_id] = init_cfg(guild_id) await handle(Context(tts_bot,", "if DEBUG and msg.channel.name != \"test\": # in debug mode, only serve messages", "sys import discord from bot import init_cfg, init_bot from template import handle, Context", "import init_cfg, init_bot from template import handle, Context DEBUG = True TOKEN =", "# in debug mode, only serve messages from test return if not DEBUG", "Context DEBUG = True TOKEN = \"\" def init(): if len(sys.argv) <= 1:", "os.mkdir(\"line\") if not os.path.exists(\"emoji\"): os.mkdir(\"emoji\") while True: try: cli = discord.Client() tts_bot =", "only serve messages from test return if not DEBUG and msg.channel.name == \"test\":", "mode, only serve messages from test return if not DEBUG and msg.channel.name ==", "not in online: online[guild_id] = init_cfg(guild_id) await handle(Context(tts_bot, cli, online[guild_id], msg)) @cli.event async", "from bot import init_cfg, init_bot from template import handle, Context DEBUG = True", "os.path.exists(\"cfg\"): os.mkdir(\"cfg\") if not os.path.exists(\"line\"): os.mkdir(\"line\") if not 
os.path.exists(\"emoji\"): os.mkdir(\"emoji\") while True: try:", "DEBUG and msg.channel.name == \"test\": # not in debug mode, skip messages from", "online = {} async def helper(msg: discord.Message): if DEBUG and msg.channel.name != \"test\":", "os.mkdir(\"cfg\") if not os.path.exists(\"line\"): os.mkdir(\"line\") if not os.path.exists(\"emoji\"): os.mkdir(\"emoji\") while True: try: cli", "{} async def helper(msg: discord.Message): if DEBUG and msg.channel.name != \"test\": # in", "await helper(msg) @cli.event async def on_message_edit(before: discord.Message, after: discord.Message): await helper(after) cli.run(TOKEN) except" ]
[ "self.app._install_dispatch_flows.assert_has_calls( [ mock.call(pc2.flow_classifiers[0]), mock.call(pc2.flow_classifiers[1]), mock.call(pc2.flow_classifiers[2]), ], ) self.assertEqual(3, self.app._install_dispatch_flows.call_count) @utils.with_local_objects(fc10, fc11, fc12, fc13,", "Unless required by applicable law or agreed to in writing, software # distributed", "test_pc_created(self): pc1.emit_created() self.app._install_flow_classifier.assert_has_calls( [ mock.call(pc1.flow_classifiers[0]), mock.call(pc1.flow_classifiers[1]), ], ) self.assertEqual(2, self.app._install_flow_classifier.call_count) self.app._uninstall_flow_classifier.assert_not_called() @utils.with_local_objects(fc1, fc2,", "self.app._uninstall_flow_classifier.assert_called_once_with( pc1.flow_classifiers[0]) @utils.with_local_objects(fc10, fc11, fc12, fc13, pc2, *l2_objs) def test_install_flow_classifier(self): pc2.emit_created() # Installed", "= sfc.FlowClassifier( id='fc10', topic='topic1', unique_key=10, source_port='lport1', ) fc11 = sfc.FlowClassifier( id='fc11', topic='topic1', unique_key=11,", "fc12 = sfc.FlowClassifier( id='fc12', topic='topic1', unique_key=12, dest_port='lport1', ) fc13 = sfc.FlowClassifier( id='fc13', topic='topic1',", "mock.call(pc2.flow_classifiers[3]), ], ) self.assertEqual(3, self.app._install_classification_flows.call_count) # Installed only for source-port and local dest", "local dest ports: self.app._uninstall_dispatch_flows.assert_has_calls( [ mock.call(pc2.flow_classifiers[0]), mock.call(pc2.flow_classifiers[1]), mock.call(pc2.flow_classifiers[2]), ], ) self.assertEqual(3, self.app._uninstall_dispatch_flows.call_count) @utils.with_local_objects(fc1,", "ports: self.app._uninstall_classification_flows.has_calls( [ mock.call(pc2.flow_classifiers[0]), mock.call(pc2.flow_classifiers[2]), mock.call(pc2.flow_classifiers[3]), ], ) self.assertEqual( 3, self.app._uninstall_classification_flows.call_count) # Installed", "version=10, unique_key=29, lswitch='lswitch1', 
binding=test_app_base.local_binding, ) fc1 = sfc.FlowClassifier( id='fc1', topic='topic1', unique_key=22, source_port='lport1', )", "l2.LogicalPort( id='lport3', topic='topic1', version=10, unique_key=29, lswitch='lswitch1', binding=test_app_base.local_binding, ) fc1 = sfc.FlowClassifier( id='fc1', topic='topic1',", "= l2.LogicalPort( id='lport1', topic='topic1', version=10, unique_key=22, lswitch='lswitch1', binding=test_app_base.local_binding, ) lport2 = l2.LogicalPort( id='lport2',", "p.start() @utils.with_local_objects(fc1, fc2, fc3, pc1, *l2_objs) def test_pc_created(self): pc1.emit_created() self.app._install_flow_classifier.assert_has_calls( [ mock.call(pc1.flow_classifiers[0]), mock.call(pc1.flow_classifiers[1]),", "Apache License, Version 2.0 (the \"License\"); you may # not use this file", "the License. You may obtain # a copy of the License at #", "may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #", "def test_src_local_port_added(self): lport1.emit_bind_local() self.app._install_classification_flows.assert_called_once_with(fc1) self.app._install_dispatch_flows.assert_not_called() @utils.with_local_objects(fc1, fc2, pc1, *l2_objs) def test_src_local_port_removed(self): lport1.emit_unbind_local() self.app._uninstall_classification_flows.assert_called_once_with(fc1)", "'_uninstall_flow_classifier', '_install_classification_flows', '_install_dispatch_flows', '_uninstall_classification_flows', '_uninstall_dispatch_flows'): orig = getattr(self.app, attribute) p = mock.patch.object(self.app, attribute,", "= ['fc'] def setUp(self): super(TestFcApp, self).setUp() self.app = self.open_flow_app.dispatcher.apps['fc'] for attribute in ('_install_flow_classifier',", "unique_key=12, dest_port='lport1', ) fc13 = sfc.FlowClassifier( id='fc13', topic='topic1', unique_key=13, dest_port='lport2', ) pc2 =", "['fc'] def setUp(self): super(TestFcApp, self).setUp() self.app = self.open_flow_app.dispatcher.apps['fc'] for attribute in 
('_install_flow_classifier', '_uninstall_flow_classifier',", "@utils.with_local_objects(fc1, fc2, pc1, *l2_objs) def test_src_local_port_added(self): lport1.emit_bind_local() self.app._install_classification_flows.assert_called_once_with(fc1) self.app._install_dispatch_flows.assert_not_called() @utils.with_local_objects(fc1, fc2, pc1, *l2_objs)", "source-port and local dest ports: self.app._install_dispatch_flows.assert_has_calls( [ mock.call(pc2.flow_classifiers[0]), mock.call(pc2.flow_classifiers[1]), mock.call(pc2.flow_classifiers[2]), ], ) self.assertEqual(3,", "sfc.PortChain( id='pc1', topic='topic1', flow_classifiers=['fc3', 'fc2'], ) fc10 = sfc.FlowClassifier( id='fc10', topic='topic1', unique_key=10, source_port='lport1',", "with the License. You may obtain # a copy of the License at", "'fc2'], ) fc10 = sfc.FlowClassifier( id='fc10', topic='topic1', unique_key=10, source_port='lport1', ) fc11 = sfc.FlowClassifier(", "source ports: self.app._install_classification_flows.has_calls( [ mock.call(pc2.flow_classifiers[0]), mock.call(pc2.flow_classifiers[2]), mock.call(pc2.flow_classifiers[3]), ], ) self.assertEqual(3, self.app._install_classification_flows.call_count) # Installed", "self.app._uninstall_dispatch_flows.assert_not_called() @utils.with_local_objects(fc1, fc2, pc1, *l2_objs) def test_dest_local_port_added(self): lport2.emit_bind_local() self.app._install_classification_flows.assert_not_called() self.app._install_dispatch_flows.assert_called_once_with(fc2) @utils.with_local_objects(fc1, fc2, pc1,", "fc2, fc3, pc1, *l2_objs) def test_pc_updated_replace_fc(self): pc1replace.emit_updated(pc1) self.app._install_flow_classifier.assert_called_once_with( pc1replace.flow_classifiers[0]) self.app._uninstall_flow_classifier.assert_called_once_with( pc1.flow_classifiers[0]) @utils.with_local_objects(fc10, fc11,", "id='fc3', topic='topic1', unique_key=13, source_port='lport3', ) pc1 = sfc.PortChain( id='pc1', topic='topic1', flow_classifiers=['fc1', 'fc2'], )", 
"l2_objs = (lswitch1, lport1, lport2, lport3) class TestFcApp(test_app_base.DFAppTestBase): apps_list = ['fc'] def setUp(self):", "side_effect=orig) self.addCleanup(p.stop) p.start() @utils.with_local_objects(fc1, fc2, fc3, pc1, *l2_objs) def test_pc_created(self): pc1.emit_created() self.app._install_flow_classifier.assert_has_calls( [", "id='fc12', topic='topic1', unique_key=12, dest_port='lport1', ) fc13 = sfc.FlowClassifier( id='fc13', topic='topic1', unique_key=13, dest_port='lport2', )", "mock.call(pc1.flow_classifiers[1]), ], ) self.assertEqual(2, self.app._uninstall_flow_classifier.call_count) @utils.with_local_objects(fc1, fc2, fc3, pc1, *l2_objs) def test_pc_updated_add_fc(self): pc1add.emit_updated(pc1)", "lport2, lport3) class TestFcApp(test_app_base.DFAppTestBase): apps_list = ['fc'] def setUp(self): super(TestFcApp, self).setUp() self.app =", "use this file except in compliance with the License. You may obtain #", "pc1add.emit_updated(pc1) self.app._install_flow_classifier.assert_called_once_with( pc1add.flow_classifiers[1]) self.app._uninstall_flow_classifier.assert_not_called() @utils.with_local_objects(fc1, fc2, fc3, pc1, *l2_objs) def test_pc_updated_remove_fc(self): pc1remove.emit_updated(pc1) self.app._install_flow_classifier.assert_not_called()", "pc1.emit_deleted() self.app._install_flow_classifier.assert_not_called() self.app._uninstall_flow_classifier.assert_has_calls( [ mock.call(pc1.flow_classifiers[0]), mock.call(pc1.flow_classifiers[1]), ], ) self.assertEqual(2, self.app._uninstall_flow_classifier.call_count) @utils.with_local_objects(fc1, fc2, fc3,", "unique_key=11, source_port='lport2', ) fc12 = sfc.FlowClassifier( id='fc12', topic='topic1', unique_key=12, dest_port='lport1', ) fc13 =", "BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "is distributed on an \"AS IS\" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF", "topic='topic1', unique_key=12, dest_port='lport1', ) fc13 = sfc.FlowClassifier( 
id='fc13', topic='topic1', unique_key=13, dest_port='lport2', ) pc2", "implied. See the # License for the specific language governing permissions and limitations", "fc11, fc12, fc13, pc2, *l2_objs) def test_uninstall_flow_classifier(self): pc2.emit_deleted() # Installed only for dest-port", "'_uninstall_dispatch_flows'): orig = getattr(self.app, attribute) p = mock.patch.object(self.app, attribute, side_effect=orig) self.addCleanup(p.stop) p.start() @utils.with_local_objects(fc1,", "from dragonflow.tests.unit import test_app_base lswitch1 = l2.LogicalSwitch( id='lswitch1', topic='topic1', version=10, unique_key=22, ) lport1", "'fc14'], ) l2_objs = (lswitch1, lport1, lport2, lport3) class TestFcApp(test_app_base.DFAppTestBase): apps_list = ['fc']", "topic='topic1', version=10, unique_key=22, ) lport1 = l2.LogicalPort( id='lport1', topic='topic1', version=10, unique_key=22, lswitch='lswitch1', binding=test_app_base.local_binding,", "Copyright (c) 2016 OpenStack Foundation. # All Rights Reserved. 
# # Licensed under", "fc2 = sfc.FlowClassifier( id='fc2', topic='topic1', unique_key=12, dest_port='lport2', ) fc3 = sfc.FlowClassifier( id='fc3', topic='topic1',", "= sfc.FlowClassifier( id='fc12', topic='topic1', unique_key=12, dest_port='lport1', ) fc13 = sfc.FlowClassifier( id='fc13', topic='topic1', unique_key=13,", "lport1 = l2.LogicalPort( id='lport1', topic='topic1', version=10, unique_key=22, lswitch='lswitch1', binding=test_app_base.local_binding, ) lport2 = l2.LogicalPort(", "mock.call(pc2.flow_classifiers[0]), mock.call(pc2.flow_classifiers[2]), mock.call(pc2.flow_classifiers[3]), ], ) self.assertEqual( 3, self.app._uninstall_classification_flows.call_count) # Installed only for source-port", "only for source-port and local dest ports: self.app._uninstall_dispatch_flows.assert_has_calls( [ mock.call(pc2.flow_classifiers[0]), mock.call(pc2.flow_classifiers[1]), mock.call(pc2.flow_classifiers[2]), ],", "you may # not use this file except in compliance with the License.", "only for dest-port and local source ports: self.app._install_classification_flows.has_calls( [ mock.call(pc2.flow_classifiers[0]), mock.call(pc2.flow_classifiers[2]), mock.call(pc2.flow_classifiers[3]), ],", ") self.assertEqual( 3, self.app._uninstall_classification_flows.call_count) # Installed only for source-port and local dest ports:", "*l2_objs) def test_pc_deleted(self): pc1.emit_deleted() self.app._install_flow_classifier.assert_not_called() self.app._uninstall_flow_classifier.assert_has_calls( [ mock.call(pc1.flow_classifiers[0]), mock.call(pc1.flow_classifiers[1]), ], ) self.assertEqual(2, self.app._uninstall_flow_classifier.call_count)", "KIND, either express or implied. 
See the # License for the specific language", "apps_list = ['fc'] def setUp(self): super(TestFcApp, self).setUp() self.app = self.open_flow_app.dispatcher.apps['fc'] for attribute in", "], ) self.assertEqual(3, self.app._install_classification_flows.call_count) # Installed only for source-port and local dest ports:", "import utils from dragonflow.tests.unit import test_app_base lswitch1 = l2.LogicalSwitch( id='lswitch1', topic='topic1', version=10, unique_key=22,", "fc2, fc3, pc1, *l2_objs) def test_pc_created(self): pc1.emit_created() self.app._install_flow_classifier.assert_has_calls( [ mock.call(pc1.flow_classifiers[0]), mock.call(pc1.flow_classifiers[1]), ], )", "orig = getattr(self.app, attribute) p = mock.patch.object(self.app, attribute, side_effect=orig) self.addCleanup(p.stop) p.start() @utils.with_local_objects(fc1, fc2,", "pc1add = sfc.PortChain( id='pc1', topic='topic1', flow_classifiers=['fc1', 'fc3', 'fc2'], ) pc1remove = sfc.PortChain( id='pc1',", "sfc.PortChain( id='pc2', topic='topic1', flow_classifiers=['fc10', 'fc11', 'fc12', 'fc14'], ) l2_objs = (lswitch1, lport1, lport2,", "p = mock.patch.object(self.app, attribute, side_effect=orig) self.addCleanup(p.stop) p.start() @utils.with_local_objects(fc1, fc2, fc3, pc1, *l2_objs) def", "file except in compliance with the License. 
You may obtain # a copy", "fc2, fc3, pc1, *l2_objs) def test_pc_deleted(self): pc1.emit_deleted() self.app._install_flow_classifier.assert_not_called() self.app._uninstall_flow_classifier.assert_has_calls( [ mock.call(pc1.flow_classifiers[0]), mock.call(pc1.flow_classifiers[1]), ],", "id='fc2', topic='topic1', unique_key=12, dest_port='lport2', ) fc3 = sfc.FlowClassifier( id='fc3', topic='topic1', unique_key=13, source_port='lport3', )", "and local dest ports: self.app._install_dispatch_flows.assert_has_calls( [ mock.call(pc2.flow_classifiers[0]), mock.call(pc2.flow_classifiers[1]), mock.call(pc2.flow_classifiers[2]), ], ) self.assertEqual(3, self.app._install_dispatch_flows.call_count)", "id='lswitch1', topic='topic1', version=10, unique_key=22, ) lport1 = l2.LogicalPort( id='lport1', topic='topic1', version=10, unique_key=22, lswitch='lswitch1',", "All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the", "\"AS IS\" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express", "ports: self.app._uninstall_dispatch_flows.assert_has_calls( [ mock.call(pc2.flow_classifiers[0]), mock.call(pc2.flow_classifiers[1]), mock.call(pc2.flow_classifiers[2]), ], ) self.assertEqual(3, self.app._uninstall_dispatch_flows.call_count) @utils.with_local_objects(fc1, fc2, pc1,", "= sfc.PortChain( id='pc1', topic='topic1', flow_classifiers=['fc1', 'fc2'], ) pc1add = sfc.PortChain( id='pc1', topic='topic1', flow_classifiers=['fc1',", "self.app._install_dispatch_flows.assert_not_called() @utils.with_local_objects(fc1, fc2, pc1, *l2_objs) def test_src_local_port_removed(self): lport1.emit_unbind_local() self.app._uninstall_classification_flows.assert_called_once_with(fc1) self.app._uninstall_dispatch_flows.assert_not_called() @utils.with_local_objects(fc1, fc2, pc1,", "flow_classifiers=['fc1', 'fc3', 'fc2'], ) pc1remove = sfc.PortChain( id='pc1', topic='topic1', flow_classifiers=['fc2'], ) pc1replace =", "dragonflow.db.models import l2 from 
dragonflow.db.models import sfc from dragonflow.tests.common import utils from dragonflow.tests.unit", "sfc.PortChain( id='pc1', topic='topic1', flow_classifiers=['fc1', 'fc2'], ) pc1add = sfc.PortChain( id='pc1', topic='topic1', flow_classifiers=['fc1', 'fc3',", "unique_key=13, source_port='lport3', ) pc1 = sfc.PortChain( id='pc1', topic='topic1', flow_classifiers=['fc1', 'fc2'], ) pc1add =", "topic='topic1', flow_classifiers=['fc2'], ) pc1replace = sfc.PortChain( id='pc1', topic='topic1', flow_classifiers=['fc3', 'fc2'], ) fc10 =", "software # distributed under the License is distributed on an \"AS IS\" BASIS,", "id='pc1', topic='topic1', flow_classifiers=['fc2'], ) pc1replace = sfc.PortChain( id='pc1', topic='topic1', flow_classifiers=['fc3', 'fc2'], ) fc10", "sfc.PortChain( id='pc1', topic='topic1', flow_classifiers=['fc1', 'fc3', 'fc2'], ) pc1remove = sfc.PortChain( id='pc1', topic='topic1', flow_classifiers=['fc2'],", "only for dest-port and local source ports: self.app._uninstall_classification_flows.has_calls( [ mock.call(pc2.flow_classifiers[0]), mock.call(pc2.flow_classifiers[2]), mock.call(pc2.flow_classifiers[3]), ],", "# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to", "= l2.LogicalPort( id='lport2', topic='topic1', version=10, unique_key=24, lswitch='lswitch1', binding=test_app_base.local_binding, ) lport3 = l2.LogicalPort( id='lport3',", "'fc12', 'fc14'], ) l2_objs = (lswitch1, lport1, lport2, lport3) class TestFcApp(test_app_base.DFAppTestBase): apps_list =", "topic='topic1', flow_classifiers=['fc10', 'fc11', 'fc12', 'fc14'], ) l2_objs = (lswitch1, lport1, lport2, lport3) class", "fc3, pc1, *l2_objs) def test_pc_updated_add_fc(self): pc1add.emit_updated(pc1) self.app._install_flow_classifier.assert_called_once_with( pc1add.flow_classifiers[1]) self.app._uninstall_flow_classifier.assert_not_called() @utils.with_local_objects(fc1, fc2, fc3, pc1,", "distributed on an \"AS IS\" BASIS, WITHOUT # WARRANTIES 
OR CONDITIONS OF ANY", "pc2, *l2_objs) def test_install_flow_classifier(self): pc2.emit_created() # Installed only for dest-port and local source", "], ) self.assertEqual(3, self.app._install_dispatch_flows.call_count) @utils.with_local_objects(fc10, fc11, fc12, fc13, pc2, *l2_objs) def test_uninstall_flow_classifier(self): pc2.emit_deleted()", "('_install_flow_classifier', '_uninstall_flow_classifier', '_install_classification_flows', '_install_dispatch_flows', '_uninstall_classification_flows', '_uninstall_dispatch_flows'): orig = getattr(self.app, attribute) p = mock.patch.object(self.app,", "unique_key=12, dest_port='lport2', ) fc3 = sfc.FlowClassifier( id='fc3', topic='topic1', unique_key=13, source_port='lport3', ) pc1 =", "the # License for the specific language governing permissions and limitations # under", "the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law", "self.app._install_flow_classifier.assert_called_once_with( pc1add.flow_classifiers[1]) self.app._uninstall_flow_classifier.assert_not_called() @utils.with_local_objects(fc1, fc2, fc3, pc1, *l2_objs) def test_pc_updated_remove_fc(self): pc1remove.emit_updated(pc1) self.app._install_flow_classifier.assert_not_called() self.app._uninstall_flow_classifier.assert_called_once_with(", "governing permissions and limitations # under the License. 
import mock from dragonflow.db.models import", "You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0", ") self.assertEqual(2, self.app._install_flow_classifier.call_count) self.app._uninstall_flow_classifier.assert_not_called() @utils.with_local_objects(fc1, fc2, fc3, pc1, *l2_objs) def test_pc_deleted(self): pc1.emit_deleted() self.app._install_flow_classifier.assert_not_called()", "and local dest ports: self.app._uninstall_dispatch_flows.assert_has_calls( [ mock.call(pc2.flow_classifiers[0]), mock.call(pc2.flow_classifiers[1]), mock.call(pc2.flow_classifiers[2]), ], ) self.assertEqual(3, self.app._uninstall_dispatch_flows.call_count)", "required by applicable law or agreed to in writing, software # distributed under", "applicable law or agreed to in writing, software # distributed under the License", "local source ports: self.app._install_classification_flows.has_calls( [ mock.call(pc2.flow_classifiers[0]), mock.call(pc2.flow_classifiers[2]), mock.call(pc2.flow_classifiers[3]), ], ) self.assertEqual(3, self.app._install_classification_flows.call_count) #", "dest ports: self.app._uninstall_dispatch_flows.assert_has_calls( [ mock.call(pc2.flow_classifiers[0]), mock.call(pc2.flow_classifiers[1]), mock.call(pc2.flow_classifiers[2]), ], ) self.assertEqual(3, self.app._uninstall_dispatch_flows.call_count) @utils.with_local_objects(fc1, fc2,", "mock.call(pc2.flow_classifiers[0]), mock.call(pc2.flow_classifiers[2]), mock.call(pc2.flow_classifiers[3]), ], ) self.assertEqual(3, self.app._install_classification_flows.call_count) # Installed only for source-port and", "topic='topic1', flow_classifiers=['fc1', 'fc3', 'fc2'], ) pc1remove = sfc.PortChain( id='pc1', topic='topic1', flow_classifiers=['fc2'], ) pc1replace", "in compliance with the License. 
You may obtain # a copy of the", "dest_port='lport2', ) pc2 = sfc.PortChain( id='pc2', topic='topic1', flow_classifiers=['fc10', 'fc11', 'fc12', 'fc14'], ) l2_objs", "or agreed to in writing, software # distributed under the License is distributed", "@utils.with_local_objects(fc1, fc2, fc3, pc1, *l2_objs) def test_pc_created(self): pc1.emit_created() self.app._install_flow_classifier.assert_has_calls( [ mock.call(pc1.flow_classifiers[0]), mock.call(pc1.flow_classifiers[1]), ],", "mock.call(pc1.flow_classifiers[0]), mock.call(pc1.flow_classifiers[1]), ], ) self.assertEqual(2, self.app._uninstall_flow_classifier.call_count) @utils.with_local_objects(fc1, fc2, fc3, pc1, *l2_objs) def test_pc_updated_add_fc(self):", "unique_key=22, lswitch='lswitch1', binding=test_app_base.local_binding, ) lport2 = l2.LogicalPort( id='lport2', topic='topic1', version=10, unique_key=24, lswitch='lswitch1', binding=test_app_base.local_binding,", "mock.call(pc1.flow_classifiers[1]), ], ) self.assertEqual(2, self.app._install_flow_classifier.call_count) self.app._uninstall_flow_classifier.assert_not_called() @utils.with_local_objects(fc1, fc2, fc3, pc1, *l2_objs) def test_pc_deleted(self):", "id='lport3', topic='topic1', version=10, unique_key=29, lswitch='lswitch1', binding=test_app_base.local_binding, ) fc1 = sfc.FlowClassifier( id='fc1', topic='topic1', unique_key=22,", "def setUp(self): super(TestFcApp, self).setUp() self.app = self.open_flow_app.dispatcher.apps['fc'] for attribute in ('_install_flow_classifier', '_uninstall_flow_classifier', '_install_classification_flows',", "unique_key=24, lswitch='lswitch1', binding=test_app_base.local_binding, ) lport3 = l2.LogicalPort( id='lport3', topic='topic1', version=10, unique_key=29, lswitch='lswitch1', binding=test_app_base.local_binding,", "id='fc1', topic='topic1', unique_key=22, source_port='lport1', ) fc2 = sfc.FlowClassifier( id='fc2', topic='topic1', unique_key=12, dest_port='lport2', )", "License is distributed on an \"AS IS\" 
BASIS, WITHOUT # WARRANTIES OR CONDITIONS", "writing, software # distributed under the License is distributed on an \"AS IS\"", "pc1, *l2_objs) def test_pc_updated_remove_fc(self): pc1remove.emit_updated(pc1) self.app._install_flow_classifier.assert_not_called() self.app._uninstall_flow_classifier.assert_called_once_with( pc1.flow_classifiers[0]) @utils.with_local_objects(fc1, fc2, fc3, pc1, *l2_objs)", "@utils.with_local_objects(fc1, fc2, fc3, pc1, *l2_objs) def test_pc_updated_replace_fc(self): pc1replace.emit_updated(pc1) self.app._install_flow_classifier.assert_called_once_with( pc1replace.flow_classifiers[0]) self.app._uninstall_flow_classifier.assert_called_once_with( pc1.flow_classifiers[0]) @utils.with_local_objects(fc10,", "mock.patch.object(self.app, attribute, side_effect=orig) self.addCleanup(p.stop) p.start() @utils.with_local_objects(fc1, fc2, fc3, pc1, *l2_objs) def test_pc_created(self): pc1.emit_created()", "super(TestFcApp, self).setUp() self.app = self.open_flow_app.dispatcher.apps['fc'] for attribute in ('_install_flow_classifier', '_uninstall_flow_classifier', '_install_classification_flows', '_install_dispatch_flows', '_uninstall_classification_flows',", "self.app._install_flow_classifier.assert_not_called() self.app._uninstall_flow_classifier.assert_has_calls( [ mock.call(pc1.flow_classifiers[0]), mock.call(pc1.flow_classifiers[1]), ], ) self.assertEqual(2, self.app._uninstall_flow_classifier.call_count) @utils.with_local_objects(fc1, fc2, fc3, pc1,", "lport3 = l2.LogicalPort( id='lport3', topic='topic1', version=10, unique_key=29, lswitch='lswitch1', binding=test_app_base.local_binding, ) fc1 = sfc.FlowClassifier(", "Installed only for dest-port and local source ports: self.app._install_classification_flows.has_calls( [ mock.call(pc2.flow_classifiers[0]), mock.call(pc2.flow_classifiers[2]), mock.call(pc2.flow_classifiers[3]),", "for dest-port and local source ports: self.app._uninstall_classification_flows.has_calls( [ 
mock.call(pc2.flow_classifiers[0]), mock.call(pc2.flow_classifiers[2]), mock.call(pc2.flow_classifiers[3]), ], )", ") pc2 = sfc.PortChain( id='pc2', topic='topic1', flow_classifiers=['fc10', 'fc11', 'fc12', 'fc14'], ) l2_objs =", "self.app._uninstall_flow_classifier.assert_not_called() @utils.with_local_objects(fc1, fc2, fc3, pc1, *l2_objs) def test_pc_updated_remove_fc(self): pc1remove.emit_updated(pc1) self.app._install_flow_classifier.assert_not_called() self.app._uninstall_flow_classifier.assert_called_once_with( pc1.flow_classifiers[0]) @utils.with_local_objects(fc1,", "utils from dragonflow.tests.unit import test_app_base lswitch1 = l2.LogicalSwitch( id='lswitch1', topic='topic1', version=10, unique_key=22, )", "id='fc10', topic='topic1', unique_key=10, source_port='lport1', ) fc11 = sfc.FlowClassifier( id='fc11', topic='topic1', unique_key=11, source_port='lport2', )", "of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable", "OpenStack Foundation. # All Rights Reserved. 
# # Licensed under the Apache License,", "fc13, pc2, *l2_objs) def test_uninstall_flow_classifier(self): pc2.emit_deleted() # Installed only for dest-port and local", "], ) self.assertEqual(3, self.app._uninstall_dispatch_flows.call_count) @utils.with_local_objects(fc1, fc2, pc1, *l2_objs) def test_src_local_port_added(self): lport1.emit_bind_local() self.app._install_classification_flows.assert_called_once_with(fc1) self.app._install_dispatch_flows.assert_not_called()", "self.addCleanup(p.stop) p.start() @utils.with_local_objects(fc1, fc2, fc3, pc1, *l2_objs) def test_pc_created(self): pc1.emit_created() self.app._install_flow_classifier.assert_has_calls( [ mock.call(pc1.flow_classifiers[0]),", "Licensed under the Apache License, Version 2.0 (the \"License\"); you may # not", "dragonflow.tests.unit import test_app_base lswitch1 = l2.LogicalSwitch( id='lswitch1', topic='topic1', version=10, unique_key=22, ) lport1 =", "self.assertEqual(3, self.app._uninstall_dispatch_flows.call_count) @utils.with_local_objects(fc1, fc2, pc1, *l2_objs) def test_src_local_port_added(self): lport1.emit_bind_local() self.app._install_classification_flows.assert_called_once_with(fc1) self.app._install_dispatch_flows.assert_not_called() @utils.with_local_objects(fc1, fc2,", "import test_app_base lswitch1 = l2.LogicalSwitch( id='lswitch1', topic='topic1', version=10, unique_key=22, ) lport1 = l2.LogicalPort(", "*l2_objs) def test_src_local_port_removed(self): lport1.emit_unbind_local() self.app._uninstall_classification_flows.assert_called_once_with(fc1) self.app._uninstall_dispatch_flows.assert_not_called() @utils.with_local_objects(fc1, fc2, pc1, *l2_objs) def test_dest_local_port_added(self): lport2.emit_bind_local()", "fc3, pc1, *l2_objs) def test_pc_created(self): pc1.emit_created() self.app._install_flow_classifier.assert_has_calls( [ mock.call(pc1.flow_classifiers[0]), mock.call(pc1.flow_classifiers[1]), ], ) self.assertEqual(2,", "License. 
import mock from dragonflow.db.models import l2 from dragonflow.db.models import sfc from dragonflow.tests.common", "sfc.FlowClassifier( id='fc10', topic='topic1', unique_key=10, source_port='lport1', ) fc11 = sfc.FlowClassifier( id='fc11', topic='topic1', unique_key=11, source_port='lport2',", "Installed only for source-port and local dest ports: self.app._uninstall_dispatch_flows.assert_has_calls( [ mock.call(pc2.flow_classifiers[0]), mock.call(pc2.flow_classifiers[1]), mock.call(pc2.flow_classifiers[2]),", "2.0 (the \"License\"); you may # not use this file except in compliance", "pc1, *l2_objs) def test_src_local_port_removed(self): lport1.emit_unbind_local() self.app._uninstall_classification_flows.assert_called_once_with(fc1) self.app._uninstall_dispatch_flows.assert_not_called() @utils.with_local_objects(fc1, fc2, pc1, *l2_objs) def test_dest_local_port_added(self):", "], ) self.assertEqual(2, self.app._install_flow_classifier.call_count) self.app._uninstall_flow_classifier.assert_not_called() @utils.with_local_objects(fc1, fc2, fc3, pc1, *l2_objs) def test_pc_deleted(self): pc1.emit_deleted()", "test_uninstall_flow_classifier(self): pc2.emit_deleted() # Installed only for dest-port and local source ports: self.app._uninstall_classification_flows.has_calls( [", "topic='topic1', version=10, unique_key=29, lswitch='lswitch1', binding=test_app_base.local_binding, ) fc1 = sfc.FlowClassifier( id='fc1', topic='topic1', unique_key=22, source_port='lport1',", ") fc11 = sfc.FlowClassifier( id='fc11', topic='topic1', unique_key=11, source_port='lport2', ) fc12 = sfc.FlowClassifier( id='fc12',", "], ) self.assertEqual(2, self.app._uninstall_flow_classifier.call_count) @utils.with_local_objects(fc1, fc2, fc3, pc1, *l2_objs) def test_pc_updated_add_fc(self): pc1add.emit_updated(pc1) self.app._install_flow_classifier.assert_called_once_with(", "self.app._install_dispatch_flows.call_count) @utils.with_local_objects(fc10, fc11, fc12, fc13, pc2, *l2_objs) def 
test_uninstall_flow_classifier(self): pc2.emit_deleted() # Installed only", "# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT", "id='pc1', topic='topic1', flow_classifiers=['fc3', 'fc2'], ) fc10 = sfc.FlowClassifier( id='fc10', topic='topic1', unique_key=10, source_port='lport1', )", "License, Version 2.0 (the \"License\"); you may # not use this file except", "flow_classifiers=['fc2'], ) pc1replace = sfc.PortChain( id='pc1', topic='topic1', flow_classifiers=['fc3', 'fc2'], ) fc10 = sfc.FlowClassifier(", "def test_pc_deleted(self): pc1.emit_deleted() self.app._install_flow_classifier.assert_not_called() self.app._uninstall_flow_classifier.assert_has_calls( [ mock.call(pc1.flow_classifiers[0]), mock.call(pc1.flow_classifiers[1]), ], ) self.assertEqual(2, self.app._uninstall_flow_classifier.call_count) @utils.with_local_objects(fc1,", "test_pc_updated_replace_fc(self): pc1replace.emit_updated(pc1) self.app._install_flow_classifier.assert_called_once_with( pc1replace.flow_classifiers[0]) self.app._uninstall_flow_classifier.assert_called_once_with( pc1.flow_classifiers[0]) @utils.with_local_objects(fc10, fc11, fc12, fc13, pc2, *l2_objs) def", "specific language governing permissions and limitations # under the License. 
import mock from", "source_port='lport1', ) fc2 = sfc.FlowClassifier( id='fc2', topic='topic1', unique_key=12, dest_port='lport2', ) fc3 = sfc.FlowClassifier(", "= getattr(self.app, attribute) p = mock.patch.object(self.app, attribute, side_effect=orig) self.addCleanup(p.stop) p.start() @utils.with_local_objects(fc1, fc2, fc3,", "= self.open_flow_app.dispatcher.apps['fc'] for attribute in ('_install_flow_classifier', '_uninstall_flow_classifier', '_install_classification_flows', '_install_dispatch_flows', '_uninstall_classification_flows', '_uninstall_dispatch_flows'): orig =", "= sfc.PortChain( id='pc2', topic='topic1', flow_classifiers=['fc10', 'fc11', 'fc12', 'fc14'], ) l2_objs = (lswitch1, lport1,", "mock.call(pc1.flow_classifiers[0]), mock.call(pc1.flow_classifiers[1]), ], ) self.assertEqual(2, self.app._install_flow_classifier.call_count) self.app._uninstall_flow_classifier.assert_not_called() @utils.with_local_objects(fc1, fc2, fc3, pc1, *l2_objs) def", "mock from dragonflow.db.models import l2 from dragonflow.db.models import sfc from dragonflow.tests.common import utils", "[ mock.call(pc1.flow_classifiers[0]), mock.call(pc1.flow_classifiers[1]), ], ) self.assertEqual(2, self.app._uninstall_flow_classifier.call_count) @utils.with_local_objects(fc1, fc2, fc3, pc1, *l2_objs) def", "pc1, *l2_objs) def test_pc_deleted(self): pc1.emit_deleted() self.app._install_flow_classifier.assert_not_called() self.app._uninstall_flow_classifier.assert_has_calls( [ mock.call(pc1.flow_classifiers[0]), mock.call(pc1.flow_classifiers[1]), ], ) self.assertEqual(2,", "= sfc.PortChain( id='pc1', topic='topic1', flow_classifiers=['fc3', 'fc2'], ) fc10 = sfc.FlowClassifier( id='fc10', topic='topic1', unique_key=10,", "source-port and local dest ports: self.app._uninstall_dispatch_flows.assert_has_calls( [ mock.call(pc2.flow_classifiers[0]), mock.call(pc2.flow_classifiers[1]), mock.call(pc2.flow_classifiers[2]), ], ) self.assertEqual(3,", "l2.LogicalPort( id='lport2', topic='topic1', 
version=10, unique_key=24, lswitch='lswitch1', binding=test_app_base.local_binding, ) lport3 = l2.LogicalPort( id='lport3', topic='topic1',", "attribute) p = mock.patch.object(self.app, attribute, side_effect=orig) self.addCleanup(p.stop) p.start() @utils.with_local_objects(fc1, fc2, fc3, pc1, *l2_objs)", "agreed to in writing, software # distributed under the License is distributed on", "ports: self.app._install_classification_flows.has_calls( [ mock.call(pc2.flow_classifiers[0]), mock.call(pc2.flow_classifiers[2]), mock.call(pc2.flow_classifiers[3]), ], ) self.assertEqual(3, self.app._install_classification_flows.call_count) # Installed only", "id='pc1', topic='topic1', flow_classifiers=['fc1', 'fc3', 'fc2'], ) pc1remove = sfc.PortChain( id='pc1', topic='topic1', flow_classifiers=['fc2'], )", "*l2_objs) def test_src_local_port_added(self): lport1.emit_bind_local() self.app._install_classification_flows.assert_called_once_with(fc1) self.app._install_dispatch_flows.assert_not_called() @utils.with_local_objects(fc1, fc2, pc1, *l2_objs) def test_src_local_port_removed(self): lport1.emit_unbind_local()", "pc1replace = sfc.PortChain( id='pc1', topic='topic1', flow_classifiers=['fc3', 'fc2'], ) fc10 = sfc.FlowClassifier( id='fc10', topic='topic1',", "source_port='lport1', ) fc11 = sfc.FlowClassifier( id='fc11', topic='topic1', unique_key=11, source_port='lport2', ) fc12 = sfc.FlowClassifier(", "dragonflow.tests.common import utils from dragonflow.tests.unit import test_app_base lswitch1 = l2.LogicalSwitch( id='lswitch1', topic='topic1', version=10,", "# Unless required by applicable law or agreed to in writing, software #", "= sfc.FlowClassifier( id='fc11', topic='topic1', unique_key=11, source_port='lport2', ) fc12 = sfc.FlowClassifier( id='fc12', topic='topic1', unique_key=12,", "def test_pc_updated_replace_fc(self): pc1replace.emit_updated(pc1) self.app._install_flow_classifier.assert_called_once_with( pc1replace.flow_classifiers[0]) 
self.app._uninstall_flow_classifier.assert_called_once_with( pc1.flow_classifiers[0]) @utils.with_local_objects(fc10, fc11, fc12, fc13, pc2, *l2_objs)", "Installed only for source-port and local dest ports: self.app._install_dispatch_flows.assert_has_calls( [ mock.call(pc2.flow_classifiers[0]), mock.call(pc2.flow_classifiers[1]), mock.call(pc2.flow_classifiers[2]),", "by applicable law or agreed to in writing, software # distributed under the", "@utils.with_local_objects(fc1, fc2, pc1, *l2_objs) def test_dest_local_port_added(self): lport2.emit_bind_local() self.app._install_classification_flows.assert_not_called() self.app._install_dispatch_flows.assert_called_once_with(fc2) @utils.with_local_objects(fc1, fc2, pc1, *l2_objs)", ") fc2 = sfc.FlowClassifier( id='fc2', topic='topic1', unique_key=12, dest_port='lport2', ) fc3 = sfc.FlowClassifier( id='fc3',", "under the License is distributed on an \"AS IS\" BASIS, WITHOUT # WARRANTIES", "copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by", "id='pc1', topic='topic1', flow_classifiers=['fc1', 'fc2'], ) pc1add = sfc.PortChain( id='pc1', topic='topic1', flow_classifiers=['fc1', 'fc3', 'fc2'],", "TestFcApp(test_app_base.DFAppTestBase): apps_list = ['fc'] def setUp(self): super(TestFcApp, self).setUp() self.app = self.open_flow_app.dispatcher.apps['fc'] for attribute", ") self.assertEqual(2, self.app._uninstall_flow_classifier.call_count) @utils.with_local_objects(fc1, fc2, fc3, pc1, *l2_objs) def test_pc_updated_add_fc(self): pc1add.emit_updated(pc1) self.app._install_flow_classifier.assert_called_once_with( pc1add.flow_classifiers[1])", "for attribute in ('_install_flow_classifier', '_uninstall_flow_classifier', '_install_classification_flows', '_install_dispatch_flows', '_uninstall_classification_flows', '_uninstall_dispatch_flows'): orig = getattr(self.app, attribute)", "def test_install_flow_classifier(self): pc2.emit_created() # Installed only for dest-port and local source 
ports: self.app._install_classification_flows.has_calls(", "unique_key=29, lswitch='lswitch1', binding=test_app_base.local_binding, ) fc1 = sfc.FlowClassifier( id='fc1', topic='topic1', unique_key=22, source_port='lport1', ) fc2", "and limitations # under the License. import mock from dragonflow.db.models import l2 from", "version=10, unique_key=22, lswitch='lswitch1', binding=test_app_base.local_binding, ) lport2 = l2.LogicalPort( id='lport2', topic='topic1', version=10, unique_key=24, lswitch='lswitch1',", "pc1replace.flow_classifiers[0]) self.app._uninstall_flow_classifier.assert_called_once_with( pc1.flow_classifiers[0]) @utils.with_local_objects(fc10, fc11, fc12, fc13, pc2, *l2_objs) def test_install_flow_classifier(self): pc2.emit_created() #", "sfc.FlowClassifier( id='fc12', topic='topic1', unique_key=12, dest_port='lport1', ) fc13 = sfc.FlowClassifier( id='fc13', topic='topic1', unique_key=13, dest_port='lport2',", "import l2 from dragonflow.db.models import sfc from dragonflow.tests.common import utils from dragonflow.tests.unit import", "mock.call(pc2.flow_classifiers[1]), mock.call(pc2.flow_classifiers[2]), ], ) self.assertEqual(3, self.app._uninstall_dispatch_flows.call_count) @utils.with_local_objects(fc1, fc2, pc1, *l2_objs) def test_src_local_port_added(self): lport1.emit_bind_local()", "self.open_flow_app.dispatcher.apps['fc'] for attribute in ('_install_flow_classifier', '_uninstall_flow_classifier', '_install_classification_flows', '_install_dispatch_flows', '_uninstall_classification_flows', '_uninstall_dispatch_flows'): orig = getattr(self.app,", "# # Licensed under the Apache License, Version 2.0 (the \"License\"); you may", "2016 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache", "except in compliance with the License. 
You may obtain # a copy of", "ports: self.app._install_dispatch_flows.assert_has_calls( [ mock.call(pc2.flow_classifiers[0]), mock.call(pc2.flow_classifiers[1]), mock.call(pc2.flow_classifiers[2]), ], ) self.assertEqual(3, self.app._install_dispatch_flows.call_count) @utils.with_local_objects(fc10, fc11, fc12,", "[ mock.call(pc2.flow_classifiers[0]), mock.call(pc2.flow_classifiers[1]), mock.call(pc2.flow_classifiers[2]), ], ) self.assertEqual(3, self.app._uninstall_dispatch_flows.call_count) @utils.with_local_objects(fc1, fc2, pc1, *l2_objs) def", "import sfc from dragonflow.tests.common import utils from dragonflow.tests.unit import test_app_base lswitch1 = l2.LogicalSwitch(", "*l2_objs) def test_pc_updated_remove_fc(self): pc1remove.emit_updated(pc1) self.app._install_flow_classifier.assert_not_called() self.app._uninstall_flow_classifier.assert_called_once_with( pc1.flow_classifiers[0]) @utils.with_local_objects(fc1, fc2, fc3, pc1, *l2_objs) def", "to in writing, software # distributed under the License is distributed on an", "the specific language governing permissions and limitations # under the License. 
import mock", "= sfc.FlowClassifier( id='fc1', topic='topic1', unique_key=22, source_port='lport1', ) fc2 = sfc.FlowClassifier( id='fc2', topic='topic1', unique_key=12,", "mock.call(pc2.flow_classifiers[2]), ], ) self.assertEqual(3, self.app._uninstall_dispatch_flows.call_count) @utils.with_local_objects(fc1, fc2, pc1, *l2_objs) def test_src_local_port_added(self): lport1.emit_bind_local() self.app._install_classification_flows.assert_called_once_with(fc1)", "flow_classifiers=['fc10', 'fc11', 'fc12', 'fc14'], ) l2_objs = (lswitch1, lport1, lport2, lport3) class TestFcApp(test_app_base.DFAppTestBase):", "self.assertEqual(2, self.app._uninstall_flow_classifier.call_count) @utils.with_local_objects(fc1, fc2, fc3, pc1, *l2_objs) def test_pc_updated_add_fc(self): pc1add.emit_updated(pc1) self.app._install_flow_classifier.assert_called_once_with( pc1add.flow_classifiers[1]) self.app._uninstall_flow_classifier.assert_not_called()", "'fc11', 'fc12', 'fc14'], ) l2_objs = (lswitch1, lport1, lport2, lport3) class TestFcApp(test_app_base.DFAppTestBase): apps_list", "Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the \"License\");", "fc2, fc3, pc1, *l2_objs) def test_pc_updated_add_fc(self): pc1add.emit_updated(pc1) self.app._install_flow_classifier.assert_called_once_with( pc1add.flow_classifiers[1]) self.app._uninstall_flow_classifier.assert_not_called() @utils.with_local_objects(fc1, fc2, fc3,", "source_port='lport3', ) pc1 = sfc.PortChain( id='pc1', topic='topic1', flow_classifiers=['fc1', 'fc2'], ) pc1add = sfc.PortChain(", "from dragonflow.db.models import l2 from dragonflow.db.models import sfc from dragonflow.tests.common import utils from", "distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT #", "sfc.FlowClassifier( id='fc2', topic='topic1', unique_key=12, dest_port='lport2', ) fc3 = sfc.FlowClassifier( id='fc3', topic='topic1', unique_key=13, source_port='lport3',", "binding=test_app_base.local_binding, ) lport2 = l2.LogicalPort( id='lport2', topic='topic1', version=10, unique_key=24, lswitch='lswitch1', binding=test_app_base.local_binding, ) lport3", "= sfc.FlowClassifier( id='fc2', topic='topic1', unique_key=12, dest_port='lport2', ) fc3 = sfc.FlowClassifier( id='fc3', topic='topic1', unique_key=13,", "# not use this file except in compliance with the License. 
You may", "# License for the specific language governing permissions and limitations # under the", "dest-port and local source ports: self.app._uninstall_classification_flows.has_calls( [ mock.call(pc2.flow_classifiers[0]), mock.call(pc2.flow_classifiers[2]), mock.call(pc2.flow_classifiers[3]), ], ) self.assertEqual(", "topic='topic1', unique_key=13, dest_port='lport2', ) pc2 = sfc.PortChain( id='pc2', topic='topic1', flow_classifiers=['fc10', 'fc11', 'fc12', 'fc14'],", "test_src_local_port_added(self): lport1.emit_bind_local() self.app._install_classification_flows.assert_called_once_with(fc1) self.app._install_dispatch_flows.assert_not_called() @utils.with_local_objects(fc1, fc2, pc1, *l2_objs) def test_src_local_port_removed(self): lport1.emit_unbind_local() self.app._uninstall_classification_flows.assert_called_once_with(fc1) self.app._uninstall_dispatch_flows.assert_not_called()", "pc1, *l2_objs) def test_dest_local_port_added(self): lport2.emit_bind_local() self.app._install_classification_flows.assert_not_called() self.app._install_dispatch_flows.assert_called_once_with(fc2) @utils.with_local_objects(fc1, fc2, pc1, *l2_objs) def test_dest_local_port_removed(self):", "def test_pc_updated_add_fc(self): pc1add.emit_updated(pc1) self.app._install_flow_classifier.assert_called_once_with( pc1add.flow_classifiers[1]) self.app._uninstall_flow_classifier.assert_not_called() @utils.with_local_objects(fc1, fc2, fc3, pc1, *l2_objs) def test_pc_updated_remove_fc(self):", "http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing,", "in writing, software # distributed under the License is distributed on an \"AS", "# under the License. 
import mock from dragonflow.db.models import l2 from dragonflow.db.models import", "Version 2.0 (the \"License\"); you may # not use this file except in", "dest ports: self.app._install_dispatch_flows.assert_has_calls( [ mock.call(pc2.flow_classifiers[0]), mock.call(pc2.flow_classifiers[1]), mock.call(pc2.flow_classifiers[2]), ], ) self.assertEqual(3, self.app._install_dispatch_flows.call_count) @utils.with_local_objects(fc10, fc11,", ") fc3 = sfc.FlowClassifier( id='fc3', topic='topic1', unique_key=13, source_port='lport3', ) pc1 = sfc.PortChain( id='pc1',", "mock.call(pc2.flow_classifiers[2]), ], ) self.assertEqual(3, self.app._install_dispatch_flows.call_count) @utils.with_local_objects(fc10, fc11, fc12, fc13, pc2, *l2_objs) def test_uninstall_flow_classifier(self):", "version=10, unique_key=22, ) lport1 = l2.LogicalPort( id='lport1', topic='topic1', version=10, unique_key=22, lswitch='lswitch1', binding=test_app_base.local_binding, )", "\"License\"); you may # not use this file except in compliance with the", "sfc.FlowClassifier( id='fc13', topic='topic1', unique_key=13, dest_port='lport2', ) pc2 = sfc.PortChain( id='pc2', topic='topic1', flow_classifiers=['fc10', 'fc11',", "self.app._uninstall_dispatch_flows.assert_has_calls( [ mock.call(pc2.flow_classifiers[0]), mock.call(pc2.flow_classifiers[1]), mock.call(pc2.flow_classifiers[2]), ], ) self.assertEqual(3, self.app._uninstall_dispatch_flows.call_count) @utils.with_local_objects(fc1, fc2, pc1, *l2_objs)", "= mock.patch.object(self.app, attribute, side_effect=orig) self.addCleanup(p.stop) p.start() @utils.with_local_objects(fc1, fc2, fc3, pc1, *l2_objs) def test_pc_created(self):", "Reserved. 
# # Licensed under the Apache License, Version 2.0 (the \"License\"); you", "flow_classifiers=['fc3', 'fc2'], ) fc10 = sfc.FlowClassifier( id='fc10', topic='topic1', unique_key=10, source_port='lport1', ) fc11 =", "the Apache License, Version 2.0 (the \"License\"); you may # not use this", "unique_key=22, ) lport1 = l2.LogicalPort( id='lport1', topic='topic1', version=10, unique_key=22, lswitch='lswitch1', binding=test_app_base.local_binding, ) lport2", "lswitch='lswitch1', binding=test_app_base.local_binding, ) fc1 = sfc.FlowClassifier( id='fc1', topic='topic1', unique_key=22, source_port='lport1', ) fc2 =", "fc3, pc1, *l2_objs) def test_pc_deleted(self): pc1.emit_deleted() self.app._install_flow_classifier.assert_not_called() self.app._uninstall_flow_classifier.assert_has_calls( [ mock.call(pc1.flow_classifiers[0]), mock.call(pc1.flow_classifiers[1]), ], )", "'fc2'], ) pc1remove = sfc.PortChain( id='pc1', topic='topic1', flow_classifiers=['fc2'], ) pc1replace = sfc.PortChain( id='pc1',", "= l2.LogicalPort( id='lport3', topic='topic1', version=10, unique_key=29, lswitch='lswitch1', binding=test_app_base.local_binding, ) fc1 = sfc.FlowClassifier( id='fc1',", "def test_src_local_port_removed(self): lport1.emit_unbind_local() self.app._uninstall_classification_flows.assert_called_once_with(fc1) self.app._uninstall_dispatch_flows.assert_not_called() @utils.with_local_objects(fc1, fc2, pc1, *l2_objs) def test_dest_local_port_added(self): lport2.emit_bind_local() self.app._install_classification_flows.assert_not_called()", "[ mock.call(pc2.flow_classifiers[0]), mock.call(pc2.flow_classifiers[2]), mock.call(pc2.flow_classifiers[3]), ], ) self.assertEqual(3, self.app._install_classification_flows.call_count) # Installed only for source-port", "class TestFcApp(test_app_base.DFAppTestBase): apps_list = ['fc'] def setUp(self): super(TestFcApp, self).setUp() self.app = self.open_flow_app.dispatcher.apps['fc'] for", "test_pc_updated_remove_fc(self): pc1remove.emit_updated(pc1) 
self.app._install_flow_classifier.assert_not_called() self.app._uninstall_flow_classifier.assert_called_once_with( pc1.flow_classifiers[0]) @utils.with_local_objects(fc1, fc2, fc3, pc1, *l2_objs) def test_pc_updated_replace_fc(self): pc1replace.emit_updated(pc1)", "not use this file except in compliance with the License. You may obtain", ") lport3 = l2.LogicalPort( id='lport3', topic='topic1', version=10, unique_key=29, lswitch='lswitch1', binding=test_app_base.local_binding, ) fc1 =", ") pc1replace = sfc.PortChain( id='pc1', topic='topic1', flow_classifiers=['fc3', 'fc2'], ) fc10 = sfc.FlowClassifier( id='fc10',", ") pc1 = sfc.PortChain( id='pc1', topic='topic1', flow_classifiers=['fc1', 'fc2'], ) pc1add = sfc.PortChain( id='pc1',", "pc2, *l2_objs) def test_uninstall_flow_classifier(self): pc2.emit_deleted() # Installed only for dest-port and local source", "limitations # under the License. import mock from dragonflow.db.models import l2 from dragonflow.db.models", "License for the specific language governing permissions and limitations # under the License.", "@utils.with_local_objects(fc10, fc11, fc12, fc13, pc2, *l2_objs) def test_uninstall_flow_classifier(self): pc2.emit_deleted() # Installed only for", "language governing permissions and limitations # under the License. 
import mock from dragonflow.db.models", "self.app._uninstall_flow_classifier.call_count) @utils.with_local_objects(fc1, fc2, fc3, pc1, *l2_objs) def test_pc_updated_add_fc(self): pc1add.emit_updated(pc1) self.app._install_flow_classifier.assert_called_once_with( pc1add.flow_classifiers[1]) self.app._uninstall_flow_classifier.assert_not_called() @utils.with_local_objects(fc1,", "pc2.emit_deleted() # Installed only for dest-port and local source ports: self.app._uninstall_classification_flows.has_calls( [ mock.call(pc2.flow_classifiers[0]),", "*l2_objs) def test_uninstall_flow_classifier(self): pc2.emit_deleted() # Installed only for dest-port and local source ports:", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the #", "fc3 = sfc.FlowClassifier( id='fc3', topic='topic1', unique_key=13, source_port='lport3', ) pc1 = sfc.PortChain( id='pc1', topic='topic1',", "local dest ports: self.app._install_dispatch_flows.assert_has_calls( [ mock.call(pc2.flow_classifiers[0]), mock.call(pc2.flow_classifiers[1]), mock.call(pc2.flow_classifiers[2]), ], ) self.assertEqual(3, self.app._install_dispatch_flows.call_count) @utils.with_local_objects(fc10,", "# Installed only for source-port and local dest ports: self.app._install_dispatch_flows.assert_has_calls( [ mock.call(pc2.flow_classifiers[0]), mock.call(pc2.flow_classifiers[1]),", "lswitch1 = l2.LogicalSwitch( id='lswitch1', topic='topic1', version=10, unique_key=22, ) lport1 = l2.LogicalPort( id='lport1', topic='topic1',", "l2 from dragonflow.db.models import sfc from dragonflow.tests.common import utils from dragonflow.tests.unit import test_app_base", "fc2, pc1, *l2_objs) def test_src_local_port_removed(self): lport1.emit_unbind_local() self.app._uninstall_classification_flows.assert_called_once_with(fc1) self.app._uninstall_dispatch_flows.assert_not_called() @utils.with_local_objects(fc1, fc2, pc1, *l2_objs) def", "l2.LogicalPort( id='lport1', topic='topic1', version=10, unique_key=22, 
lswitch='lswitch1', binding=test_app_base.local_binding, ) lport2 = l2.LogicalPort( id='lport2', topic='topic1',", "and local source ports: self.app._install_classification_flows.has_calls( [ mock.call(pc2.flow_classifiers[0]), mock.call(pc2.flow_classifiers[2]), mock.call(pc2.flow_classifiers[3]), ], ) self.assertEqual(3, self.app._install_classification_flows.call_count)", "def test_pc_created(self): pc1.emit_created() self.app._install_flow_classifier.assert_has_calls( [ mock.call(pc1.flow_classifiers[0]), mock.call(pc1.flow_classifiers[1]), ], ) self.assertEqual(2, self.app._install_flow_classifier.call_count) self.app._uninstall_flow_classifier.assert_not_called() @utils.with_local_objects(fc1,", "unique_key=22, source_port='lport1', ) fc2 = sfc.FlowClassifier( id='fc2', topic='topic1', unique_key=12, dest_port='lport2', ) fc3 =", "OF ANY KIND, either express or implied. See the # License for the", "permissions and limitations # under the License. import mock from dragonflow.db.models import l2", "pc1, *l2_objs) def test_pc_updated_add_fc(self): pc1add.emit_updated(pc1) self.app._install_flow_classifier.assert_called_once_with( pc1add.flow_classifiers[1]) self.app._uninstall_flow_classifier.assert_not_called() @utils.with_local_objects(fc1, fc2, fc3, pc1, *l2_objs)", "pc1.flow_classifiers[0]) @utils.with_local_objects(fc1, fc2, fc3, pc1, *l2_objs) def test_pc_updated_replace_fc(self): pc1replace.emit_updated(pc1) self.app._install_flow_classifier.assert_called_once_with( pc1replace.flow_classifiers[0]) self.app._uninstall_flow_classifier.assert_called_once_with( pc1.flow_classifiers[0])", ") lport2 = l2.LogicalPort( id='lport2', topic='topic1', version=10, unique_key=24, lswitch='lswitch1', binding=test_app_base.local_binding, ) lport3 =", "sfc.FlowClassifier( id='fc3', topic='topic1', unique_key=13, source_port='lport3', ) pc1 = sfc.PortChain( id='pc1', topic='topic1', flow_classifiers=['fc1', 'fc2'],", "# WARRANTIES OR CONDITIONS OF ANY KIND, either express 
or implied. See the", "@utils.with_local_objects(fc10, fc11, fc12, fc13, pc2, *l2_objs) def test_install_flow_classifier(self): pc2.emit_created() # Installed only for", ") fc10 = sfc.FlowClassifier( id='fc10', topic='topic1', unique_key=10, source_port='lport1', ) fc11 = sfc.FlowClassifier( id='fc11',", "self.app._install_flow_classifier.assert_has_calls( [ mock.call(pc1.flow_classifiers[0]), mock.call(pc1.flow_classifiers[1]), ], ) self.assertEqual(2, self.app._install_flow_classifier.call_count) self.app._uninstall_flow_classifier.assert_not_called() @utils.with_local_objects(fc1, fc2, fc3, pc1,", "self.assertEqual( 3, self.app._uninstall_classification_flows.call_count) # Installed only for source-port and local dest ports: self.app._uninstall_dispatch_flows.assert_has_calls(", "= (lswitch1, lport1, lport2, lport3) class TestFcApp(test_app_base.DFAppTestBase): apps_list = ['fc'] def setUp(self): super(TestFcApp,", "mock.call(pc2.flow_classifiers[3]), ], ) self.assertEqual( 3, self.app._uninstall_classification_flows.call_count) # Installed only for source-port and local", "(the \"License\"); you may # not use this file except in compliance with", "sfc.PortChain( id='pc1', topic='topic1', flow_classifiers=['fc2'], ) pc1replace = sfc.PortChain( id='pc1', topic='topic1', flow_classifiers=['fc3', 'fc2'], )", "# # Unless required by applicable law or agreed to in writing, software", "fc3, pc1, *l2_objs) def test_pc_updated_remove_fc(self): pc1remove.emit_updated(pc1) self.app._install_flow_classifier.assert_not_called() self.app._uninstall_flow_classifier.assert_called_once_with( pc1.flow_classifiers[0]) @utils.with_local_objects(fc1, fc2, fc3, pc1,", "topic='topic1', unique_key=22, source_port='lport1', ) fc2 = sfc.FlowClassifier( id='fc2', topic='topic1', unique_key=12, dest_port='lport2', ) fc3", "'_uninstall_classification_flows', '_uninstall_dispatch_flows'): orig = getattr(self.app, attribute) p = mock.patch.object(self.app, attribute, side_effect=orig) 
self.addCleanup(p.stop) p.start()", "from dragonflow.db.models import sfc from dragonflow.tests.common import utils from dragonflow.tests.unit import test_app_base lswitch1", "self).setUp() self.app = self.open_flow_app.dispatcher.apps['fc'] for attribute in ('_install_flow_classifier', '_uninstall_flow_classifier', '_install_classification_flows', '_install_dispatch_flows', '_uninstall_classification_flows', '_uninstall_dispatch_flows'):", ") pc1add = sfc.PortChain( id='pc1', topic='topic1', flow_classifiers=['fc1', 'fc3', 'fc2'], ) pc1remove = sfc.PortChain(", "in ('_install_flow_classifier', '_uninstall_flow_classifier', '_install_classification_flows', '_install_dispatch_flows', '_uninstall_classification_flows', '_uninstall_dispatch_flows'): orig = getattr(self.app, attribute) p =", "self.app._uninstall_flow_classifier.assert_has_calls( [ mock.call(pc1.flow_classifiers[0]), mock.call(pc1.flow_classifiers[1]), ], ) self.assertEqual(2, self.app._uninstall_flow_classifier.call_count) @utils.with_local_objects(fc1, fc2, fc3, pc1, *l2_objs)", "self.app._install_flow_classifier.assert_not_called() self.app._uninstall_flow_classifier.assert_called_once_with( pc1.flow_classifiers[0]) @utils.with_local_objects(fc1, fc2, fc3, pc1, *l2_objs) def test_pc_updated_replace_fc(self): pc1replace.emit_updated(pc1) self.app._install_flow_classifier.assert_called_once_with( pc1replace.flow_classifiers[0])", "fc3, pc1, *l2_objs) def test_pc_updated_replace_fc(self): pc1replace.emit_updated(pc1) self.app._install_flow_classifier.assert_called_once_with( pc1replace.flow_classifiers[0]) self.app._uninstall_flow_classifier.assert_called_once_with( pc1.flow_classifiers[0]) @utils.with_local_objects(fc10, fc11, fc12,", "for dest-port and local source ports: self.app._install_classification_flows.has_calls( [ mock.call(pc2.flow_classifiers[0]), mock.call(pc2.flow_classifiers[2]), mock.call(pc2.flow_classifiers[3]), ], )", "sfc from dragonflow.tests.common import utils from 
dragonflow.tests.unit import test_app_base lswitch1 = l2.LogicalSwitch( id='lswitch1',", "License. You may obtain # a copy of the License at # #", "the License is distributed on an \"AS IS\" BASIS, WITHOUT # WARRANTIES OR", "fc12, fc13, pc2, *l2_objs) def test_uninstall_flow_classifier(self): pc2.emit_deleted() # Installed only for dest-port and", "setUp(self): super(TestFcApp, self).setUp() self.app = self.open_flow_app.dispatcher.apps['fc'] for attribute in ('_install_flow_classifier', '_uninstall_flow_classifier', '_install_classification_flows', '_install_dispatch_flows',", "@utils.with_local_objects(fc1, fc2, pc1, *l2_objs) def test_src_local_port_removed(self): lport1.emit_unbind_local() self.app._uninstall_classification_flows.assert_called_once_with(fc1) self.app._uninstall_dispatch_flows.assert_not_called() @utils.with_local_objects(fc1, fc2, pc1, *l2_objs)", "test_dest_local_port_added(self): lport2.emit_bind_local() self.app._install_classification_flows.assert_not_called() self.app._install_dispatch_flows.assert_called_once_with(fc2) @utils.with_local_objects(fc1, fc2, pc1, *l2_objs) def test_dest_local_port_removed(self): lport2.emit_unbind_local() self.app._uninstall_classification_flows.assert_not_called() self.app._uninstall_dispatch_flows.assert_called_once_with(fc2)", "pc1.emit_created() self.app._install_flow_classifier.assert_has_calls( [ mock.call(pc1.flow_classifiers[0]), mock.call(pc1.flow_classifiers[1]), ], ) self.assertEqual(2, self.app._install_flow_classifier.call_count) self.app._uninstall_flow_classifier.assert_not_called() @utils.with_local_objects(fc1, fc2, fc3,", "ANY KIND, either express or implied. 
See the # License for the specific", "pc2.emit_created() # Installed only for dest-port and local source ports: self.app._install_classification_flows.has_calls( [ mock.call(pc2.flow_classifiers[0]),", "id='pc2', topic='topic1', flow_classifiers=['fc10', 'fc11', 'fc12', 'fc14'], ) l2_objs = (lswitch1, lport1, lport2, lport3)", "[ mock.call(pc2.flow_classifiers[0]), mock.call(pc2.flow_classifiers[2]), mock.call(pc2.flow_classifiers[3]), ], ) self.assertEqual( 3, self.app._uninstall_classification_flows.call_count) # Installed only for", "# Installed only for source-port and local dest ports: self.app._uninstall_dispatch_flows.assert_has_calls( [ mock.call(pc2.flow_classifiers[0]), mock.call(pc2.flow_classifiers[1]),", "pc1add.flow_classifiers[1]) self.app._uninstall_flow_classifier.assert_not_called() @utils.with_local_objects(fc1, fc2, fc3, pc1, *l2_objs) def test_pc_updated_remove_fc(self): pc1remove.emit_updated(pc1) self.app._install_flow_classifier.assert_not_called() self.app._uninstall_flow_classifier.assert_called_once_with( pc1.flow_classifiers[0])", "topic='topic1', flow_classifiers=['fc3', 'fc2'], ) fc10 = sfc.FlowClassifier( id='fc10', topic='topic1', unique_key=10, source_port='lport1', ) fc11", "'_install_dispatch_flows', '_uninstall_classification_flows', '_uninstall_dispatch_flows'): orig = getattr(self.app, attribute) p = mock.patch.object(self.app, attribute, side_effect=orig) self.addCleanup(p.stop)", "self.assertEqual(3, self.app._install_dispatch_flows.call_count) @utils.with_local_objects(fc10, fc11, fc12, fc13, pc2, *l2_objs) def test_uninstall_flow_classifier(self): pc2.emit_deleted() # Installed", "fc10 = sfc.FlowClassifier( id='fc10', topic='topic1', unique_key=10, source_port='lport1', ) fc11 = sfc.FlowClassifier( id='fc11', topic='topic1',", "pc1.flow_classifiers[0]) @utils.with_local_objects(fc10, fc11, fc12, fc13, pc2, *l2_objs) def test_install_flow_classifier(self): pc2.emit_created() # Installed only", "lswitch='lswitch1', 
binding=test_app_base.local_binding, ) lport2 = l2.LogicalPort( id='lport2', topic='topic1', version=10, unique_key=24, lswitch='lswitch1', binding=test_app_base.local_binding, )", "and local source ports: self.app._uninstall_classification_flows.has_calls( [ mock.call(pc2.flow_classifiers[0]), mock.call(pc2.flow_classifiers[2]), mock.call(pc2.flow_classifiers[3]), ], ) self.assertEqual( 3,", "topic='topic1', unique_key=12, dest_port='lport2', ) fc3 = sfc.FlowClassifier( id='fc3', topic='topic1', unique_key=13, source_port='lport3', ) pc1", "pc1, *l2_objs) def test_src_local_port_added(self): lport1.emit_bind_local() self.app._install_classification_flows.assert_called_once_with(fc1) self.app._install_dispatch_flows.assert_not_called() @utils.with_local_objects(fc1, fc2, pc1, *l2_objs) def test_src_local_port_removed(self):", "@utils.with_local_objects(fc1, fc2, fc3, pc1, *l2_objs) def test_pc_updated_remove_fc(self): pc1remove.emit_updated(pc1) self.app._install_flow_classifier.assert_not_called() self.app._uninstall_flow_classifier.assert_called_once_with( pc1.flow_classifiers[0]) @utils.with_local_objects(fc1, fc2,", "# All Rights Reserved. # # Licensed under the Apache License, Version 2.0", "(lswitch1, lport1, lport2, lport3) class TestFcApp(test_app_base.DFAppTestBase): apps_list = ['fc'] def setUp(self): super(TestFcApp, self).setUp()", "under the Apache License, Version 2.0 (the \"License\"); you may # not use", "from dragonflow.tests.common import utils from dragonflow.tests.unit import test_app_base lswitch1 = l2.LogicalSwitch( id='lswitch1', topic='topic1',", "WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See", ") l2_objs = (lswitch1, lport1, lport2, lport3) class TestFcApp(test_app_base.DFAppTestBase): apps_list = ['fc'] def", ") fc1 = sfc.FlowClassifier( id='fc1', topic='topic1', unique_key=22, source_port='lport1', ) fc2 = sfc.FlowClassifier( id='fc2',", "dragonflow.db.models import sfc from dragonflow.tests.common import utils from dragonflow.tests.unit import test_app_base lswitch1 =", "dest-port and local source ports: self.app._install_classification_flows.has_calls( [ mock.call(pc2.flow_classifiers[0]), mock.call(pc2.flow_classifiers[2]), mock.call(pc2.flow_classifiers[3]), ], ) self.assertEqual(3,", "only for source-port and local dest ports: self.app._install_dispatch_flows.assert_has_calls( [ mock.call(pc2.flow_classifiers[0]), mock.call(pc2.flow_classifiers[1]), mock.call(pc2.flow_classifiers[2]), ],", "def test_uninstall_flow_classifier(self): pc2.emit_deleted() # Installed only for dest-port and local source ports: self.app._uninstall_classification_flows.has_calls(", "Installed only for dest-port and local source ports: self.app._uninstall_classification_flows.has_calls( [ mock.call(pc2.flow_classifiers[0]), mock.call(pc2.flow_classifiers[2]), mock.call(pc2.flow_classifiers[3]),", ") self.assertEqual(3, self.app._uninstall_dispatch_flows.call_count) @utils.with_local_objects(fc1, fc2, pc1, *l2_objs) def test_src_local_port_added(self): lport1.emit_bind_local() self.app._install_classification_flows.assert_called_once_with(fc1) self.app._install_dispatch_flows.assert_not_called() @utils.with_local_objects(fc1,", "See the # License for the specific language governing permissions and limitations #", "# Installed only for dest-port and local source ports: self.app._install_classification_flows.has_calls( [ mock.call(pc2.flow_classifiers[0]), mock.call(pc2.flow_classifiers[2]),", "= sfc.PortChain( id='pc1', topic='topic1', flow_classifiers=['fc1', 'fc3', 'fc2'], ) pc1remove = sfc.PortChain( id='pc1', topic='topic1',", "test_src_local_port_removed(self): 
lport1.emit_unbind_local() self.app._uninstall_classification_flows.assert_called_once_with(fc1) self.app._uninstall_dispatch_flows.assert_not_called() @utils.with_local_objects(fc1, fc2, pc1, *l2_objs) def test_dest_local_port_added(self): lport2.emit_bind_local() self.app._install_classification_flows.assert_not_called() self.app._install_dispatch_flows.assert_called_once_with(fc2)", "sfc.FlowClassifier( id='fc11', topic='topic1', unique_key=11, source_port='lport2', ) fc12 = sfc.FlowClassifier( id='fc12', topic='topic1', unique_key=12, dest_port='lport1',", "test_pc_deleted(self): pc1.emit_deleted() self.app._install_flow_classifier.assert_not_called() self.app._uninstall_flow_classifier.assert_has_calls( [ mock.call(pc1.flow_classifiers[0]), mock.call(pc1.flow_classifiers[1]), ], ) self.assertEqual(2, self.app._uninstall_flow_classifier.call_count) @utils.with_local_objects(fc1, fc2,", "law or agreed to in writing, software # distributed under the License is", "= sfc.FlowClassifier( id='fc13', topic='topic1', unique_key=13, dest_port='lport2', ) pc2 = sfc.PortChain( id='pc2', topic='topic1', flow_classifiers=['fc10',", "fc11 = sfc.FlowClassifier( id='fc11', topic='topic1', unique_key=11, source_port='lport2', ) fc12 = sfc.FlowClassifier( id='fc12', topic='topic1',", "lport1, lport2, lport3) class TestFcApp(test_app_base.DFAppTestBase): apps_list = ['fc'] def setUp(self): super(TestFcApp, self).setUp() self.app", "unique_key=10, source_port='lport1', ) fc11 = sfc.FlowClassifier( id='fc11', topic='topic1', unique_key=11, source_port='lport2', ) fc12 =", "express or implied. See the # License for the specific language governing permissions", "an \"AS IS\" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either", "# a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless", "CONDITIONS OF ANY KIND, either express or implied. 
See the # License for", "topic='topic1', unique_key=11, source_port='lport2', ) fc12 = sfc.FlowClassifier( id='fc12', topic='topic1', unique_key=12, dest_port='lport1', ) fc13", "for the specific language governing permissions and limitations # under the License. import", "pc1remove.emit_updated(pc1) self.app._install_flow_classifier.assert_not_called() self.app._uninstall_flow_classifier.assert_called_once_with( pc1.flow_classifiers[0]) @utils.with_local_objects(fc1, fc2, fc3, pc1, *l2_objs) def test_pc_updated_replace_fc(self): pc1replace.emit_updated(pc1) self.app._install_flow_classifier.assert_called_once_with(", "Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version", "fc11, fc12, fc13, pc2, *l2_objs) def test_install_flow_classifier(self): pc2.emit_created() # Installed only for dest-port", "License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "pc2 = sfc.PortChain( id='pc2', topic='topic1', flow_classifiers=['fc10', 'fc11', 'fc12', 'fc14'], ) l2_objs = (lswitch1,", "lport1.emit_unbind_local() self.app._uninstall_classification_flows.assert_called_once_with(fc1) self.app._uninstall_dispatch_flows.assert_not_called() @utils.with_local_objects(fc1, fc2, pc1, *l2_objs) def test_dest_local_port_added(self): lport2.emit_bind_local() self.app._install_classification_flows.assert_not_called() self.app._install_dispatch_flows.assert_called_once_with(fc2) @utils.with_local_objects(fc1,", "fc13, pc2, *l2_objs) def test_install_flow_classifier(self): pc2.emit_created() # Installed only for dest-port and local", "test_install_flow_classifier(self): pc2.emit_created() # Installed only for dest-port and local source ports: self.app._install_classification_flows.has_calls( [", "mock.call(pc2.flow_classifiers[2]), mock.call(pc2.flow_classifiers[3]), ], ) self.assertEqual( 3, self.app._uninstall_classification_flows.call_count) # Installed only for source-port and", "def 
test_pc_updated_remove_fc(self): pc1remove.emit_updated(pc1) self.app._install_flow_classifier.assert_not_called() self.app._uninstall_flow_classifier.assert_called_once_with( pc1.flow_classifiers[0]) @utils.with_local_objects(fc1, fc2, fc3, pc1, *l2_objs) def test_pc_updated_replace_fc(self):", "test_pc_updated_add_fc(self): pc1add.emit_updated(pc1) self.app._install_flow_classifier.assert_called_once_with( pc1add.flow_classifiers[1]) self.app._uninstall_flow_classifier.assert_not_called() @utils.with_local_objects(fc1, fc2, fc3, pc1, *l2_objs) def test_pc_updated_remove_fc(self): pc1remove.emit_updated(pc1)", "at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed", "'fc3', 'fc2'], ) pc1remove = sfc.PortChain( id='pc1', topic='topic1', flow_classifiers=['fc2'], ) pc1replace = sfc.PortChain(", "= sfc.PortChain( id='pc1', topic='topic1', flow_classifiers=['fc2'], ) pc1replace = sfc.PortChain( id='pc1', topic='topic1', flow_classifiers=['fc3', 'fc2'],", "compliance with the License. 
You may obtain # a copy of the License", "mock.call(pc2.flow_classifiers[1]), mock.call(pc2.flow_classifiers[2]), ], ) self.assertEqual(3, self.app._install_dispatch_flows.call_count) @utils.with_local_objects(fc10, fc11, fc12, fc13, pc2, *l2_objs) def", ") fc13 = sfc.FlowClassifier( id='fc13', topic='topic1', unique_key=13, dest_port='lport2', ) pc2 = sfc.PortChain( id='pc2',", "3, self.app._uninstall_classification_flows.call_count) # Installed only for source-port and local dest ports: self.app._uninstall_dispatch_flows.assert_has_calls( [", "attribute, side_effect=orig) self.addCleanup(p.stop) p.start() @utils.with_local_objects(fc1, fc2, fc3, pc1, *l2_objs) def test_pc_created(self): pc1.emit_created() self.app._install_flow_classifier.assert_has_calls(", ") self.assertEqual(3, self.app._install_classification_flows.call_count) # Installed only for source-port and local dest ports: self.app._install_dispatch_flows.assert_has_calls(", "import mock from dragonflow.db.models import l2 from dragonflow.db.models import sfc from dragonflow.tests.common import", "pc1 = sfc.PortChain( id='pc1', topic='topic1', flow_classifiers=['fc1', 'fc2'], ) pc1add = sfc.PortChain( id='pc1', topic='topic1',", ") self.assertEqual(3, self.app._install_dispatch_flows.call_count) @utils.with_local_objects(fc10, fc11, fc12, fc13, pc2, *l2_objs) def test_uninstall_flow_classifier(self): pc2.emit_deleted() #", "[ mock.call(pc2.flow_classifiers[0]), mock.call(pc2.flow_classifiers[1]), mock.call(pc2.flow_classifiers[2]), ], ) self.assertEqual(3, self.app._install_dispatch_flows.call_count) @utils.with_local_objects(fc10, fc11, fc12, fc13, pc2,", "IS\" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required", "pc1, *l2_objs) def test_pc_created(self): pc1.emit_created() self.app._install_flow_classifier.assert_has_calls( [ mock.call(pc1.flow_classifiers[0]), 
mock.call(pc1.flow_classifiers[1]), ], ) self.assertEqual(2, self.app._install_flow_classifier.call_count)", "# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in", "pc1remove = sfc.PortChain( id='pc1', topic='topic1', flow_classifiers=['fc2'], ) pc1replace = sfc.PortChain( id='pc1', topic='topic1', flow_classifiers=['fc3',", "fc2, fc3, pc1, *l2_objs) def test_pc_updated_remove_fc(self): pc1remove.emit_updated(pc1) self.app._install_flow_classifier.assert_not_called() self.app._uninstall_flow_classifier.assert_called_once_with( pc1.flow_classifiers[0]) @utils.with_local_objects(fc1, fc2, fc3,", "unique_key=13, dest_port='lport2', ) pc2 = sfc.PortChain( id='pc2', topic='topic1', flow_classifiers=['fc10', 'fc11', 'fc12', 'fc14'], )", "lport1.emit_bind_local() self.app._install_classification_flows.assert_called_once_with(fc1) self.app._install_dispatch_flows.assert_not_called() @utils.with_local_objects(fc1, fc2, pc1, *l2_objs) def test_src_local_port_removed(self): lport1.emit_unbind_local() self.app._uninstall_classification_flows.assert_called_once_with(fc1) self.app._uninstall_dispatch_flows.assert_not_called() @utils.with_local_objects(fc1,", "self.app._install_classification_flows.has_calls( [ mock.call(pc2.flow_classifiers[0]), mock.call(pc2.flow_classifiers[2]), mock.call(pc2.flow_classifiers[3]), ], ) self.assertEqual(3, self.app._install_classification_flows.call_count) # Installed only for", "fc2, pc1, *l2_objs) def test_src_local_port_added(self): lport1.emit_bind_local() self.app._install_classification_flows.assert_called_once_with(fc1) self.app._install_dispatch_flows.assert_not_called() @utils.with_local_objects(fc1, fc2, pc1, *l2_objs) def", "binding=test_app_base.local_binding, ) lport3 = l2.LogicalPort( id='lport3', topic='topic1', version=10, unique_key=29, lswitch='lswitch1', binding=test_app_base.local_binding, ) fc1", "source_port='lport2', ) fc12 = sfc.FlowClassifier( id='fc12', topic='topic1', 
unique_key=12, dest_port='lport1', ) fc13 = sfc.FlowClassifier(", "self.app._install_classification_flows.call_count) # Installed only for source-port and local dest ports: self.app._install_dispatch_flows.assert_has_calls( [ mock.call(pc2.flow_classifiers[0]),", "self.assertEqual(2, self.app._install_flow_classifier.call_count) self.app._uninstall_flow_classifier.assert_not_called() @utils.with_local_objects(fc1, fc2, fc3, pc1, *l2_objs) def test_pc_deleted(self): pc1.emit_deleted() self.app._install_flow_classifier.assert_not_called() self.app._uninstall_flow_classifier.assert_has_calls(", "l2.LogicalSwitch( id='lswitch1', topic='topic1', version=10, unique_key=22, ) lport1 = l2.LogicalPort( id='lport1', topic='topic1', version=10, unique_key=22,", "lport3) class TestFcApp(test_app_base.DFAppTestBase): apps_list = ['fc'] def setUp(self): super(TestFcApp, self).setUp() self.app = self.open_flow_app.dispatcher.apps['fc']", "*l2_objs) def test_pc_updated_replace_fc(self): pc1replace.emit_updated(pc1) self.app._install_flow_classifier.assert_called_once_with( pc1replace.flow_classifiers[0]) self.app._uninstall_flow_classifier.assert_called_once_with( pc1.flow_classifiers[0]) @utils.with_local_objects(fc10, fc11, fc12, fc13, pc2,", ") fc12 = sfc.FlowClassifier( id='fc12', topic='topic1', unique_key=12, dest_port='lport1', ) fc13 = sfc.FlowClassifier( id='fc13',", "topic='topic1', unique_key=13, source_port='lport3', ) pc1 = sfc.PortChain( id='pc1', topic='topic1', flow_classifiers=['fc1', 'fc2'], ) pc1add", "self.app._uninstall_flow_classifier.assert_called_once_with( pc1.flow_classifiers[0]) @utils.with_local_objects(fc1, fc2, fc3, pc1, *l2_objs) def test_pc_updated_replace_fc(self): pc1replace.emit_updated(pc1) self.app._install_flow_classifier.assert_called_once_with( pc1replace.flow_classifiers[0]) self.app._uninstall_flow_classifier.assert_called_once_with(", "*l2_objs) def test_install_flow_classifier(self): pc2.emit_created() # Installed only for dest-port and 
local source ports:", "mock.call(pc2.flow_classifiers[2]), mock.call(pc2.flow_classifiers[3]), ], ) self.assertEqual(3, self.app._install_classification_flows.call_count) # Installed only for source-port and local", "mock.call(pc2.flow_classifiers[0]), mock.call(pc2.flow_classifiers[1]), mock.call(pc2.flow_classifiers[2]), ], ) self.assertEqual(3, self.app._uninstall_dispatch_flows.call_count) @utils.with_local_objects(fc1, fc2, pc1, *l2_objs) def test_src_local_port_added(self):", "topic='topic1', flow_classifiers=['fc1', 'fc2'], ) pc1add = sfc.PortChain( id='pc1', topic='topic1', flow_classifiers=['fc1', 'fc3', 'fc2'], )", "version=10, unique_key=24, lswitch='lswitch1', binding=test_app_base.local_binding, ) lport3 = l2.LogicalPort( id='lport3', topic='topic1', version=10, unique_key=29, lswitch='lswitch1',", "[ mock.call(pc1.flow_classifiers[0]), mock.call(pc1.flow_classifiers[1]), ], ) self.assertEqual(2, self.app._install_flow_classifier.call_count) self.app._uninstall_flow_classifier.assert_not_called() @utils.with_local_objects(fc1, fc2, fc3, pc1, *l2_objs)", "lswitch='lswitch1', binding=test_app_base.local_binding, ) lport3 = l2.LogicalPort( id='lport3', topic='topic1', version=10, unique_key=29, lswitch='lswitch1', binding=test_app_base.local_binding, )", "may # not use this file except in compliance with the License. 
You", "self.app._uninstall_classification_flows.assert_called_once_with(fc1) self.app._uninstall_dispatch_flows.assert_not_called() @utils.with_local_objects(fc1, fc2, pc1, *l2_objs) def test_dest_local_port_added(self): lport2.emit_bind_local() self.app._install_classification_flows.assert_not_called() self.app._install_dispatch_flows.assert_called_once_with(fc2) @utils.with_local_objects(fc1, fc2,", "topic='topic1', unique_key=10, source_port='lport1', ) fc11 = sfc.FlowClassifier( id='fc11', topic='topic1', unique_key=11, source_port='lport2', ) fc12", "attribute in ('_install_flow_classifier', '_uninstall_flow_classifier', '_install_classification_flows', '_install_dispatch_flows', '_uninstall_classification_flows', '_uninstall_dispatch_flows'): orig = getattr(self.app, attribute) p", "either express or implied. See the # License for the specific language governing", "test_app_base lswitch1 = l2.LogicalSwitch( id='lswitch1', topic='topic1', version=10, unique_key=22, ) lport1 = l2.LogicalPort( id='lport1',", ") pc1remove = sfc.PortChain( id='pc1', topic='topic1', flow_classifiers=['fc2'], ) pc1replace = sfc.PortChain( id='pc1', topic='topic1',", ") lport1 = l2.LogicalPort( id='lport1', topic='topic1', version=10, unique_key=22, lswitch='lswitch1', binding=test_app_base.local_binding, ) lport2 =", "self.app._uninstall_flow_classifier.assert_not_called() @utils.with_local_objects(fc1, fc2, fc3, pc1, *l2_objs) def test_pc_deleted(self): pc1.emit_deleted() self.app._install_flow_classifier.assert_not_called() self.app._uninstall_flow_classifier.assert_has_calls( [ mock.call(pc1.flow_classifiers[0]),", "this file except in compliance with the License. 
You may obtain # a", "self.app._install_flow_classifier.assert_called_once_with( pc1replace.flow_classifiers[0]) self.app._uninstall_flow_classifier.assert_called_once_with( pc1.flow_classifiers[0]) @utils.with_local_objects(fc10, fc11, fc12, fc13, pc2, *l2_objs) def test_install_flow_classifier(self): pc2.emit_created()", "self.app._install_classification_flows.assert_called_once_with(fc1) self.app._install_dispatch_flows.assert_not_called() @utils.with_local_objects(fc1, fc2, pc1, *l2_objs) def test_src_local_port_removed(self): lport1.emit_unbind_local() self.app._uninstall_classification_flows.assert_called_once_with(fc1) self.app._uninstall_dispatch_flows.assert_not_called() @utils.with_local_objects(fc1, fc2,", "sfc.FlowClassifier( id='fc1', topic='topic1', unique_key=22, source_port='lport1', ) fc2 = sfc.FlowClassifier( id='fc2', topic='topic1', unique_key=12, dest_port='lport2',", "or implied. See the # License for the specific language governing permissions and", "pc1replace.emit_updated(pc1) self.app._install_flow_classifier.assert_called_once_with( pc1replace.flow_classifiers[0]) self.app._uninstall_flow_classifier.assert_called_once_with( pc1.flow_classifiers[0]) @utils.with_local_objects(fc10, fc11, fc12, fc13, pc2, *l2_objs) def test_install_flow_classifier(self):", "<reponame>qianyuqiao/dragonflow # Copyright (c) 2016 OpenStack Foundation. # All Rights Reserved. 
# #", "for source-port and local dest ports: self.app._install_dispatch_flows.assert_has_calls( [ mock.call(pc2.flow_classifiers[0]), mock.call(pc2.flow_classifiers[1]), mock.call(pc2.flow_classifiers[2]), ], )", "mock.call(pc2.flow_classifiers[0]), mock.call(pc2.flow_classifiers[1]), mock.call(pc2.flow_classifiers[2]), ], ) self.assertEqual(3, self.app._install_dispatch_flows.call_count) @utils.with_local_objects(fc10, fc11, fc12, fc13, pc2, *l2_objs)", "id='fc13', topic='topic1', unique_key=13, dest_port='lport2', ) pc2 = sfc.PortChain( id='pc2', topic='topic1', flow_classifiers=['fc10', 'fc11', 'fc12',", "self.app._uninstall_dispatch_flows.call_count) @utils.with_local_objects(fc1, fc2, pc1, *l2_objs) def test_src_local_port_added(self): lport1.emit_bind_local() self.app._install_classification_flows.assert_called_once_with(fc1) self.app._install_dispatch_flows.assert_not_called() @utils.with_local_objects(fc1, fc2, pc1,", "id='lport1', topic='topic1', version=10, unique_key=22, lswitch='lswitch1', binding=test_app_base.local_binding, ) lport2 = l2.LogicalPort( id='lport2', topic='topic1', version=10,", "self.app._uninstall_classification_flows.has_calls( [ mock.call(pc2.flow_classifiers[0]), mock.call(pc2.flow_classifiers[2]), mock.call(pc2.flow_classifiers[3]), ], ) self.assertEqual( 3, self.app._uninstall_classification_flows.call_count) # Installed only", "id='lport2', topic='topic1', version=10, unique_key=24, lswitch='lswitch1', binding=test_app_base.local_binding, ) lport3 = l2.LogicalPort( id='lport3', topic='topic1', version=10,", "dest_port='lport2', ) fc3 = sfc.FlowClassifier( id='fc3', topic='topic1', unique_key=13, source_port='lport3', ) pc1 = sfc.PortChain(", "on an \"AS IS\" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND,", "*l2_objs) def test_pc_created(self): pc1.emit_created() self.app._install_flow_classifier.assert_has_calls( [ mock.call(pc1.flow_classifiers[0]), mock.call(pc1.flow_classifiers[1]), ], ) self.assertEqual(2, 
self.app._install_flow_classifier.call_count) self.app._uninstall_flow_classifier.assert_not_called()", "for source-port and local dest ports: self.app._uninstall_dispatch_flows.assert_has_calls( [ mock.call(pc2.flow_classifiers[0]), mock.call(pc2.flow_classifiers[1]), mock.call(pc2.flow_classifiers[2]), ], )", "self.app = self.open_flow_app.dispatcher.apps['fc'] for attribute in ('_install_flow_classifier', '_uninstall_flow_classifier', '_install_classification_flows', '_install_dispatch_flows', '_uninstall_classification_flows', '_uninstall_dispatch_flows'): orig", "self.app._uninstall_classification_flows.call_count) # Installed only for source-port and local dest ports: self.app._uninstall_dispatch_flows.assert_has_calls( [ mock.call(pc2.flow_classifiers[0]),", "flow_classifiers=['fc1', 'fc2'], ) pc1add = sfc.PortChain( id='pc1', topic='topic1', flow_classifiers=['fc1', 'fc3', 'fc2'], ) pc1remove", "'fc2'], ) pc1add = sfc.PortChain( id='pc1', topic='topic1', flow_classifiers=['fc1', 'fc3', 'fc2'], ) pc1remove =", "*l2_objs) def test_dest_local_port_added(self): lport2.emit_bind_local() self.app._install_classification_flows.assert_not_called() self.app._install_dispatch_flows.assert_called_once_with(fc2) @utils.with_local_objects(fc1, fc2, pc1, *l2_objs) def test_dest_local_port_removed(self): lport2.emit_unbind_local()", "# Copyright (c) 2016 OpenStack Foundation. # All Rights Reserved. # # Licensed", "(c) 2016 OpenStack Foundation. # All Rights Reserved. # # Licensed under the", "under the License. import mock from dragonflow.db.models import l2 from dragonflow.db.models import sfc", "topic='topic1', version=10, unique_key=24, lswitch='lswitch1', binding=test_app_base.local_binding, ) lport3 = l2.LogicalPort( id='lport3', topic='topic1', version=10, unique_key=29,", "the License. 
import mock from dragonflow.db.models import l2 from dragonflow.db.models import sfc from", "@utils.with_local_objects(fc1, fc2, fc3, pc1, *l2_objs) def test_pc_updated_add_fc(self): pc1add.emit_updated(pc1) self.app._install_flow_classifier.assert_called_once_with( pc1add.flow_classifiers[1]) self.app._uninstall_flow_classifier.assert_not_called() @utils.with_local_objects(fc1, fc2,", "= l2.LogicalSwitch( id='lswitch1', topic='topic1', version=10, unique_key=22, ) lport1 = l2.LogicalPort( id='lport1', topic='topic1', version=10,", "OR CONDITIONS OF ANY KIND, either express or implied. See the # License", "obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #", "], ) self.assertEqual( 3, self.app._uninstall_classification_flows.call_count) # Installed only for source-port and local dest", "lport2 = l2.LogicalPort( id='lport2', topic='topic1', version=10, unique_key=24, lswitch='lswitch1', binding=test_app_base.local_binding, ) lport3 = l2.LogicalPort(", "getattr(self.app, attribute) p = mock.patch.object(self.app, attribute, side_effect=orig) self.addCleanup(p.stop) p.start() @utils.with_local_objects(fc1, fc2, fc3, pc1,", "fc2, pc1, *l2_objs) def test_dest_local_port_added(self): lport2.emit_bind_local() self.app._install_classification_flows.assert_not_called() self.app._install_dispatch_flows.assert_called_once_with(fc2) @utils.with_local_objects(fc1, fc2, pc1, *l2_objs) def", "@utils.with_local_objects(fc1, fc2, fc3, pc1, *l2_objs) def test_pc_deleted(self): pc1.emit_deleted() self.app._install_flow_classifier.assert_not_called() self.app._uninstall_flow_classifier.assert_has_calls( [ mock.call(pc1.flow_classifiers[0]), mock.call(pc1.flow_classifiers[1]),", "pc1, *l2_objs) def test_pc_updated_replace_fc(self): pc1replace.emit_updated(pc1) self.app._install_flow_classifier.assert_called_once_with( pc1replace.flow_classifiers[0]) self.app._uninstall_flow_classifier.assert_called_once_with( pc1.flow_classifiers[0]) 
@utils.with_local_objects(fc10, fc11, fc12, fc13,", "# Licensed under the Apache License, Version 2.0 (the \"License\"); you may #", "topic='topic1', version=10, unique_key=22, lswitch='lswitch1', binding=test_app_base.local_binding, ) lport2 = l2.LogicalPort( id='lport2', topic='topic1', version=10, unique_key=24,", "fc1 = sfc.FlowClassifier( id='fc1', topic='topic1', unique_key=22, source_port='lport1', ) fc2 = sfc.FlowClassifier( id='fc2', topic='topic1',", "*l2_objs) def test_pc_updated_add_fc(self): pc1add.emit_updated(pc1) self.app._install_flow_classifier.assert_called_once_with( pc1add.flow_classifiers[1]) self.app._uninstall_flow_classifier.assert_not_called() @utils.with_local_objects(fc1, fc2, fc3, pc1, *l2_objs) def", "self.app._install_flow_classifier.call_count) self.app._uninstall_flow_classifier.assert_not_called() @utils.with_local_objects(fc1, fc2, fc3, pc1, *l2_objs) def test_pc_deleted(self): pc1.emit_deleted() self.app._install_flow_classifier.assert_not_called() self.app._uninstall_flow_classifier.assert_has_calls( [", "= sfc.FlowClassifier( id='fc3', topic='topic1', unique_key=13, source_port='lport3', ) pc1 = sfc.PortChain( id='pc1', topic='topic1', flow_classifiers=['fc1',", "source ports: self.app._uninstall_classification_flows.has_calls( [ mock.call(pc2.flow_classifiers[0]), mock.call(pc2.flow_classifiers[2]), mock.call(pc2.flow_classifiers[3]), ], ) self.assertEqual( 3, self.app._uninstall_classification_flows.call_count) #", "local source ports: self.app._uninstall_classification_flows.has_calls( [ mock.call(pc2.flow_classifiers[0]), mock.call(pc2.flow_classifiers[2]), mock.call(pc2.flow_classifiers[3]), ], ) self.assertEqual( 3, self.app._uninstall_classification_flows.call_count)", "'_install_classification_flows', '_install_dispatch_flows', '_uninstall_classification_flows', '_uninstall_dispatch_flows'): orig = getattr(self.app, attribute) p = mock.patch.object(self.app, attribute, side_effect=orig)", "self.assertEqual(3, 
self.app._install_classification_flows.call_count) # Installed only for source-port and local dest ports: self.app._install_dispatch_flows.assert_has_calls( [", "id='fc11', topic='topic1', unique_key=11, source_port='lport2', ) fc12 = sfc.FlowClassifier( id='fc12', topic='topic1', unique_key=12, dest_port='lport1', )", "def test_dest_local_port_added(self): lport2.emit_bind_local() self.app._install_classification_flows.assert_not_called() self.app._install_dispatch_flows.assert_called_once_with(fc2) @utils.with_local_objects(fc1, fc2, pc1, *l2_objs) def test_dest_local_port_removed(self): lport2.emit_unbind_local() self.app._uninstall_classification_flows.assert_not_called()", "fc12, fc13, pc2, *l2_objs) def test_install_flow_classifier(self): pc2.emit_created() # Installed only for dest-port and", "binding=test_app_base.local_binding, ) fc1 = sfc.FlowClassifier( id='fc1', topic='topic1', unique_key=22, source_port='lport1', ) fc2 = sfc.FlowClassifier(", "dest_port='lport1', ) fc13 = sfc.FlowClassifier( id='fc13', topic='topic1', unique_key=13, dest_port='lport2', ) pc2 = sfc.PortChain(", "fc13 = sfc.FlowClassifier( id='fc13', topic='topic1', unique_key=13, dest_port='lport2', ) pc2 = sfc.PortChain( id='pc2', topic='topic1',", "# Installed only for dest-port and local source ports: self.app._uninstall_classification_flows.has_calls( [ mock.call(pc2.flow_classifiers[0]), mock.call(pc2.flow_classifiers[2])," ]
[ "parser = argparse.ArgumentParser() parser.add_argument( '--pattern', type=str, required=True ) parser.add_argument( '--steps', type=int, default=100000 )", "== '__main__': import argparse parser = argparse.ArgumentParser() parser.add_argument( '--pattern', type=str, required=True ) parser.add_argument(", "'--steps', type=int, default=100000 ) parser.add_argument( '--stride', type=int, default=1 ) parser.add_argument( '--skip', type=int, default=0", "as device: device.do_paths(paths) if __name__ == '__main__': import argparse parser = argparse.ArgumentParser() parser.add_argument(", "skip = args.skip paths = get(pattern, skip, steps, stride, spatial_concat=True, spatial_concat_eps=1e-4) with Device(scale=0.99,", "parser.add_argument( '--steps', type=int, default=100000 ) parser.add_argument( '--stride', type=int, default=1 ) parser.add_argument( '--skip', type=int,", "penup=0.4) as device: device.do_paths(paths) if __name__ == '__main__': import argparse parser = argparse.ArgumentParser()", "args.steps stride = args.stride skip = args.skip paths = get(pattern, skip, steps, stride,", "xy.device import Device def main(args): from modules.utils import get_paths_from_n_files as get pattern =", "'--stride', type=int, default=1 ) parser.add_argument( '--skip', type=int, default=0 ) args = parser.parse_args() main(args)", "from modules.utils import get_paths_from_n_files as get pattern = args.pattern steps = args.steps stride", "coding: utf-8 -*- from xy.device import Device def main(args): from modules.utils import get_paths_from_n_files", "steps = args.steps stride = args.stride skip = args.skip paths = get(pattern, skip,", "-*- coding: utf-8 -*- from xy.device import Device def main(args): from modules.utils import", "argparse parser = argparse.ArgumentParser() parser.add_argument( '--pattern', type=str, required=True ) parser.add_argument( '--steps', type=int, default=100000", ") parser.add_argument( '--steps', type=int, default=100000 ) parser.add_argument( 
'--stride', type=int, default=1 ) parser.add_argument( '--skip',", "#!/usr/bin/python3 # -*- coding: utf-8 -*- from xy.device import Device def main(args): from", "type=str, required=True ) parser.add_argument( '--steps', type=int, default=100000 ) parser.add_argument( '--stride', type=int, default=1 )", "args.stride skip = args.skip paths = get(pattern, skip, steps, stride, spatial_concat=True, spatial_concat_eps=1e-4) with", "= args.stride skip = args.skip paths = get(pattern, skip, steps, stride, spatial_concat=True, spatial_concat_eps=1e-4)", "args.pattern steps = args.steps stride = args.stride skip = args.skip paths = get(pattern,", "Device def main(args): from modules.utils import get_paths_from_n_files as get pattern = args.pattern steps", "= argparse.ArgumentParser() parser.add_argument( '--pattern', type=str, required=True ) parser.add_argument( '--steps', type=int, default=100000 ) parser.add_argument(", "'--pattern', type=str, required=True ) parser.add_argument( '--steps', type=int, default=100000 ) parser.add_argument( '--stride', type=int, default=1", "= args.pattern steps = args.steps stride = args.stride skip = args.skip paths =", "required=True ) parser.add_argument( '--steps', type=int, default=100000 ) parser.add_argument( '--stride', type=int, default=1 ) parser.add_argument(", "as get pattern = args.pattern steps = args.steps stride = args.stride skip =", "parser.add_argument( '--pattern', type=str, required=True ) parser.add_argument( '--steps', type=int, default=100000 ) parser.add_argument( '--stride', type=int,", "type=int, default=100000 ) parser.add_argument( '--stride', type=int, default=1 ) parser.add_argument( '--skip', type=int, default=0 )", "if __name__ == '__main__': import argparse parser = argparse.ArgumentParser() parser.add_argument( '--pattern', type=str, required=True", "'__main__': import argparse parser = argparse.ArgumentParser() parser.add_argument( '--pattern', type=str, required=True ) parser.add_argument( '--steps',", 
"with Device(scale=0.99, penup=0.4) as device: device.do_paths(paths) if __name__ == '__main__': import argparse parser", "from xy.device import Device def main(args): from modules.utils import get_paths_from_n_files as get pattern", "parser.add_argument( '--stride', type=int, default=1 ) parser.add_argument( '--skip', type=int, default=0 ) args = parser.parse_args()", "steps, stride, spatial_concat=True, spatial_concat_eps=1e-4) with Device(scale=0.99, penup=0.4) as device: device.do_paths(paths) if __name__ ==", "stride, spatial_concat=True, spatial_concat_eps=1e-4) with Device(scale=0.99, penup=0.4) as device: device.do_paths(paths) if __name__ == '__main__':", "get_paths_from_n_files as get pattern = args.pattern steps = args.steps stride = args.stride skip", "get(pattern, skip, steps, stride, spatial_concat=True, spatial_concat_eps=1e-4) with Device(scale=0.99, penup=0.4) as device: device.do_paths(paths) if", "main(args): from modules.utils import get_paths_from_n_files as get pattern = args.pattern steps = args.steps", "get pattern = args.pattern steps = args.steps stride = args.stride skip = args.skip", "argparse.ArgumentParser() parser.add_argument( '--pattern', type=str, required=True ) parser.add_argument( '--steps', type=int, default=100000 ) parser.add_argument( '--stride',", "spatial_concat=True, spatial_concat_eps=1e-4) with Device(scale=0.99, penup=0.4) as device: device.do_paths(paths) if __name__ == '__main__': import", "pattern = args.pattern steps = args.steps stride = args.stride skip = args.skip paths", "import Device def main(args): from modules.utils import get_paths_from_n_files as get pattern = args.pattern", "stride = args.stride skip = args.skip paths = get(pattern, skip, steps, stride, spatial_concat=True,", "= args.steps stride = args.stride skip = args.skip paths = get(pattern, skip, steps,", "skip, steps, stride, spatial_concat=True, spatial_concat_eps=1e-4) with Device(scale=0.99, penup=0.4) as device: device.do_paths(paths) if 
__name__", "import argparse parser = argparse.ArgumentParser() parser.add_argument( '--pattern', type=str, required=True ) parser.add_argument( '--steps', type=int,", "default=100000 ) parser.add_argument( '--stride', type=int, default=1 ) parser.add_argument( '--skip', type=int, default=0 ) args", "import get_paths_from_n_files as get pattern = args.pattern steps = args.steps stride = args.stride", "Device(scale=0.99, penup=0.4) as device: device.do_paths(paths) if __name__ == '__main__': import argparse parser =", "device: device.do_paths(paths) if __name__ == '__main__': import argparse parser = argparse.ArgumentParser() parser.add_argument( '--pattern',", "spatial_concat_eps=1e-4) with Device(scale=0.99, penup=0.4) as device: device.do_paths(paths) if __name__ == '__main__': import argparse", "args.skip paths = get(pattern, skip, steps, stride, spatial_concat=True, spatial_concat_eps=1e-4) with Device(scale=0.99, penup=0.4) as", "= args.skip paths = get(pattern, skip, steps, stride, spatial_concat=True, spatial_concat_eps=1e-4) with Device(scale=0.99, penup=0.4)", "# -*- coding: utf-8 -*- from xy.device import Device def main(args): from modules.utils", "__name__ == '__main__': import argparse parser = argparse.ArgumentParser() parser.add_argument( '--pattern', type=str, required=True )", "device.do_paths(paths) if __name__ == '__main__': import argparse parser = argparse.ArgumentParser() parser.add_argument( '--pattern', type=str,", "utf-8 -*- from xy.device import Device def main(args): from modules.utils import get_paths_from_n_files as", ") parser.add_argument( '--stride', type=int, default=1 ) parser.add_argument( '--skip', type=int, default=0 ) args =", "def main(args): from modules.utils import get_paths_from_n_files as get pattern = args.pattern steps =", "-*- from xy.device import Device def main(args): from modules.utils import get_paths_from_n_files as get", "= get(pattern, skip, steps, stride, spatial_concat=True, spatial_concat_eps=1e-4) with 
Device(scale=0.99, penup=0.4) as device: device.do_paths(paths)", "paths = get(pattern, skip, steps, stride, spatial_concat=True, spatial_concat_eps=1e-4) with Device(scale=0.99, penup=0.4) as device:", "modules.utils import get_paths_from_n_files as get pattern = args.pattern steps = args.steps stride =" ]
[ "import wraps from benchmark import benchmark def memo(func): cache = {} @wraps(func) def", "2: return 1 else: return fib2(n-1) + fib2(n-2) def fib3(m,n): if m[n] ==", "functiontools import wraps from benchmark import benchmark def memo(func): cache = {} @wraps(func)", "{} @wraps(func) def wrap(*args): if args not in cache: cache[args] = func(*args) return", "args not in cache: cache[args] = func(*args) return cache[args] return wrap def fib(n):", "import benchmark def memo(func): cache = {} @wraps(func) def wrap(*args): if args not", "cache: cache[args] = func(*args) return cache[args] return wrap def fib(n): if n <", "+ fib(n-2) @memo def fib2(n): if n < 2: return 1 else: return", "fib(n): if n < 2: return 1 else: return fib(n-1) + fib(n-2) @memo", "m[n] == 0: m[n] = fib3(m, n-1) + fib3(m, n-2) return m[n] @benchmark", "[0] * (n+1) m[0], m[1] = 1, 1 print(fib3(m,n)) if __name__==\"__main__\": n =", "n-2) return m[n] @benchmark def test_fib(n): print(fib(n)) @benchmark def test_fib2(n): print(fib2(n)) @benchmark def", "1 else: return fib2(n-1) + fib2(n-2) def fib3(m,n): if m[n] == 0: m[n]", "== 0: m[n] = fib3(m, n-1) + fib3(m, n-2) return m[n] @benchmark def", "@memo def fib2(n): if n < 2: return 1 else: return fib2(n-1) +", "= [0] * (n+1) m[0], m[1] = 1, 1 print(fib3(m,n)) if __name__==\"__main__\": n", "memo(func): cache = {} @wraps(func) def wrap(*args): if args not in cache: cache[args]", "def fib3(m,n): if m[n] == 0: m[n] = fib3(m, n-1) + fib3(m, n-2)", "else: return fib(n-1) + fib(n-2) @memo def fib2(n): if n < 2: return", "test_fib2(n): print(fib2(n)) @benchmark def test_fib3(n): m = [0] * (n+1) m[0], m[1] =", "< 2: return 1 else: return fib2(n-1) + fib2(n-2) def fib3(m,n): if m[n]", "@wraps(func) def wrap(*args): if args not in cache: cache[args] = func(*args) return cache[args]", "@benchmark def test_fib2(n): print(fib2(n)) @benchmark def test_fib3(n): m = [0] * (n+1) m[0],", "fib3(m, n-2) return m[n] @benchmark def test_fib(n): print(fib(n)) @benchmark 
def test_fib2(n): print(fib2(n)) @benchmark", "return cache[args] return wrap def fib(n): if n < 2: return 1 else:", "wrap(*args): if args not in cache: cache[args] = func(*args) return cache[args] return wrap", "= {} @wraps(func) def wrap(*args): if args not in cache: cache[args] = func(*args)", "n-1) + fib3(m, n-2) return m[n] @benchmark def test_fib(n): print(fib(n)) @benchmark def test_fib2(n):", "= fib3(m, n-1) + fib3(m, n-2) return m[n] @benchmark def test_fib(n): print(fib(n)) @benchmark", "fib2(n-2) def fib3(m,n): if m[n] == 0: m[n] = fib3(m, n-1) + fib3(m,", "0: m[n] = fib3(m, n-1) + fib3(m, n-2) return m[n] @benchmark def test_fib(n):", "* (n+1) m[0], m[1] = 1, 1 print(fib3(m,n)) if __name__==\"__main__\": n = 35", "cache[args] = func(*args) return cache[args] return wrap def fib(n): if n < 2:", "from functiontools import wraps from benchmark import benchmark def memo(func): cache = {}", "def test_fib2(n): print(fib2(n)) @benchmark def test_fib3(n): m = [0] * (n+1) m[0], m[1]", "fib(n-1) + fib(n-2) @memo def fib2(n): if n < 2: return 1 else:", "fib2(n-1) + fib2(n-2) def fib3(m,n): if m[n] == 0: m[n] = fib3(m, n-1)", "func(*args) return cache[args] return wrap def fib(n): if n < 2: return 1", "return fib2(n-1) + fib2(n-2) def fib3(m,n): if m[n] == 0: m[n] = fib3(m,", "return wrap def fib(n): if n < 2: return 1 else: return fib(n-1)", "return 1 else: return fib2(n-1) + fib2(n-2) def fib3(m,n): if m[n] == 0:", "cache = {} @wraps(func) def wrap(*args): if args not in cache: cache[args] =", "m[n] @benchmark def test_fib(n): print(fib(n)) @benchmark def test_fib2(n): print(fib2(n)) @benchmark def test_fib3(n): m", "print(fib(n)) @benchmark def test_fib2(n): print(fib2(n)) @benchmark def test_fib3(n): m = [0] * (n+1)", "not in cache: cache[args] = func(*args) return cache[args] return wrap def fib(n): if", "test_fib3(n): m = [0] * (n+1) m[0], m[1] = 1, 1 print(fib3(m,n)) if", "m[0], m[1] = 1, 1 print(fib3(m,n)) if __name__==\"__main__\": n = 35 test_fib(n) 
test_fib2(n)", "m = [0] * (n+1) m[0], m[1] = 1, 1 print(fib3(m,n)) if __name__==\"__main__\":", "@benchmark def test_fib(n): print(fib(n)) @benchmark def test_fib2(n): print(fib2(n)) @benchmark def test_fib3(n): m =", "fib(n-2) @memo def fib2(n): if n < 2: return 1 else: return fib2(n-1)", "n < 2: return 1 else: return fib2(n-1) + fib2(n-2) def fib3(m,n): if", "if n < 2: return 1 else: return fib2(n-1) + fib2(n-2) def fib3(m,n):", "return 1 else: return fib(n-1) + fib(n-2) @memo def fib2(n): if n <", "@benchmark def test_fib3(n): m = [0] * (n+1) m[0], m[1] = 1, 1", "if m[n] == 0: m[n] = fib3(m, n-1) + fib3(m, n-2) return m[n]", "def fib(n): if n < 2: return 1 else: return fib(n-1) + fib(n-2)", "cache[args] return wrap def fib(n): if n < 2: return 1 else: return", "< 2: return 1 else: return fib(n-1) + fib(n-2) @memo def fib2(n): if", "else: return fib2(n-1) + fib2(n-2) def fib3(m,n): if m[n] == 0: m[n] =", "+ fib3(m, n-2) return m[n] @benchmark def test_fib(n): print(fib(n)) @benchmark def test_fib2(n): print(fib2(n))", "fib3(m, n-1) + fib3(m, n-2) return m[n] @benchmark def test_fib(n): print(fib(n)) @benchmark def", "benchmark import benchmark def memo(func): cache = {} @wraps(func) def wrap(*args): if args", "return m[n] @benchmark def test_fib(n): print(fib(n)) @benchmark def test_fib2(n): print(fib2(n)) @benchmark def test_fib3(n):", "def test_fib(n): print(fib(n)) @benchmark def test_fib2(n): print(fib2(n)) @benchmark def test_fib3(n): m = [0]", "m[n] = fib3(m, n-1) + fib3(m, n-2) return m[n] @benchmark def test_fib(n): print(fib(n))", "= func(*args) return cache[args] return wrap def fib(n): if n < 2: return", "(n+1) m[0], m[1] = 1, 1 print(fib3(m,n)) if __name__==\"__main__\": n = 35 test_fib(n)", "fib3(m,n): if m[n] == 0: m[n] = fib3(m, n-1) + fib3(m, n-2) return", "1 else: return fib(n-1) + fib(n-2) @memo def fib2(n): if n < 2:", "def test_fib3(n): m = [0] * (n+1) m[0], m[1] = 1, 1 print(fib3(m,n))", "+ fib2(n-2) def fib3(m,n): if m[n] == 0: m[n] = 
fib3(m, n-1) +", "in cache: cache[args] = func(*args) return cache[args] return wrap def fib(n): if n", "benchmark def memo(func): cache = {} @wraps(func) def wrap(*args): if args not in", "def wrap(*args): if args not in cache: cache[args] = func(*args) return cache[args] return", "return fib(n-1) + fib(n-2) @memo def fib2(n): if n < 2: return 1", "wraps from benchmark import benchmark def memo(func): cache = {} @wraps(func) def wrap(*args):", "from benchmark import benchmark def memo(func): cache = {} @wraps(func) def wrap(*args): if", "n < 2: return 1 else: return fib(n-1) + fib(n-2) @memo def fib2(n):", "test_fib(n): print(fib(n)) @benchmark def test_fib2(n): print(fib2(n)) @benchmark def test_fib3(n): m = [0] *", "2: return 1 else: return fib(n-1) + fib(n-2) @memo def fib2(n): if n", "m[1] = 1, 1 print(fib3(m,n)) if __name__==\"__main__\": n = 35 test_fib(n) test_fib2(n) test_fib3(n)", "print(fib2(n)) @benchmark def test_fib3(n): m = [0] * (n+1) m[0], m[1] = 1,", "if args not in cache: cache[args] = func(*args) return cache[args] return wrap def", "def memo(func): cache = {} @wraps(func) def wrap(*args): if args not in cache:", "fib2(n): if n < 2: return 1 else: return fib2(n-1) + fib2(n-2) def", "def fib2(n): if n < 2: return 1 else: return fib2(n-1) + fib2(n-2)", "wrap def fib(n): if n < 2: return 1 else: return fib(n-1) +", "if n < 2: return 1 else: return fib(n-1) + fib(n-2) @memo def" ]
[ "20, \"influence\": 50}, {\"name\": \"joe\", \"strength\": 23, \"friendliness\": 79, \"influence\": 30} ] #", "{\"name\": \"library\"}, {\"name\": \"solitary\"}, {\"name\": \"office\"}, ] # Prison schedule data schedule =", "= [ {\"location\": \"cells\", \"duration\": 1}, {\"location\": \"cafeteria\", \"duration\": 3}, {\"location\": \"laundry\", \"duration\":", "= [ {\"name\": \"mcferrin\", \"friendliness\": 13}, {\"name\": \"douglas\", \"friendliness\": 5}, {\"name\": \"hamilton\", \"friendliness\":", "rooms = [ {\"name\": \"cells\"}, {\"name\": \"cafeteria\"}, {\"name\": \"yard\"}, {\"name\": \"laundry\"}, {\"name\": \"library\"},", "room data rooms = [ {\"name\": \"cells\"}, {\"name\": \"cafeteria\"}, {\"name\": \"yard\"}, {\"name\": \"laundry\"},", "\"solitary\"}, {\"name\": \"office\"}, ] # Prison schedule data schedule = [ {\"location\": \"cells\",", "\"friendliness\": 26} ] # Prison room data rooms = [ {\"name\": \"cells\"}, {\"name\":", "] # Prison room data rooms = [ {\"name\": \"cells\"}, {\"name\": \"cafeteria\"}, {\"name\":", "3}, {\"location\": \"laundry\", \"duration\": 2}, {\"location\": \"yard\", \"duration\": 5}, {\"location\": \"cafeteria\", \"duration\": 3},", "{\"name\": \"laundry\"}, {\"name\": \"library\"}, {\"name\": \"solitary\"}, {\"name\": \"office\"}, ] # Prison schedule data", "{\"name\": \"cafeteria\"}, {\"name\": \"yard\"}, {\"name\": \"laundry\"}, {\"name\": \"library\"}, {\"name\": \"solitary\"}, {\"name\": \"office\"}, ]", "{\"name\": \"solitary\"}, {\"name\": \"office\"}, ] # Prison schedule data schedule = [ {\"location\":", "] # Prison schedule data schedule = [ {\"location\": \"cells\", \"duration\": 1}, {\"location\":", "{\"name\": \"yard\"}, {\"name\": \"laundry\"}, {\"name\": \"library\"}, {\"name\": \"solitary\"}, {\"name\": \"office\"}, ] # Prison", "\"friendliness\": 13}, {\"name\": \"douglas\", \"friendliness\": 5}, {\"name\": \"hamilton\", \"friendliness\": 26} ] # Prison", "# Inmates data inmates = [ {\"name\": 
\"billy\", \"strength\": 55, \"friendliness\": 40, \"influence\":", "\"laundry\", \"duration\": 2}, {\"location\": \"yard\", \"duration\": 5}, {\"location\": \"cafeteria\", \"duration\": 3}, {\"location\": \"library\",", "data guards = [ {\"name\": \"mcferrin\", \"friendliness\": 13}, {\"name\": \"douglas\", \"friendliness\": 5}, {\"name\":", "<reponame>Positron11/prison-breakout-game # Inmates data inmates = [ {\"name\": \"billy\", \"strength\": 55, \"friendliness\": 40,", "3}, {\"location\": \"library\", \"duration\": 6}, {\"location\": \"cafeteria\", \"duration\": 4}, {\"location\": \"cells\", \"duration\": 2},", "# Guards data guards = [ {\"name\": \"mcferrin\", \"friendliness\": 13}, {\"name\": \"douglas\", \"friendliness\":", "\"influence\": 50}, {\"name\": \"joe\", \"strength\": 23, \"friendliness\": 79, \"influence\": 30} ] # Guards", "5}, {\"location\": \"cafeteria\", \"duration\": 3}, {\"location\": \"library\", \"duration\": 6}, {\"location\": \"cafeteria\", \"duration\": 4},", "70, \"friendliness\": 20, \"influence\": 50}, {\"name\": \"joe\", \"strength\": 23, \"friendliness\": 79, \"influence\": 30}", "# Prison schedule data schedule = [ {\"location\": \"cells\", \"duration\": 1}, {\"location\": \"cafeteria\",", "{\"name\": \"joe\", \"strength\": 23, \"friendliness\": 79, \"influence\": 30} ] # Guards data guards", "40, \"influence\": 20}, {\"name\": \"bob\", \"strength\": 70, \"friendliness\": 20, \"influence\": 50}, {\"name\": \"joe\",", "\"hamilton\", \"friendliness\": 26} ] # Prison room data rooms = [ {\"name\": \"cells\"},", "Prison schedule data schedule = [ {\"location\": \"cells\", \"duration\": 1}, {\"location\": \"cafeteria\", \"duration\":", "\"friendliness\": 40, \"influence\": 20}, {\"name\": \"bob\", \"strength\": 70, \"friendliness\": 20, \"influence\": 50}, {\"name\":", "\"strength\": 55, \"friendliness\": 40, \"influence\": 20}, {\"name\": \"bob\", \"strength\": 70, \"friendliness\": 20, \"influence\":", "schedule data schedule = [ 
{\"location\": \"cells\", \"duration\": 1}, {\"location\": \"cafeteria\", \"duration\": 3},", "[ {\"location\": \"cells\", \"duration\": 1}, {\"location\": \"cafeteria\", \"duration\": 3}, {\"location\": \"laundry\", \"duration\": 2},", "\"mcferrin\", \"friendliness\": 13}, {\"name\": \"douglas\", \"friendliness\": 5}, {\"name\": \"hamilton\", \"friendliness\": 26} ] #", "1}, {\"location\": \"cafeteria\", \"duration\": 3}, {\"location\": \"laundry\", \"duration\": 2}, {\"location\": \"yard\", \"duration\": 5},", "= [ {\"name\": \"billy\", \"strength\": 55, \"friendliness\": 40, \"influence\": 20}, {\"name\": \"bob\", \"strength\":", "\"friendliness\": 79, \"influence\": 30} ] # Guards data guards = [ {\"name\": \"mcferrin\",", "50}, {\"name\": \"joe\", \"strength\": 23, \"friendliness\": 79, \"influence\": 30} ] # Guards data", "{\"location\": \"library\", \"duration\": 6}, {\"location\": \"cafeteria\", \"duration\": 4}, {\"location\": \"cells\", \"duration\": 2}, ]", "{\"location\": \"cells\", \"duration\": 1}, {\"location\": \"cafeteria\", \"duration\": 3}, {\"location\": \"laundry\", \"duration\": 2}, {\"location\":", "\"duration\": 3}, {\"location\": \"library\", \"duration\": 6}, {\"location\": \"cafeteria\", \"duration\": 4}, {\"location\": \"cells\", \"duration\":", "{\"location\": \"cafeteria\", \"duration\": 3}, {\"location\": \"laundry\", \"duration\": 2}, {\"location\": \"yard\", \"duration\": 5}, {\"location\":", "\"duration\": 3}, {\"location\": \"laundry\", \"duration\": 2}, {\"location\": \"yard\", \"duration\": 5}, {\"location\": \"cafeteria\", \"duration\":", "\"yard\", \"duration\": 5}, {\"location\": \"cafeteria\", \"duration\": 3}, {\"location\": \"library\", \"duration\": 6}, {\"location\": \"cafeteria\",", "13}, {\"name\": \"douglas\", \"friendliness\": 5}, {\"name\": \"hamilton\", \"friendliness\": 26} ] # Prison room", "\"influence\": 20}, {\"name\": \"bob\", \"strength\": 70, \"friendliness\": 20, \"influence\": 50}, {\"name\": \"joe\", 
\"strength\":", "30} ] # Guards data guards = [ {\"name\": \"mcferrin\", \"friendliness\": 13}, {\"name\":", "\"billy\", \"strength\": 55, \"friendliness\": 40, \"influence\": 20}, {\"name\": \"bob\", \"strength\": 70, \"friendliness\": 20,", "55, \"friendliness\": 40, \"influence\": 20}, {\"name\": \"bob\", \"strength\": 70, \"friendliness\": 20, \"influence\": 50},", "[ {\"name\": \"mcferrin\", \"friendliness\": 13}, {\"name\": \"douglas\", \"friendliness\": 5}, {\"name\": \"hamilton\", \"friendliness\": 26}", "data rooms = [ {\"name\": \"cells\"}, {\"name\": \"cafeteria\"}, {\"name\": \"yard\"}, {\"name\": \"laundry\"}, {\"name\":", "\"office\"}, ] # Prison schedule data schedule = [ {\"location\": \"cells\", \"duration\": 1},", "{\"location\": \"laundry\", \"duration\": 2}, {\"location\": \"yard\", \"duration\": 5}, {\"location\": \"cafeteria\", \"duration\": 3}, {\"location\":", "{\"name\": \"mcferrin\", \"friendliness\": 13}, {\"name\": \"douglas\", \"friendliness\": 5}, {\"name\": \"hamilton\", \"friendliness\": 26} ]", "[ {\"name\": \"cells\"}, {\"name\": \"cafeteria\"}, {\"name\": \"yard\"}, {\"name\": \"laundry\"}, {\"name\": \"library\"}, {\"name\": \"solitary\"},", "2}, {\"location\": \"yard\", \"duration\": 5}, {\"location\": \"cafeteria\", \"duration\": 3}, {\"location\": \"library\", \"duration\": 6},", "{\"name\": \"billy\", \"strength\": 55, \"friendliness\": 40, \"influence\": 20}, {\"name\": \"bob\", \"strength\": 70, \"friendliness\":", "inmates = [ {\"name\": \"billy\", \"strength\": 55, \"friendliness\": 40, \"influence\": 20}, {\"name\": \"bob\",", "5}, {\"name\": \"hamilton\", \"friendliness\": 26} ] # Prison room data rooms = [", "\"cafeteria\"}, {\"name\": \"yard\"}, {\"name\": \"laundry\"}, {\"name\": \"library\"}, {\"name\": \"solitary\"}, {\"name\": \"office\"}, ] #", "{\"location\": \"cafeteria\", \"duration\": 3}, {\"location\": \"library\", \"duration\": 6}, {\"location\": \"cafeteria\", \"duration\": 4}, {\"location\":", "[ 
{\"name\": \"billy\", \"strength\": 55, \"friendliness\": 40, \"influence\": 20}, {\"name\": \"bob\", \"strength\": 70,", "Guards data guards = [ {\"name\": \"mcferrin\", \"friendliness\": 13}, {\"name\": \"douglas\", \"friendliness\": 5},", "data schedule = [ {\"location\": \"cells\", \"duration\": 1}, {\"location\": \"cafeteria\", \"duration\": 3}, {\"location\":", "\"laundry\"}, {\"name\": \"library\"}, {\"name\": \"solitary\"}, {\"name\": \"office\"}, ] # Prison schedule data schedule", "Inmates data inmates = [ {\"name\": \"billy\", \"strength\": 55, \"friendliness\": 40, \"influence\": 20},", "\"cells\"}, {\"name\": \"cafeteria\"}, {\"name\": \"yard\"}, {\"name\": \"laundry\"}, {\"name\": \"library\"}, {\"name\": \"solitary\"}, {\"name\": \"office\"},", "] # Guards data guards = [ {\"name\": \"mcferrin\", \"friendliness\": 13}, {\"name\": \"douglas\",", "Prison room data rooms = [ {\"name\": \"cells\"}, {\"name\": \"cafeteria\"}, {\"name\": \"yard\"}, {\"name\":", "20}, {\"name\": \"bob\", \"strength\": 70, \"friendliness\": 20, \"influence\": 50}, {\"name\": \"joe\", \"strength\": 23,", "guards = [ {\"name\": \"mcferrin\", \"friendliness\": 13}, {\"name\": \"douglas\", \"friendliness\": 5}, {\"name\": \"hamilton\",", "\"library\"}, {\"name\": \"solitary\"}, {\"name\": \"office\"}, ] # Prison schedule data schedule = [", "\"bob\", \"strength\": 70, \"friendliness\": 20, \"influence\": 50}, {\"name\": \"joe\", \"strength\": 23, \"friendliness\": 79,", "{\"name\": \"cells\"}, {\"name\": \"cafeteria\"}, {\"name\": \"yard\"}, {\"name\": \"laundry\"}, {\"name\": \"library\"}, {\"name\": \"solitary\"}, {\"name\":", "\"duration\": 2}, {\"location\": \"yard\", \"duration\": 5}, {\"location\": \"cafeteria\", \"duration\": 3}, {\"location\": \"library\", \"duration\":", "\"duration\": 1}, {\"location\": \"cafeteria\", \"duration\": 3}, {\"location\": \"laundry\", \"duration\": 2}, {\"location\": \"yard\", \"duration\":", "# Prison room data rooms = [ {\"name\": 
\"cells\"}, {\"name\": \"cafeteria\"}, {\"name\": \"yard\"},", "data inmates = [ {\"name\": \"billy\", \"strength\": 55, \"friendliness\": 40, \"influence\": 20}, {\"name\":", "{\"name\": \"hamilton\", \"friendliness\": 26} ] # Prison room data rooms = [ {\"name\":", "79, \"influence\": 30} ] # Guards data guards = [ {\"name\": \"mcferrin\", \"friendliness\":", "\"joe\", \"strength\": 23, \"friendliness\": 79, \"influence\": 30} ] # Guards data guards =", "\"friendliness\": 20, \"influence\": 50}, {\"name\": \"joe\", \"strength\": 23, \"friendliness\": 79, \"influence\": 30} ]", "\"influence\": 30} ] # Guards data guards = [ {\"name\": \"mcferrin\", \"friendliness\": 13},", "\"strength\": 23, \"friendliness\": 79, \"influence\": 30} ] # Guards data guards = [", "\"cafeteria\", \"duration\": 3}, {\"location\": \"laundry\", \"duration\": 2}, {\"location\": \"yard\", \"duration\": 5}, {\"location\": \"cafeteria\",", "23, \"friendliness\": 79, \"influence\": 30} ] # Guards data guards = [ {\"name\":", "26} ] # Prison room data rooms = [ {\"name\": \"cells\"}, {\"name\": \"cafeteria\"},", "{\"name\": \"bob\", \"strength\": 70, \"friendliness\": 20, \"influence\": 50}, {\"name\": \"joe\", \"strength\": 23, \"friendliness\":", "\"yard\"}, {\"name\": \"laundry\"}, {\"name\": \"library\"}, {\"name\": \"solitary\"}, {\"name\": \"office\"}, ] # Prison schedule", "schedule = [ {\"location\": \"cells\", \"duration\": 1}, {\"location\": \"cafeteria\", \"duration\": 3}, {\"location\": \"laundry\",", "\"douglas\", \"friendliness\": 5}, {\"name\": \"hamilton\", \"friendliness\": 26} ] # Prison room data rooms", "\"strength\": 70, \"friendliness\": 20, \"influence\": 50}, {\"name\": \"joe\", \"strength\": 23, \"friendliness\": 79, \"influence\":", "{\"name\": \"office\"}, ] # Prison schedule data schedule = [ {\"location\": \"cells\", \"duration\":", "= [ {\"name\": \"cells\"}, {\"name\": \"cafeteria\"}, {\"name\": \"yard\"}, {\"name\": \"laundry\"}, {\"name\": \"library\"}, 
{\"name\":", "\"cafeteria\", \"duration\": 3}, {\"location\": \"library\", \"duration\": 6}, {\"location\": \"cafeteria\", \"duration\": 4}, {\"location\": \"cells\",", "\"cells\", \"duration\": 1}, {\"location\": \"cafeteria\", \"duration\": 3}, {\"location\": \"laundry\", \"duration\": 2}, {\"location\": \"yard\",", "{\"name\": \"douglas\", \"friendliness\": 5}, {\"name\": \"hamilton\", \"friendliness\": 26} ] # Prison room data", "\"friendliness\": 5}, {\"name\": \"hamilton\", \"friendliness\": 26} ] # Prison room data rooms =", "{\"location\": \"yard\", \"duration\": 5}, {\"location\": \"cafeteria\", \"duration\": 3}, {\"location\": \"library\", \"duration\": 6}, {\"location\":", "\"duration\": 5}, {\"location\": \"cafeteria\", \"duration\": 3}, {\"location\": \"library\", \"duration\": 6}, {\"location\": \"cafeteria\", \"duration\":" ]
[ "result_sub_sample_**.pickle import utils.match as mtc import pickle #from utils import hagn import os", "mainarr[field][:,i]) else: if do_smooth: r_p = smooth(mainarr[field], window_len=5, clip_tail_zeros=False) # odd number results", "analized, and found in the result_lambda pickle. Those repeatition must be removed from", "in each nout (lambda_results/{nout}/result_sub_sample_{nout}) Allallidxs=[] for result_thisnout in allresults: Allallidxs.append(np.array([agal.idx for agal in", "load.info import Info def fill_main(mainarr, nnza_cell, tc): # Set up a new array.", "do_smooth=True): finetree=this_gal.maintree mainarr = this_gal.main_arr finearr = np.zeros(len(finetree),dtype=mainarr.dtype) fields_interp = list(mainarr.dtype.names) finearr[\"nstep\"]=finetree[\"nstep\"] finearr[\"id\"]", "== mar[\"nout\"]] = mar[\"vel\"] for field in fields_interp: # Begining of merger if", "finearr[field] = np.interp(lbt, lbt_cell, r_p) return finearr def serialize(allresults, all_final_idxs, nnza_all, nnza_cell, istep_max", "do_smooth: r_p = smooth(mainarr[field], window_len=5, clip_tail_zeros=False) # odd number results in +1 element", "exist. 
finearr[\"vel\"] = finetree[\"vel\"] # finearr[\"nout\"]=nnza_all.a2b(finetree[\"nstep\"],\"nstep\",\"nout\") for field in [\"id\", \"idx\", \"pos\", \"vel\",", "lbt = tc.zred2gyr(nnza_all.a2b(adp[0][0][\"nstep\"],\"nstep\",\"zred\"),z_now=0) adp[0][0] = recf.append_fields(adp[0][0], \"time\", lbt) max_step=len(allresults) this_gal = serialize_results.Serial_result(adp) cnt_merger=0", "if nout in nouts: #print(nout, ss[\"idx\"]) istep_cell = np.where(nnza_cell.nnza[\"nout\"] == nout)[0][0] allresults_now=allresults[istep_cell] allresults_now_idx=Allallidxs[istep_cell]", "nnza_cell, tc) this_gal.finearr = interpol_fine(this_gal, nnza_cell, nnza_all, tc, do_smooth=True) #print(\"BAD\", bad_main) elif len(sat_results)", "import utils.match as mtc import pickle #from utils import hagn import os from", "\"nstep\") interp_fields = list(this_gal.main_arr.dtype.names) for field in [\"nout\", \"nstep\"]: interp_fields.remove(field) lbt_org = tc.zred2gyr(nnza_cell.a2b(mainarr[\"nstep\"],\"nstep\",\"zred\"),", "the middle. continue #print(i, \"IDX=\",this_idx) adp = pickle.load(open(fname, \"rb\")) if min(adp[0][0][\"nstep\"][adp[0][0][\"nstep\"] > 0])", "= mainarr[field] interp_fields.remove(field) for field in interp_fields: if mainarr[field].ndim == 2: for i", "> nstep_too_short_main: print(\"Too short main tree. SKIP\") continue # Append age to maintree", "nnza_cell, istep_max = 50, prg_dir=\"./\", out_base=\"./\", nstep_too_short_main = 100): nouts = nnza_cell.nnza[\"nout\"][:istep_max] print(\"Considering", "np.interp(lbt, lbt_cell, r_p) else: r_p = mainarr[field][:,i] finearr[field][:,i] = np.interp(lbt, lbt_cell, mainarr[field][:,i]) else:", "result_lambda pickle. Those repeatition must be removed from the begining. 
Until then, go", "np.interp(lbt_new, lbt_org, mainarr[field][:,i]) else: r_p = mainarr[field] newarr[field] = np.interp(lbt_new, lbt_org, r_p) return", "= fill_main(this_gal.main_arr, nnza_cell, tc) this_gal.finearr = interpol_fine(this_gal, nnza_cell, nnza_all, tc, do_smooth=True) #print(\"BAD\", bad_main)", "and dump. # Chunks of results in each nout (lambda_results/{nout}/result_sub_sample_{nout}) Allallidxs=[] for result_thisnout", "mar[\"vel\"] for field in fields_interp: # Begining of merger if mainarr[field].ndim == 2:", "finetree[\"pos\"] # Pos and vel can be overwritten if a better measurement from", "[\"id\", \"idx\"]: newarr[field][mtc.match_list_ind(newarr[\"nout\"], mainarr[\"nout\"])] = mainarr[field] interp_fields.remove(field) for field in interp_fields: if mainarr[field].ndim", "i_result) if len(i_result) > 0: sat_results.append(allresults_now[i_result[0]]) sat_results[-1].nout=int(nout) sat_results[-1].nstep=nnza_cell.nnza[\"nstep\"][istep_cell] #print(len(sat_results)) nout_results.append(sat_results) # Merger properties", "the smoothed array. else: r_p = mainarr[field] finearr[field] = np.interp(lbt, lbt_cell, r_p) return", "\"wb\")) all_fidx_ok.append(this_idx) #break np.savetxt(serial_out_dir+\"all_fidx_ok.txt\", all_fidx_ok, fmt=\"%d\") return [pickle.load(open(serial_out_dir+\"serial_result{}.pickle\".format(this_idx), \"rb\")) for this_idx in all_fidx_ok]", "fill_main(mainarr, nnza_cell, tc): # Set up a new array. new_nouts = nnza_cell.nnza[\"nout\"][:mainarr[\"nstep\"].ptp()+1] newarr", "NOT nnza_cell. #print(nout) if nout in nouts: #print(nout, ss[\"idx\"]) istep_cell = np.where(nnza_cell.nnza[\"nout\"] ==", "= np.interp(lbt, lbt_cell, mainarr[field][:,i]) else: if do_smooth: r_p = smooth(mainarr[field], window_len=5, clip_tail_zeros=False) #", "not os.path.isdir(serial_out_dir): os.mkdir(serial_out_dir) # Build serial results and dump. 
# Chunks of results", "as ccm import numpy.lib.recfunctions as recf from utils import cosmology from load.info import", "ss in sat: nout=nnza_all.step2out([ss[\"nstep\"]]) # NOT nnza_cell. #print(nout) if nout in nouts: #print(nout,", "Until then, go through one more step to remove duplicates \"\"\" all_sample_idxs=pickle.load(open(prg_dir +", "to maintree and mainresult. lbt = tc.zred2gyr(nnza_all.a2b(adp[0][0][\"nstep\"],\"nstep\",\"zred\"),z_now=0) adp[0][0] = recf.append_fields(adp[0][0], \"time\", lbt) max_step=len(allresults)", "len(sat_results) > 0 and sat_results[0].mstar > 0.0: #print(\"merger2\") this_gal.add_merger(sat_results, sat) cnt_merger+=1 #this_gal.mergers.append(serialize_results.get_merger_props(this_gal.main_arr, #", "hagn import os from rot2.analysis import * from rot2 import serialize_results #import tree.halomodule", "\"idx\"]: newarr[field][mtc.match_list_ind(newarr[\"nout\"], mainarr[\"nout\"])] = mainarr[field] interp_fields.remove(field) for field in interp_fields: if mainarr[field].ndim ==", "break #print(i) this_gal.data.append(nout_results) pickle.dump(this_gal, open(serial_out_dir+\"serial_result{}.pickle\".format(this_idx), \"wb\")) all_fidx_ok.append(this_idx) #break np.savetxt(serial_out_dir+\"all_fidx_ok.txt\", all_fidx_ok, fmt=\"%d\") return [pickle.load(open(serial_out_dir+\"serial_result{}.pickle\".format(this_idx),", "for i in range(3): r_p = mainarr[field][:,i] newarr[field][:,i] = np.interp(lbt_new, lbt_org, mainarr[field][:,i]) else:", "2: for i in range(3): if do_smooth: r_p = smooth(mainarr[field][:,i], window_len=5, clip_tail_zeros=False) finearr[field][:,i]", "import numpy.lib.recfunctions as recf from utils import cosmology from load.info import Info def", "max_step=len(allresults) this_gal = serialize_results.Serial_result(adp) cnt_merger=0 bad_main=False for i, this_sats in enumerate(adp): nout_results=[] for", "= finetree[\"id\"] finearr[\"idx\"] = finetree[\"idx\"] finearr[\"pos\"] = finetree[\"pos\"] # Pos and 
vel can", "import cosmology from load.info import Info def fill_main(mainarr, nnza_cell, tc): # Set up", "pickle. Those repeatition must be removed from the begining. Until then, go through", "this_gal.main_arr = fill_main(this_gal.main_arr, nnza_cell, tc) this_gal.finearr = interpol_fine(this_gal, nnza_cell, nnza_all, tc, do_smooth=True) #print(\"BAD\",", "Pos and vel can be overwritten if a better measurement from galaxy proeprty", "50, prg_dir=\"./\", out_base=\"./\", nstep_too_short_main = 100): nouts = nnza_cell.nnza[\"nout\"][:istep_max] print(\"Considering nouts: \", nouts)", "\"rb\")) serial_out_dir = out_base+\"result_serial/\" if not os.path.isdir(serial_out_dir): os.mkdir(serial_out_dir) # Build serial results and", "newarr = np.zeros(len(new_nouts), dtype=mainarr.dtype) # It's easy to fill nouts and nsteps. newarr[\"nout\"]=new_nouts", "found in the result_lambda pickle. Those repeatition must be removed from the begining.", "r_p) else: r_p = mainarr[field][:,i] finearr[field][:,i] = np.interp(lbt, lbt_cell, mainarr[field][:,i]) else: if do_smooth:", "<gh_stars>1-10 import numpy as np import galaxymodule # needed for result_sub_sample_**.pickle import utils.match", "istep_cell = np.where(nnza_cell.nnza[\"nout\"] == nout)[0][0] allresults_now=allresults[istep_cell] allresults_now_idx=Allallidxs[istep_cell] i_result = np.where(allresults_now_idx == ss[\"idx\"])[0] #print(\"len", "galaxy results on finetree. def interpol_fine(this_gal, nnza_cell, nnza_all, tc, do_smooth=True): finetree=this_gal.maintree mainarr =", "nnza_cell.nnza[\"nout\"][:istep_max] print(\"Considering nouts: \", nouts) \"\"\" For an unknown reason, some of galaxies", "if not os.path.isfile(fname): # dump_prgs broken in the middle. 
continue #print(i, \"IDX=\",this_idx) adp", "np.where(nnza_cell.nnza[\"nout\"] == nout)[0][0] allresults_now=allresults[istep_cell] allresults_now_idx=Allallidxs[istep_cell] i_result = np.where(allresults_now_idx == ss[\"idx\"])[0] #print(\"len results\", len(allresults_now),", "#bad_main=True this_gal.main_arr = fill_main(this_gal.main_arr, nnza_cell, tc) this_gal.finearr = interpol_fine(this_gal, nnza_cell, nnza_all, tc, do_smooth=True)", "if len(this_gal.main_arr.nstep) <= this_gal.main_arr.nstep.ptp(): #bad_main=True this_gal.main_arr = fill_main(this_gal.main_arr, nnza_cell, tc) this_gal.finearr = interpol_fine(this_gal,", "newarr[field][:,i] = np.interp(lbt_new, lbt_org, mainarr[field][:,i]) else: r_p = mainarr[field] newarr[field] = np.interp(lbt_new, lbt_org,", "r_p = smooth(mainarr[field][:,i], window_len=5, clip_tail_zeros=False) finearr[field][:,i] = np.interp(lbt, lbt_cell, r_p) else: r_p =", "if mainarr[field].ndim == 2: for i in range(3): if do_smooth: r_p = smooth(mainarr[field][:,i],", "adp[0][0] = recf.append_fields(adp[0][0], \"time\", lbt) max_step=len(allresults) this_gal = serialize_results.Serial_result(adp) cnt_merger=0 bad_main=False for i,", "if do_smooth: r_p = smooth(mainarr[field][:,i], window_len=5, clip_tail_zeros=False) finearr[field][:,i] = np.interp(lbt, lbt_cell, r_p) else:", "a better measurement from galaxy proeprty exist. finearr[\"vel\"] = finetree[\"vel\"] # finearr[\"nout\"]=nnza_all.a2b(finetree[\"nstep\"],\"nstep\",\"nout\") for", "finetree[\"idx\"] finearr[\"pos\"] = finetree[\"pos\"] # Pos and vel can be overwritten if a", "i in range(3): r_p = mainarr[field][:,i] newarr[field][:,i] = np.interp(lbt_new, lbt_org, mainarr[field][:,i]) else: r_p", "= np.zeros(len(new_nouts), dtype=mainarr.dtype) # It's easy to fill nouts and nsteps. 
newarr[\"nout\"]=new_nouts newarr[\"nstep\"]=nnza_cell.a2b(newarr[\"nout\"],", "Chunks of results in each nout (lambda_results/{nout}/result_sub_sample_{nout}) Allallidxs=[] for result_thisnout in allresults: Allallidxs.append(np.array([agal.idx", "* from rot2 import serialize_results #import tree.halomodule as hmo #from rot2 import cell_chunk_module", "mar in mainarr: finearr[\"pos\"][finearr[\"nout\"] == mar[\"nout\"]] = mar[\"pos\"] finearr[\"vel\"][finearr[\"nout\"] == mar[\"nout\"]] = mar[\"vel\"]", "from utils import cosmology from load.info import Info def fill_main(mainarr, nnza_cell, tc): #", "hmo #from rot2 import cell_chunk_module as ccm import numpy.lib.recfunctions as recf from utils", "in result_thisnout])) Allallids=[] for result_thisnout in allresults: Allallids.append(np.array([agal.id for agal in result_thisnout])) info", "lbt) max_step=len(allresults) this_gal = serialize_results.Serial_result(adp) cnt_merger=0 bad_main=False for i, this_sats in enumerate(adp): nout_results=[]", "in fields_interp: # Begining of merger if mainarr[field].ndim == 2: for i in", "window_len=5, clip_tail_zeros=False) finearr[field][:,i] = np.interp(lbt, lbt_cell, r_p) else: r_p = mainarr[field][:,i] finearr[field][:,i] =", "in range(3): r_p = mainarr[field][:,i] newarr[field][:,i] = np.interp(lbt_new, lbt_org, mainarr[field][:,i]) else: r_p =", "up a new array. new_nouts = nnza_cell.nnza[\"nout\"][:mainarr[\"nstep\"].ptp()+1] newarr = np.zeros(len(new_nouts), dtype=mainarr.dtype) # It's", "print(\"Considering nouts: \", nouts) \"\"\" For an unknown reason, some of galaxies are", "enumerate(adp): nout_results=[] for sat in this_sats: sat_results=[] for ss in sat: nout=nnza_all.step2out([ss[\"nstep\"]]) #", "maintree and mainresult. 
lbt = tc.zred2gyr(nnza_all.a2b(adp[0][0][\"nstep\"],\"nstep\",\"zred\"),z_now=0) adp[0][0] = recf.append_fields(adp[0][0], \"time\", lbt) max_step=len(allresults) this_gal", "field in [\"nout\", \"nstep\"]: interp_fields.remove(field) lbt_org = tc.zred2gyr(nnza_cell.a2b(mainarr[\"nstep\"],\"nstep\",\"zred\"), z_now=0) lbt_new = tc.zred2gyr(nnza_cell.a2b(newarr[\"nstep\"],\"nstep\",\"zred\"), z_now=0)", "overwritten if a better measurement from galaxy proeprty exist. finearr[\"vel\"] = finetree[\"vel\"] #", "SKIP\") continue # Append age to maintree and mainresult. lbt = tc.zred2gyr(nnza_all.a2b(adp[0][0][\"nstep\"],\"nstep\",\"zred\"),z_now=0) adp[0][0]", "#print(nout) if nout in nouts: #print(nout, ss[\"idx\"]) istep_cell = np.where(nnza_cell.nnza[\"nout\"] == nout)[0][0] allresults_now=allresults[istep_cell]", "galaxy proeprty exist. finearr[\"vel\"] = finetree[\"vel\"] # finearr[\"nout\"]=nnza_all.a2b(finetree[\"nstep\"],\"nstep\",\"nout\") for field in [\"id\", \"idx\",", "len(allresults_now), \"i_result\", i_result) if len(i_result) > 0: sat_results.append(allresults_now[i_result[0]]) sat_results[-1].nout=int(nout) sat_results[-1].nstep=nnza_cell.nnza[\"nstep\"][istep_cell] #print(len(sat_results)) nout_results.append(sat_results) #", "= out_base+\"result_serial/\" if not os.path.isdir(serial_out_dir): os.mkdir(serial_out_dir) # Build serial results and dump. #", "for i, this_idx in enumerate(all_final_idxs): fname = prg_dir + \"{}_adp.pickle\".format(this_idx) if not os.path.isfile(fname):", "remove duplicates \"\"\" all_sample_idxs=pickle.load(open(prg_dir + \"all_sample_idxs.pickle\", \"rb\")) serial_out_dir = out_base+\"result_serial/\" if not os.path.isdir(serial_out_dir):", "= interpol_fine(this_gal, nnza_cell, nnza_all, tc, do_smooth=True) #print(\"BAD\", bad_main) elif len(sat_results) > 0 and", "for sat in this_sats: sat_results=[] for ss in sat: nout=nnza_all.step2out([ss[\"nstep\"]]) # NOT nnza_cell.", "to fill nouts and nsteps. 
newarr[\"nout\"]=new_nouts newarr[\"nstep\"]=nnza_cell.a2b(newarr[\"nout\"], \"nout\", \"nstep\") interp_fields = list(this_gal.main_arr.dtype.names) for", "import pickle #from utils import hagn import os from rot2.analysis import * from", "\"nstep\", \"nout\"]: fields_interp.remove(field) lbt = tc.zred2gyr(nnza_all.a2b(finetree[\"nstep\"],\"nstep\",\"zred\"), z_now=0) lbt_cell = tc.zred2gyr(nnza_cell.a2b(mainarr[\"nstep\"],\"nstep\",\"zred\"), z_now=0) for mar", "odd number results in +1 element in the smoothed array. else: r_p =", "newarr[field][mtc.match_list_ind(newarr[\"nout\"], mainarr[\"nout\"])] = mainarr[field] interp_fields.remove(field) for field in interp_fields: if mainarr[field].ndim == 2:", "import cell_chunk_module as ccm import numpy.lib.recfunctions as recf from utils import cosmology from", "nout_results.append(sat_results) # Merger properties if i == 0: this_gal.main_arr = serialize_results.galresult2rec(sat_results, is_main=True) #print(len(this_gal.main_arr.nstep),", "allresults: Allallidxs.append(np.array([agal.idx for agal in result_thisnout])) Allallids=[] for result_thisnout in allresults: Allallids.append(np.array([agal.id for", "results in +1 element in the smoothed array. else: r_p = mainarr[field] finearr[field]", "== nout)[0][0] allresults_now=allresults[istep_cell] allresults_now_idx=Allallidxs[istep_cell] i_result = np.where(allresults_now_idx == ss[\"idx\"])[0] #print(\"len results\", len(allresults_now), \"i_result\",", "= smooth(mainarr[field], window_len=5, clip_tail_zeros=False) # odd number results in +1 element in the", "in the result_lambda pickle. Those repeatition must be removed from the begining. Until", "nnza_cell. 
#print(nout) if nout in nouts: #print(nout, ss[\"idx\"]) istep_cell = np.where(nnza_cell.nnza[\"nout\"] == nout)[0][0]", "= this_gal.main_arr finearr = np.zeros(len(finetree),dtype=mainarr.dtype) fields_interp = list(mainarr.dtype.names) finearr[\"nstep\"]=finetree[\"nstep\"] finearr[\"id\"] = finetree[\"id\"] finearr[\"idx\"]", "in the smoothed array. else: r_p = mainarr[field] finearr[field] = np.interp(lbt, lbt_cell, r_p)", "= serialize_results.Serial_result(adp) cnt_merger=0 bad_main=False for i, this_sats in enumerate(adp): nout_results=[] for sat in", "from rot2.analysis import * from rot2 import serialize_results #import tree.halomodule as hmo #from", "lbt_org, mainarr[field][:,i]) else: r_p = mainarr[field] newarr[field] = np.interp(lbt_new, lbt_org, r_p) return newarr", "finearr[\"vel\"][finearr[\"nout\"] == mar[\"nout\"]] = mar[\"vel\"] for field in fields_interp: # Begining of merger", "mainarr: finearr[\"pos\"][finearr[\"nout\"] == mar[\"nout\"]] = mar[\"pos\"] finearr[\"vel\"][finearr[\"nout\"] == mar[\"nout\"]] = mar[\"vel\"] for field", "tc, do_smooth=True) #print(\"BAD\", bad_main) elif len(sat_results) > 0 and sat_results[0].mstar > 0.0: #print(\"merger2\")", "\"rb\")) if min(adp[0][0][\"nstep\"][adp[0][0][\"nstep\"] > 0]) > nstep_too_short_main: print(\"Too short main tree. SKIP\") continue", "repeatedly analized, and found in the result_lambda pickle. Those repeatition must be removed", "mainarr[field][:,i]) else: r_p = mainarr[field] newarr[field] = np.interp(lbt_new, lbt_org, r_p) return newarr #", "field in fields_interp: # Begining of merger if mainarr[field].ndim == 2: for i", "smoothed array. else: r_p = mainarr[field] finearr[field] = np.interp(lbt, lbt_cell, r_p) return finearr", "nouts = nnza_cell.nnza[\"nout\"][:istep_max] print(\"Considering nouts: \", nouts) \"\"\" For an unknown reason, some", "nstep_too_short_main: print(\"Too short main tree. 
SKIP\") continue # Append age to maintree and", "sat_results.append(allresults_now[i_result[0]]) sat_results[-1].nout=int(nout) sat_results[-1].nstep=nnza_cell.nnza[\"nstep\"][istep_cell] #print(len(sat_results)) nout_results.append(sat_results) # Merger properties if i == 0: this_gal.main_arr", "for field in interp_fields: if mainarr[field].ndim == 2: for i in range(3): r_p", "# Pos and vel can be overwritten if a better measurement from galaxy", "lbt_cell, r_p) return finearr def serialize(allresults, all_final_idxs, nnza_all, nnza_cell, istep_max = 50, prg_dir=\"./\",", "for field in [\"nout\", \"nstep\"]: interp_fields.remove(field) lbt_org = tc.zred2gyr(nnza_cell.a2b(mainarr[\"nstep\"],\"nstep\",\"zred\"), z_now=0) lbt_new = tc.zred2gyr(nnza_cell.a2b(newarr[\"nstep\"],\"nstep\",\"zred\"),", "tc, do_smooth=True): finetree=this_gal.maintree mainarr = this_gal.main_arr finearr = np.zeros(len(finetree),dtype=mainarr.dtype) fields_interp = list(mainarr.dtype.names) finearr[\"nstep\"]=finetree[\"nstep\"]", "if min(adp[0][0][\"nstep\"][adp[0][0][\"nstep\"] > 0]) > nstep_too_short_main: print(\"Too short main tree. SKIP\") continue #", "Those repeatition must be removed from the begining. 
Until then, go through one", "enumerate(all_final_idxs): fname = prg_dir + \"{}_adp.pickle\".format(this_idx) if not os.path.isfile(fname): # dump_prgs broken in", "rot2 import cell_chunk_module as ccm import numpy.lib.recfunctions as recf from utils import cosmology", "import numpy as np import galaxymodule # needed for result_sub_sample_**.pickle import utils.match as", "for field in [\"id\", \"idx\"]: newarr[field][mtc.match_list_ind(newarr[\"nout\"], mainarr[\"nout\"])] = mainarr[field] interp_fields.remove(field) for field in", "else: r_p = mainarr[field] newarr[field] = np.interp(lbt_new, lbt_org, r_p) return newarr # interpolate", "range(3): if do_smooth: r_p = smooth(mainarr[field][:,i], window_len=5, clip_tail_zeros=False) finearr[field][:,i] = np.interp(lbt, lbt_cell, r_p)", "== 0: this_gal.main_arr = serialize_results.galresult2rec(sat_results, is_main=True) #print(len(this_gal.main_arr.nstep), this_gal.main_arr.nstep.ptp()) if len(this_gal.main_arr.nstep) <= this_gal.main_arr.nstep.ptp(): #bad_main=True", "finetree[\"vel\"] # finearr[\"nout\"]=nnza_all.a2b(finetree[\"nstep\"],\"nstep\",\"nout\") for field in [\"id\", \"idx\", \"pos\", \"vel\", \"nstep\", \"nout\"]: fields_interp.remove(field)", "nout (lambda_results/{nout}/result_sub_sample_{nout}) Allallidxs=[] for result_thisnout in allresults: Allallidxs.append(np.array([agal.idx for agal in result_thisnout])) Allallids=[]", "\"\"\" all_sample_idxs=pickle.load(open(prg_dir + \"all_sample_idxs.pickle\", \"rb\")) serial_out_dir = out_base+\"result_serial/\" if not os.path.isdir(serial_out_dir): os.mkdir(serial_out_dir) #", "= mainarr[field][:,i] newarr[field][:,i] = np.interp(lbt_new, lbt_org, mainarr[field][:,i]) else: r_p = mainarr[field] newarr[field] =", "= nnza_cell.nnza[\"nout\"][:istep_max] print(\"Considering nouts: \", nouts) \"\"\" For an unknown reason, some of", "this_gal.main_arr.nstep.ptp(): #bad_main=True this_gal.main_arr = fill_main(this_gal.main_arr, nnza_cell, tc) this_gal.finearr = 
interpol_fine(this_gal, nnza_cell, nnza_all, tc,", "#print(i) this_gal.data.append(nout_results) pickle.dump(this_gal, open(serial_out_dir+\"serial_result{}.pickle\".format(this_idx), \"wb\")) all_fidx_ok.append(this_idx) #break np.savetxt(serial_out_dir+\"all_fidx_ok.txt\", all_fidx_ok, fmt=\"%d\") return [pickle.load(open(serial_out_dir+\"serial_result{}.pickle\".format(this_idx), \"rb\"))", "cnt_merger=0 bad_main=False for i, this_sats in enumerate(adp): nout_results=[] for sat in this_sats: sat_results=[]", "# dump_prgs broken in the middle. continue #print(i, \"IDX=\",this_idx) adp = pickle.load(open(fname, \"rb\"))", "import os from rot2.analysis import * from rot2 import serialize_results #import tree.halomodule as", "mainarr = this_gal.main_arr finearr = np.zeros(len(finetree),dtype=mainarr.dtype) fields_interp = list(mainarr.dtype.names) finearr[\"nstep\"]=finetree[\"nstep\"] finearr[\"id\"] = finetree[\"id\"]", "print(\"Bad main. Break\") break #print(i) this_gal.data.append(nout_results) pickle.dump(this_gal, open(serial_out_dir+\"serial_result{}.pickle\".format(this_idx), \"wb\")) all_fidx_ok.append(this_idx) #break np.savetxt(serial_out_dir+\"all_fidx_ok.txt\", all_fidx_ok,", "np.zeros(len(finetree),dtype=mainarr.dtype) fields_interp = list(mainarr.dtype.names) finearr[\"nstep\"]=finetree[\"nstep\"] finearr[\"id\"] = finetree[\"id\"] finearr[\"idx\"] = finetree[\"idx\"] finearr[\"pos\"] =", "== ss[\"idx\"])[0] #print(\"len results\", len(allresults_now), \"i_result\", i_result) if len(i_result) > 0: sat_results.append(allresults_now[i_result[0]]) sat_results[-1].nout=int(nout)", "Info(nout=nouts[0]) tc = cosmology.Timeconvert(info, zred_now=0) all_fid_ok=[] all_fidx_ok=[] for i, this_idx in enumerate(all_final_idxs): fname", "finearr[\"vel\"] = finetree[\"vel\"] # finearr[\"nout\"]=nnza_all.a2b(finetree[\"nstep\"],\"nstep\",\"nout\") for field in [\"id\", \"idx\", \"pos\", \"vel\", \"nstep\",", "= prg_dir + \"{}_adp.pickle\".format(this_idx) if not 
os.path.isfile(fname): # dump_prgs broken in the middle.", "np.zeros(len(new_nouts), dtype=mainarr.dtype) # It's easy to fill nouts and nsteps. newarr[\"nout\"]=new_nouts newarr[\"nstep\"]=nnza_cell.a2b(newarr[\"nout\"], \"nout\",", "new array. new_nouts = nnza_cell.nnza[\"nout\"][:mainarr[\"nstep\"].ptp()+1] newarr = np.zeros(len(new_nouts), dtype=mainarr.dtype) # It's easy to", "each nout (lambda_results/{nout}/result_sub_sample_{nout}) Allallidxs=[] for result_thisnout in allresults: Allallidxs.append(np.array([agal.idx for agal in result_thisnout]))", "finearr[\"nstep\"]=finetree[\"nstep\"] finearr[\"id\"] = finetree[\"id\"] finearr[\"idx\"] = finetree[\"idx\"] finearr[\"pos\"] = finetree[\"pos\"] # Pos and", "in [\"id\", \"idx\", \"pos\", \"vel\", \"nstep\", \"nout\"]: fields_interp.remove(field) lbt = tc.zred2gyr(nnza_all.a2b(finetree[\"nstep\"],\"nstep\",\"zred\"), z_now=0) lbt_cell", "nouts: #print(nout, ss[\"idx\"]) istep_cell = np.where(nnza_cell.nnza[\"nout\"] == nout)[0][0] allresults_now=allresults[istep_cell] allresults_now_idx=Allallidxs[istep_cell] i_result = np.where(allresults_now_idx", "agal in result_thisnout])) info = Info(nout=nouts[0]) tc = cosmology.Timeconvert(info, zred_now=0) all_fid_ok=[] all_fidx_ok=[] for", "lbt_cell, mainarr[field][:,i]) else: if do_smooth: r_p = smooth(mainarr[field], window_len=5, clip_tail_zeros=False) # odd number", "window_len=5, clip_tail_zeros=False) # odd number results in +1 element in the smoothed array.", "np.interp(lbt, lbt_cell, mainarr[field][:,i]) else: if do_smooth: r_p = smooth(mainarr[field], window_len=5, clip_tail_zeros=False) # odd", "newarr # interpolate main galaxy results on finetree. 
def interpol_fine(this_gal, nnza_cell, nnza_all, tc,", "more step to remove duplicates \"\"\" all_sample_idxs=pickle.load(open(prg_dir + \"all_sample_idxs.pickle\", \"rb\")) serial_out_dir = out_base+\"result_serial/\"", "in allresults: Allallids.append(np.array([agal.id for agal in result_thisnout])) info = Info(nout=nouts[0]) tc = cosmology.Timeconvert(info,", "age to maintree and mainresult. lbt = tc.zred2gyr(nnza_all.a2b(adp[0][0][\"nstep\"],\"nstep\",\"zred\"),z_now=0) adp[0][0] = recf.append_fields(adp[0][0], \"time\", lbt)", "os.path.isdir(serial_out_dir): os.mkdir(serial_out_dir) # Build serial results and dump. # Chunks of results in", "in this_sats: sat_results=[] for ss in sat: nout=nnza_all.step2out([ss[\"nstep\"]]) # NOT nnza_cell. #print(nout) if", "0]) > nstep_too_short_main: print(\"Too short main tree. SKIP\") continue # Append age to", "be removed from the begining. Until then, go through one more step to", "numpy.lib.recfunctions as recf from utils import cosmology from load.info import Info def fill_main(mainarr,", "easy to fill nouts and nsteps. newarr[\"nout\"]=new_nouts newarr[\"nstep\"]=nnza_cell.a2b(newarr[\"nout\"], \"nout\", \"nstep\") interp_fields = list(this_gal.main_arr.dtype.names)", "cell_chunk_module as ccm import numpy.lib.recfunctions as recf from utils import cosmology from load.info", "zred_now=0) all_fid_ok=[] all_fidx_ok=[] for i, this_idx in enumerate(all_final_idxs): fname = prg_dir + \"{}_adp.pickle\".format(this_idx)", "smooth(mainarr[field][:,i], window_len=5, clip_tail_zeros=False) finearr[field][:,i] = np.interp(lbt, lbt_cell, r_p) else: r_p = mainarr[field][:,i] finearr[field][:,i]", "= mainarr[field] newarr[field] = np.interp(lbt_new, lbt_org, r_p) return newarr # interpolate main galaxy", "this_gal.add_merger(sat_results, sat) cnt_merger+=1 #this_gal.mergers.append(serialize_results.get_merger_props(this_gal.main_arr, # serialize_results.galresult2rec(sat_results))) if bad_main: print(\"Bad main. 
Break\") break #print(i)", "an unknown reason, some of galaxies are repeatedly analized, and found in the", "Merger properties if i == 0: this_gal.main_arr = serialize_results.galresult2rec(sat_results, is_main=True) #print(len(this_gal.main_arr.nstep), this_gal.main_arr.nstep.ptp()) if", "newarr[\"nstep\"]=nnza_cell.a2b(newarr[\"nout\"], \"nout\", \"nstep\") interp_fields = list(this_gal.main_arr.dtype.names) for field in [\"nout\", \"nstep\"]: interp_fields.remove(field) lbt_org", "= tc.zred2gyr(nnza_all.a2b(finetree[\"nstep\"],\"nstep\",\"zred\"), z_now=0) lbt_cell = tc.zred2gyr(nnza_cell.a2b(mainarr[\"nstep\"],\"nstep\",\"zred\"), z_now=0) for mar in mainarr: finearr[\"pos\"][finearr[\"nout\"] ==", "nnza_cell, nnza_all, tc, do_smooth=True): finetree=this_gal.maintree mainarr = this_gal.main_arr finearr = np.zeros(len(finetree),dtype=mainarr.dtype) fields_interp =", "#import tree.halomodule as hmo #from rot2 import cell_chunk_module as ccm import numpy.lib.recfunctions as", "do_smooth: r_p = smooth(mainarr[field][:,i], window_len=5, clip_tail_zeros=False) finearr[field][:,i] = np.interp(lbt, lbt_cell, r_p) else: r_p", "nouts and nsteps. 
newarr[\"nout\"]=new_nouts newarr[\"nstep\"]=nnza_cell.a2b(newarr[\"nout\"], \"nout\", \"nstep\") interp_fields = list(this_gal.main_arr.dtype.names) for field in", "result_thisnout in allresults: Allallids.append(np.array([agal.id for agal in result_thisnout])) info = Info(nout=nouts[0]) tc =", "list(mainarr.dtype.names) finearr[\"nstep\"]=finetree[\"nstep\"] finearr[\"id\"] = finetree[\"id\"] finearr[\"idx\"] = finetree[\"idx\"] finearr[\"pos\"] = finetree[\"pos\"] # Pos", "#print(i, \"IDX=\",this_idx) adp = pickle.load(open(fname, \"rb\")) if min(adp[0][0][\"nstep\"][adp[0][0][\"nstep\"] > 0]) > nstep_too_short_main: print(\"Too", "utils.match as mtc import pickle #from utils import hagn import os from rot2.analysis", "interpol_fine(this_gal, nnza_cell, nnza_all, tc, do_smooth=True) #print(\"BAD\", bad_main) elif len(sat_results) > 0 and sat_results[0].mstar", "= np.where(nnza_cell.nnza[\"nout\"] == nout)[0][0] allresults_now=allresults[istep_cell] allresults_now_idx=Allallidxs[istep_cell] i_result = np.where(allresults_now_idx == ss[\"idx\"])[0] #print(\"len results\",", "else: r_p = mainarr[field][:,i] finearr[field][:,i] = np.interp(lbt, lbt_cell, mainarr[field][:,i]) else: if do_smooth: r_p", "interp_fields.remove(field) lbt_org = tc.zred2gyr(nnza_cell.a2b(mainarr[\"nstep\"],\"nstep\",\"zred\"), z_now=0) lbt_new = tc.zred2gyr(nnza_cell.a2b(newarr[\"nstep\"],\"nstep\",\"zred\"), z_now=0) for field in [\"id\",", "mainarr[field][:,i] finearr[field][:,i] = np.interp(lbt, lbt_cell, mainarr[field][:,i]) else: if do_smooth: r_p = smooth(mainarr[field], window_len=5,", "of merger if mainarr[field].ndim == 2: for i in range(3): if do_smooth: r_p", "return newarr # interpolate main galaxy results on finetree. 
def interpol_fine(this_gal, nnza_cell, nnza_all,", "= Info(nout=nouts[0]) tc = cosmology.Timeconvert(info, zred_now=0) all_fid_ok=[] all_fidx_ok=[] for i, this_idx in enumerate(all_final_idxs):", "import Info def fill_main(mainarr, nnza_cell, tc): # Set up a new array. new_nouts", "number results in +1 element in the smoothed array. else: r_p = mainarr[field]", "import * from rot2 import serialize_results #import tree.halomodule as hmo #from rot2 import", "all_sample_idxs=pickle.load(open(prg_dir + \"all_sample_idxs.pickle\", \"rb\")) serial_out_dir = out_base+\"result_serial/\" if not os.path.isdir(serial_out_dir): os.mkdir(serial_out_dir) # Build", "dump_prgs broken in the middle. continue #print(i, \"IDX=\",this_idx) adp = pickle.load(open(fname, \"rb\")) if", "all_fid_ok=[] all_fidx_ok=[] for i, this_idx in enumerate(all_final_idxs): fname = prg_dir + \"{}_adp.pickle\".format(this_idx) if", "= serialize_results.galresult2rec(sat_results, is_main=True) #print(len(this_gal.main_arr.nstep), this_gal.main_arr.nstep.ptp()) if len(this_gal.main_arr.nstep) <= this_gal.main_arr.nstep.ptp(): #bad_main=True this_gal.main_arr = fill_main(this_gal.main_arr,", "def fill_main(mainarr, nnza_cell, tc): # Set up a new array. new_nouts = nnza_cell.nnza[\"nout\"][:mainarr[\"nstep\"].ptp()+1]", "mainarr[field] newarr[field] = np.interp(lbt_new, lbt_org, r_p) return newarr # interpolate main galaxy results", "newarr[field] = np.interp(lbt_new, lbt_org, r_p) return newarr # interpolate main galaxy results on", "in enumerate(adp): nout_results=[] for sat in this_sats: sat_results=[] for ss in sat: nout=nnza_all.step2out([ss[\"nstep\"]])", "merger if mainarr[field].ndim == 2: for i in range(3): if do_smooth: r_p =", "tree. SKIP\") continue # Append age to maintree and mainresult. 
lbt = tc.zred2gyr(nnza_all.a2b(adp[0][0][\"nstep\"],\"nstep\",\"zred\"),z_now=0)", "= np.interp(lbt_new, lbt_org, r_p) return newarr # interpolate main galaxy results on finetree.", "sat: nout=nnza_all.step2out([ss[\"nstep\"]]) # NOT nnza_cell. #print(nout) if nout in nouts: #print(nout, ss[\"idx\"]) istep_cell", "proeprty exist. finearr[\"vel\"] = finetree[\"vel\"] # finearr[\"nout\"]=nnza_all.a2b(finetree[\"nstep\"],\"nstep\",\"nout\") for field in [\"id\", \"idx\", \"pos\",", "galaxymodule # needed for result_sub_sample_**.pickle import utils.match as mtc import pickle #from utils", "as mtc import pickle #from utils import hagn import os from rot2.analysis import", "in [\"id\", \"idx\"]: newarr[field][mtc.match_list_ind(newarr[\"nout\"], mainarr[\"nout\"])] = mainarr[field] interp_fields.remove(field) for field in interp_fields: if", "pickle #from utils import hagn import os from rot2.analysis import * from rot2", "in allresults: Allallidxs.append(np.array([agal.idx for agal in result_thisnout])) Allallids=[] for result_thisnout in allresults: Allallids.append(np.array([agal.id", "r_p) return newarr # interpolate main galaxy results on finetree. def interpol_fine(this_gal, nnza_cell,", "os.path.isfile(fname): # dump_prgs broken in the middle. 
continue #print(i, \"IDX=\",this_idx) adp = pickle.load(open(fname,", "0: this_gal.main_arr = serialize_results.galresult2rec(sat_results, is_main=True) #print(len(this_gal.main_arr.nstep), this_gal.main_arr.nstep.ptp()) if len(this_gal.main_arr.nstep) <= this_gal.main_arr.nstep.ptp(): #bad_main=True this_gal.main_arr", "agal in result_thisnout])) Allallids=[] for result_thisnout in allresults: Allallids.append(np.array([agal.id for agal in result_thisnout]))", "then, go through one more step to remove duplicates \"\"\" all_sample_idxs=pickle.load(open(prg_dir + \"all_sample_idxs.pickle\",", "= np.where(allresults_now_idx == ss[\"idx\"])[0] #print(\"len results\", len(allresults_now), \"i_result\", i_result) if len(i_result) > 0:", "this_gal.finearr = interpol_fine(this_gal, nnza_cell, nnza_all, tc, do_smooth=True) #print(\"BAD\", bad_main) elif len(sat_results) > 0", "some of galaxies are repeatedly analized, and found in the result_lambda pickle. Those", "array. else: r_p = mainarr[field] finearr[field] = np.interp(lbt, lbt_cell, r_p) return finearr def", "nnza_cell, tc): # Set up a new array. new_nouts = nnza_cell.nnza[\"nout\"][:mainarr[\"nstep\"].ptp()+1] newarr =", "for ss in sat: nout=nnza_all.step2out([ss[\"nstep\"]]) # NOT nnza_cell. #print(nout) if nout in nouts:", "all_final_idxs, nnza_all, nnza_cell, istep_max = 50, prg_dir=\"./\", out_base=\"./\", nstep_too_short_main = 100): nouts =", "main. Break\") break #print(i) this_gal.data.append(nout_results) pickle.dump(this_gal, open(serial_out_dir+\"serial_result{}.pickle\".format(this_idx), \"wb\")) all_fidx_ok.append(this_idx) #break np.savetxt(serial_out_dir+\"all_fidx_ok.txt\", all_fidx_ok, fmt=\"%d\")", "r_p = smooth(mainarr[field], window_len=5, clip_tail_zeros=False) # odd number results in +1 element in", "+ \"{}_adp.pickle\".format(this_idx) if not os.path.isfile(fname): # dump_prgs broken in the middle. 
continue #print(i,", "\"\"\" For an unknown reason, some of galaxies are repeatedly analized, and found", "finearr[\"idx\"] = finetree[\"idx\"] finearr[\"pos\"] = finetree[\"pos\"] # Pos and vel can be overwritten", "this_gal.main_arr.nstep.ptp()) if len(this_gal.main_arr.nstep) <= this_gal.main_arr.nstep.ptp(): #bad_main=True this_gal.main_arr = fill_main(this_gal.main_arr, nnza_cell, tc) this_gal.finearr =", "\"pos\", \"vel\", \"nstep\", \"nout\"]: fields_interp.remove(field) lbt = tc.zred2gyr(nnza_all.a2b(finetree[\"nstep\"],\"nstep\",\"zred\"), z_now=0) lbt_cell = tc.zred2gyr(nnza_cell.a2b(mainarr[\"nstep\"],\"nstep\",\"zred\"), z_now=0)", "== mar[\"nout\"]] = mar[\"pos\"] finearr[\"vel\"][finearr[\"nout\"] == mar[\"nout\"]] = mar[\"vel\"] for field in fields_interp:", "newarr[\"nout\"]=new_nouts newarr[\"nstep\"]=nnza_cell.a2b(newarr[\"nout\"], \"nout\", \"nstep\") interp_fields = list(this_gal.main_arr.dtype.names) for field in [\"nout\", \"nstep\"]: interp_fields.remove(field)", "utils import cosmology from load.info import Info def fill_main(mainarr, nnza_cell, tc): # Set", "in sat: nout=nnza_all.step2out([ss[\"nstep\"]]) # NOT nnza_cell. 
#print(nout) if nout in nouts: #print(nout, ss[\"idx\"])", "import serialize_results #import tree.halomodule as hmo #from rot2 import cell_chunk_module as ccm import", "nout)[0][0] allresults_now=allresults[istep_cell] allresults_now_idx=Allallidxs[istep_cell] i_result = np.where(allresults_now_idx == ss[\"idx\"])[0] #print(\"len results\", len(allresults_now), \"i_result\", i_result)", "duplicates \"\"\" all_sample_idxs=pickle.load(open(prg_dir + \"all_sample_idxs.pickle\", \"rb\")) serial_out_dir = out_base+\"result_serial/\" if not os.path.isdir(serial_out_dir): os.mkdir(serial_out_dir)", "cosmology.Timeconvert(info, zred_now=0) all_fid_ok=[] all_fidx_ok=[] for i, this_idx in enumerate(all_final_idxs): fname = prg_dir +", "tc.zred2gyr(nnza_cell.a2b(newarr[\"nstep\"],\"nstep\",\"zred\"), z_now=0) for field in [\"id\", \"idx\"]: newarr[field][mtc.match_list_ind(newarr[\"nout\"], mainarr[\"nout\"])] = mainarr[field] interp_fields.remove(field) for", "tc.zred2gyr(nnza_all.a2b(finetree[\"nstep\"],\"nstep\",\"zred\"), z_now=0) lbt_cell = tc.zred2gyr(nnza_cell.a2b(mainarr[\"nstep\"],\"nstep\",\"zred\"), z_now=0) for mar in mainarr: finearr[\"pos\"][finearr[\"nout\"] == mar[\"nout\"]]", "if i == 0: this_gal.main_arr = serialize_results.galresult2rec(sat_results, is_main=True) #print(len(this_gal.main_arr.nstep), this_gal.main_arr.nstep.ptp()) if len(this_gal.main_arr.nstep) <=", "= list(this_gal.main_arr.dtype.names) for field in [\"nout\", \"nstep\"]: interp_fields.remove(field) lbt_org = tc.zred2gyr(nnza_cell.a2b(mainarr[\"nstep\"],\"nstep\",\"zred\"), z_now=0) lbt_new", "middle. 
continue #print(i, \"IDX=\",this_idx) adp = pickle.load(open(fname, \"rb\")) if min(adp[0][0][\"nstep\"][adp[0][0][\"nstep\"] > 0]) >", "\"time\", lbt) max_step=len(allresults) this_gal = serialize_results.Serial_result(adp) cnt_merger=0 bad_main=False for i, this_sats in enumerate(adp):", "fill_main(this_gal.main_arr, nnza_cell, tc) this_gal.finearr = interpol_fine(this_gal, nnza_cell, nnza_all, tc, do_smooth=True) #print(\"BAD\", bad_main) elif", "0 and sat_results[0].mstar > 0.0: #print(\"merger2\") this_gal.add_merger(sat_results, sat) cnt_merger+=1 #this_gal.mergers.append(serialize_results.get_merger_props(this_gal.main_arr, # serialize_results.galresult2rec(sat_results))) if", "= np.zeros(len(finetree),dtype=mainarr.dtype) fields_interp = list(mainarr.dtype.names) finearr[\"nstep\"]=finetree[\"nstep\"] finearr[\"id\"] = finetree[\"id\"] finearr[\"idx\"] = finetree[\"idx\"] finearr[\"pos\"]", "if do_smooth: r_p = smooth(mainarr[field], window_len=5, clip_tail_zeros=False) # odd number results in +1", "serialize_results.Serial_result(adp) cnt_merger=0 bad_main=False for i, this_sats in enumerate(adp): nout_results=[] for sat in this_sats:", "r_p) return finearr def serialize(allresults, all_final_idxs, nnza_all, nnza_cell, istep_max = 50, prg_dir=\"./\", out_base=\"./\",", "For an unknown reason, some of galaxies are repeatedly analized, and found in", "finearr[\"nout\"]=nnza_all.a2b(finetree[\"nstep\"],\"nstep\",\"nout\") for field in [\"id\", \"idx\", \"pos\", \"vel\", \"nstep\", \"nout\"]: fields_interp.remove(field) lbt =", "begining. 
Until then, go through one more step to remove duplicates \"\"\" all_sample_idxs=pickle.load(open(prg_dir", "len(i_result) > 0: sat_results.append(allresults_now[i_result[0]]) sat_results[-1].nout=int(nout) sat_results[-1].nstep=nnza_cell.nnza[\"nstep\"][istep_cell] #print(len(sat_results)) nout_results.append(sat_results) # Merger properties if i", "fields_interp = list(mainarr.dtype.names) finearr[\"nstep\"]=finetree[\"nstep\"] finearr[\"id\"] = finetree[\"id\"] finearr[\"idx\"] = finetree[\"idx\"] finearr[\"pos\"] = finetree[\"pos\"]", "if a better measurement from galaxy proeprty exist. finearr[\"vel\"] = finetree[\"vel\"] # finearr[\"nout\"]=nnza_all.a2b(finetree[\"nstep\"],\"nstep\",\"nout\")", "#print(\"BAD\", bad_main) elif len(sat_results) > 0 and sat_results[0].mstar > 0.0: #print(\"merger2\") this_gal.add_merger(sat_results, sat)", "#this_gal.mergers.append(serialize_results.get_merger_props(this_gal.main_arr, # serialize_results.galresult2rec(sat_results))) if bad_main: print(\"Bad main. Break\") break #print(i) this_gal.data.append(nout_results) pickle.dump(this_gal, open(serial_out_dir+\"serial_result{}.pickle\".format(this_idx),", "mtc import pickle #from utils import hagn import os from rot2.analysis import *", "the begining. Until then, go through one more step to remove duplicates \"\"\"", "finetree. 
def interpol_fine(this_gal, nnza_cell, nnza_all, tc, do_smooth=True): finetree=this_gal.maintree mainarr = this_gal.main_arr finearr =", "= mar[\"pos\"] finearr[\"vel\"][finearr[\"nout\"] == mar[\"nout\"]] = mar[\"vel\"] for field in fields_interp: # Begining", "to remove duplicates \"\"\" all_sample_idxs=pickle.load(open(prg_dir + \"all_sample_idxs.pickle\", \"rb\")) serial_out_dir = out_base+\"result_serial/\" if not", "adp = pickle.load(open(fname, \"rb\")) if min(adp[0][0][\"nstep\"][adp[0][0][\"nstep\"] > 0]) > nstep_too_short_main: print(\"Too short main", "this_sats in enumerate(adp): nout_results=[] for sat in this_sats: sat_results=[] for ss in sat:", "r_p = mainarr[field] finearr[field] = np.interp(lbt, lbt_cell, r_p) return finearr def serialize(allresults, all_final_idxs,", "np.where(allresults_now_idx == ss[\"idx\"])[0] #print(\"len results\", len(allresults_now), \"i_result\", i_result) if len(i_result) > 0: sat_results.append(allresults_now[i_result[0]])", "main galaxy results on finetree. def interpol_fine(this_gal, nnza_cell, nnza_all, tc, do_smooth=True): finetree=this_gal.maintree mainarr", "go through one more step to remove duplicates \"\"\" all_sample_idxs=pickle.load(open(prg_dir + \"all_sample_idxs.pickle\", \"rb\"))", "Build serial results and dump. # Chunks of results in each nout (lambda_results/{nout}/result_sub_sample_{nout})", "= 100): nouts = nnza_cell.nnza[\"nout\"][:istep_max] print(\"Considering nouts: \", nouts) \"\"\" For an unknown", "nout=nnza_all.step2out([ss[\"nstep\"]]) # NOT nnza_cell. 
#print(nout) if nout in nouts: #print(nout, ss[\"idx\"]) istep_cell =", "for field in fields_interp: # Begining of merger if mainarr[field].ndim == 2: for", "sat_results[-1].nstep=nnza_cell.nnza[\"nstep\"][istep_cell] #print(len(sat_results)) nout_results.append(sat_results) # Merger properties if i == 0: this_gal.main_arr = serialize_results.galresult2rec(sat_results,", "z_now=0) for field in [\"id\", \"idx\"]: newarr[field][mtc.match_list_ind(newarr[\"nout\"], mainarr[\"nout\"])] = mainarr[field] interp_fields.remove(field) for field", "if mainarr[field].ndim == 2: for i in range(3): r_p = mainarr[field][:,i] newarr[field][:,i] =", "must be removed from the begining. Until then, go through one more step", "for result_thisnout in allresults: Allallidxs.append(np.array([agal.idx for agal in result_thisnout])) Allallids=[] for result_thisnout in", "= tc.zred2gyr(nnza_cell.a2b(mainarr[\"nstep\"],\"nstep\",\"zred\"), z_now=0) for mar in mainarr: finearr[\"pos\"][finearr[\"nout\"] == mar[\"nout\"]] = mar[\"pos\"] finearr[\"vel\"][finearr[\"nout\"]", "mainarr[field] finearr[field] = np.interp(lbt, lbt_cell, r_p) return finearr def serialize(allresults, all_final_idxs, nnza_all, nnza_cell,", "for result_thisnout in allresults: Allallids.append(np.array([agal.id for agal in result_thisnout])) info = Info(nout=nouts[0]) tc", "all_fidx_ok=[] for i, this_idx in enumerate(all_final_idxs): fname = prg_dir + \"{}_adp.pickle\".format(this_idx) if not", "lbt_new = tc.zred2gyr(nnza_cell.a2b(newarr[\"nstep\"],\"nstep\",\"zred\"), z_now=0) for field in [\"id\", \"idx\"]: newarr[field][mtc.match_list_ind(newarr[\"nout\"], mainarr[\"nout\"])] = mainarr[field]", "z_now=0) for mar in mainarr: finearr[\"pos\"][finearr[\"nout\"] == mar[\"nout\"]] = mar[\"pos\"] finearr[\"vel\"][finearr[\"nout\"] == mar[\"nout\"]]", "properties if i == 0: this_gal.main_arr = serialize_results.galresult2rec(sat_results, is_main=True) #print(len(this_gal.main_arr.nstep), this_gal.main_arr.nstep.ptp()) if 
len(this_gal.main_arr.nstep)", "nsteps. newarr[\"nout\"]=new_nouts newarr[\"nstep\"]=nnza_cell.a2b(newarr[\"nout\"], \"nout\", \"nstep\") interp_fields = list(this_gal.main_arr.dtype.names) for field in [\"nout\", \"nstep\"]:", "be overwritten if a better measurement from galaxy proeprty exist. finearr[\"vel\"] = finetree[\"vel\"]", "= mainarr[field][:,i] finearr[field][:,i] = np.interp(lbt, lbt_cell, mainarr[field][:,i]) else: if do_smooth: r_p = smooth(mainarr[field],", "nnza_all, nnza_cell, istep_max = 50, prg_dir=\"./\", out_base=\"./\", nstep_too_short_main = 100): nouts = nnza_cell.nnza[\"nout\"][:istep_max]", "# Append age to maintree and mainresult. lbt = tc.zred2gyr(nnza_all.a2b(adp[0][0][\"nstep\"],\"nstep\",\"zred\"),z_now=0) adp[0][0] = recf.append_fields(adp[0][0],", "else: if do_smooth: r_p = smooth(mainarr[field], window_len=5, clip_tail_zeros=False) # odd number results in", "if not os.path.isdir(serial_out_dir): os.mkdir(serial_out_dir) # Build serial results and dump. # Chunks of", "# serialize_results.galresult2rec(sat_results))) if bad_main: print(\"Bad main. 
Break\") break #print(i) this_gal.data.append(nout_results) pickle.dump(this_gal, open(serial_out_dir+\"serial_result{}.pickle\".format(this_idx), \"wb\"))", "clip_tail_zeros=False) finearr[field][:,i] = np.interp(lbt, lbt_cell, r_p) else: r_p = mainarr[field][:,i] finearr[field][:,i] = np.interp(lbt,", "nnza_cell, nnza_all, tc, do_smooth=True) #print(\"BAD\", bad_main) elif len(sat_results) > 0 and sat_results[0].mstar >", "interp_fields = list(this_gal.main_arr.dtype.names) for field in [\"nout\", \"nstep\"]: interp_fields.remove(field) lbt_org = tc.zred2gyr(nnza_cell.a2b(mainarr[\"nstep\"],\"nstep\",\"zred\"), z_now=0)", "nnza_cell.nnza[\"nout\"][:mainarr[\"nstep\"].ptp()+1] newarr = np.zeros(len(new_nouts), dtype=mainarr.dtype) # It's easy to fill nouts and nsteps.", "[\"nout\", \"nstep\"]: interp_fields.remove(field) lbt_org = tc.zred2gyr(nnza_cell.a2b(mainarr[\"nstep\"],\"nstep\",\"zred\"), z_now=0) lbt_new = tc.zred2gyr(nnza_cell.a2b(newarr[\"nstep\"],\"nstep\",\"zred\"), z_now=0) for field", "for result_sub_sample_**.pickle import utils.match as mtc import pickle #from utils import hagn import", "this_gal.main_arr = serialize_results.galresult2rec(sat_results, is_main=True) #print(len(this_gal.main_arr.nstep), this_gal.main_arr.nstep.ptp()) if len(this_gal.main_arr.nstep) <= this_gal.main_arr.nstep.ptp(): #bad_main=True this_gal.main_arr =", "#print(len(sat_results)) nout_results.append(sat_results) # Merger properties if i == 0: this_gal.main_arr = serialize_results.galresult2rec(sat_results, is_main=True)", "finetree[\"id\"] finearr[\"idx\"] = finetree[\"idx\"] finearr[\"pos\"] = finetree[\"pos\"] # Pos and vel can be", "+1 element in the smoothed array. else: r_p = mainarr[field] finearr[field] = np.interp(lbt,", "# Set up a new array. 
new_nouts = nnza_cell.nnza[\"nout\"][:mainarr[\"nstep\"].ptp()+1] newarr = np.zeros(len(new_nouts), dtype=mainarr.dtype)", "= recf.append_fields(adp[0][0], \"time\", lbt) max_step=len(allresults) this_gal = serialize_results.Serial_result(adp) cnt_merger=0 bad_main=False for i, this_sats", "from the begining. Until then, go through one more step to remove duplicates", "r_p = mainarr[field][:,i] newarr[field][:,i] = np.interp(lbt_new, lbt_org, mainarr[field][:,i]) else: r_p = mainarr[field] newarr[field]", "serial results and dump. # Chunks of results in each nout (lambda_results/{nout}/result_sub_sample_{nout}) Allallidxs=[]", "in +1 element in the smoothed array. else: r_p = mainarr[field] finearr[field] =", "and found in the result_lambda pickle. Those repeatition must be removed from the", "dtype=mainarr.dtype) # It's easy to fill nouts and nsteps. newarr[\"nout\"]=new_nouts newarr[\"nstep\"]=nnza_cell.a2b(newarr[\"nout\"], \"nout\", \"nstep\")", "# finearr[\"nout\"]=nnza_all.a2b(finetree[\"nstep\"],\"nstep\",\"nout\") for field in [\"id\", \"idx\", \"pos\", \"vel\", \"nstep\", \"nout\"]: fields_interp.remove(field) lbt", "for i, this_sats in enumerate(adp): nout_results=[] for sat in this_sats: sat_results=[] for ss", "finearr[\"pos\"][finearr[\"nout\"] == mar[\"nout\"]] = mar[\"pos\"] finearr[\"vel\"][finearr[\"nout\"] == mar[\"nout\"]] = mar[\"vel\"] for field in", "i == 0: this_gal.main_arr = serialize_results.galresult2rec(sat_results, is_main=True) #print(len(this_gal.main_arr.nstep), this_gal.main_arr.nstep.ptp()) if len(this_gal.main_arr.nstep) <= this_gal.main_arr.nstep.ptp():", "as hmo #from rot2 import cell_chunk_module as ccm import numpy.lib.recfunctions as recf from", "bad_main) elif len(sat_results) > 0 and sat_results[0].mstar > 0.0: #print(\"merger2\") this_gal.add_merger(sat_results, sat) cnt_merger+=1", "dump. 
# Chunks of results in each nout (lambda_results/{nout}/result_sub_sample_{nout}) Allallidxs=[] for result_thisnout in", "as recf from utils import cosmology from load.info import Info def fill_main(mainarr, nnza_cell,", "sat_results[0].mstar > 0.0: #print(\"merger2\") this_gal.add_merger(sat_results, sat) cnt_merger+=1 #this_gal.mergers.append(serialize_results.get_merger_props(this_gal.main_arr, # serialize_results.galresult2rec(sat_results))) if bad_main: print(\"Bad", "prg_dir=\"./\", out_base=\"./\", nstep_too_short_main = 100): nouts = nnza_cell.nnza[\"nout\"][:istep_max] print(\"Considering nouts: \", nouts) \"\"\"", "# Chunks of results in each nout (lambda_results/{nout}/result_sub_sample_{nout}) Allallidxs=[] for result_thisnout in allresults:", "and vel can be overwritten if a better measurement from galaxy proeprty exist.", "results on finetree. def interpol_fine(this_gal, nnza_cell, nnza_all, tc, do_smooth=True): finetree=this_gal.maintree mainarr = this_gal.main_arr", "removed from the begining. Until then, go through one more step to remove", "sat in this_sats: sat_results=[] for ss in sat: nout=nnza_all.step2out([ss[\"nstep\"]]) # NOT nnza_cell. #print(nout)", "# interpolate main galaxy results on finetree. def interpol_fine(this_gal, nnza_cell, nnza_all, tc, do_smooth=True):", "Allallids.append(np.array([agal.id for agal in result_thisnout])) info = Info(nout=nouts[0]) tc = cosmology.Timeconvert(info, zred_now=0) all_fid_ok=[]", "from rot2 import serialize_results #import tree.halomodule as hmo #from rot2 import cell_chunk_module as", "finearr[field][:,i] = np.interp(lbt, lbt_cell, r_p) else: r_p = mainarr[field][:,i] finearr[field][:,i] = np.interp(lbt, lbt_cell,", "prg_dir + \"{}_adp.pickle\".format(this_idx) if not os.path.isfile(fname): # dump_prgs broken in the middle. 
continue", "tc.zred2gyr(nnza_all.a2b(adp[0][0][\"nstep\"],\"nstep\",\"zred\"),z_now=0) adp[0][0] = recf.append_fields(adp[0][0], \"time\", lbt) max_step=len(allresults) this_gal = serialize_results.Serial_result(adp) cnt_merger=0 bad_main=False for", "cnt_merger+=1 #this_gal.mergers.append(serialize_results.get_merger_props(this_gal.main_arr, # serialize_results.galresult2rec(sat_results))) if bad_main: print(\"Bad main. Break\") break #print(i) this_gal.data.append(nout_results) pickle.dump(this_gal,", "this_gal = serialize_results.Serial_result(adp) cnt_merger=0 bad_main=False for i, this_sats in enumerate(adp): nout_results=[] for sat", "Allallidxs=[] for result_thisnout in allresults: Allallidxs.append(np.array([agal.idx for agal in result_thisnout])) Allallids=[] for result_thisnout", "short main tree. SKIP\") continue # Append age to maintree and mainresult. lbt", "= np.interp(lbt_new, lbt_org, mainarr[field][:,i]) else: r_p = mainarr[field] newarr[field] = np.interp(lbt_new, lbt_org, r_p)", "serial_out_dir = out_base+\"result_serial/\" if not os.path.isdir(serial_out_dir): os.mkdir(serial_out_dir) # Build serial results and dump.", "tc.zred2gyr(nnza_cell.a2b(mainarr[\"nstep\"],\"nstep\",\"zred\"), z_now=0) for mar in mainarr: finearr[\"pos\"][finearr[\"nout\"] == mar[\"nout\"]] = mar[\"pos\"] finearr[\"vel\"][finearr[\"nout\"] ==", "(lambda_results/{nout}/result_sub_sample_{nout}) Allallidxs=[] for result_thisnout in allresults: Allallidxs.append(np.array([agal.idx for agal in result_thisnout])) Allallids=[] for", "ccm import numpy.lib.recfunctions as recf from utils import cosmology from load.info import Info", "i, this_sats in enumerate(adp): nout_results=[] for sat in this_sats: sat_results=[] for ss in", "It's easy to fill nouts and nsteps. 
newarr[\"nout\"]=new_nouts newarr[\"nstep\"]=nnza_cell.a2b(newarr[\"nout\"], \"nout\", \"nstep\") interp_fields =", "os from rot2.analysis import * from rot2 import serialize_results #import tree.halomodule as hmo", "of galaxies are repeatedly analized, and found in the result_lambda pickle. Those repeatition", "import hagn import os from rot2.analysis import * from rot2 import serialize_results #import", "Append age to maintree and mainresult. lbt = tc.zred2gyr(nnza_all.a2b(adp[0][0][\"nstep\"],\"nstep\",\"zred\"),z_now=0) adp[0][0] = recf.append_fields(adp[0][0], \"time\",", "the result_lambda pickle. Those repeatition must be removed from the begining. Until then,", "\"i_result\", i_result) if len(i_result) > 0: sat_results.append(allresults_now[i_result[0]]) sat_results[-1].nout=int(nout) sat_results[-1].nstep=nnza_cell.nnza[\"nstep\"][istep_cell] #print(len(sat_results)) nout_results.append(sat_results) # Merger", "field in interp_fields: if mainarr[field].ndim == 2: for i in range(3): r_p =", "mar[\"nout\"]] = mar[\"pos\"] finearr[\"vel\"][finearr[\"nout\"] == mar[\"nout\"]] = mar[\"vel\"] for field in fields_interp: #", "tc): # Set up a new array. new_nouts = nnza_cell.nnza[\"nout\"][:mainarr[\"nstep\"].ptp()+1] newarr = np.zeros(len(new_nouts),", "0: sat_results.append(allresults_now[i_result[0]]) sat_results[-1].nout=int(nout) sat_results[-1].nstep=nnza_cell.nnza[\"nstep\"][istep_cell] #print(len(sat_results)) nout_results.append(sat_results) # Merger properties if i == 0:", "broken in the middle. 
continue #print(i, \"IDX=\",this_idx) adp = pickle.load(open(fname, \"rb\")) if min(adp[0][0][\"nstep\"][adp[0][0][\"nstep\"]", "for i in range(3): if do_smooth: r_p = smooth(mainarr[field][:,i], window_len=5, clip_tail_zeros=False) finearr[field][:,i] =", "i, this_idx in enumerate(all_final_idxs): fname = prg_dir + \"{}_adp.pickle\".format(this_idx) if not os.path.isfile(fname): #", "recf from utils import cosmology from load.info import Info def fill_main(mainarr, nnza_cell, tc):", "element in the smoothed array. else: r_p = mainarr[field] finearr[field] = np.interp(lbt, lbt_cell,", "> 0 and sat_results[0].mstar > 0.0: #print(\"merger2\") this_gal.add_merger(sat_results, sat) cnt_merger+=1 #this_gal.mergers.append(serialize_results.get_merger_props(this_gal.main_arr, # serialize_results.galresult2rec(sat_results)))", "not os.path.isfile(fname): # dump_prgs broken in the middle. continue #print(i, \"IDX=\",this_idx) adp =", "fields_interp.remove(field) lbt = tc.zred2gyr(nnza_all.a2b(finetree[\"nstep\"],\"nstep\",\"zred\"), z_now=0) lbt_cell = tc.zred2gyr(nnza_cell.a2b(mainarr[\"nstep\"],\"nstep\",\"zred\"), z_now=0) for mar in mainarr:", "\"idx\", \"pos\", \"vel\", \"nstep\", \"nout\"]: fields_interp.remove(field) lbt = tc.zred2gyr(nnza_all.a2b(finetree[\"nstep\"],\"nstep\",\"zred\"), z_now=0) lbt_cell = tc.zred2gyr(nnza_cell.a2b(mainarr[\"nstep\"],\"nstep\",\"zred\"),", "and nsteps. newarr[\"nout\"]=new_nouts newarr[\"nstep\"]=nnza_cell.a2b(newarr[\"nout\"], \"nout\", \"nstep\") interp_fields = list(this_gal.main_arr.dtype.names) for field in [\"nout\",", "z_now=0) lbt_new = tc.zred2gyr(nnza_cell.a2b(newarr[\"nstep\"],\"nstep\",\"zred\"), z_now=0) for field in [\"id\", \"idx\"]: newarr[field][mtc.match_list_ind(newarr[\"nout\"], mainarr[\"nout\"])] =", "info = Info(nout=nouts[0]) tc = cosmology.Timeconvert(info, zred_now=0) all_fid_ok=[] all_fidx_ok=[] for i, this_idx in", "and mainresult. 
lbt = tc.zred2gyr(nnza_all.a2b(adp[0][0][\"nstep\"],\"nstep\",\"zred\"),z_now=0) adp[0][0] = recf.append_fields(adp[0][0], \"time\", lbt) max_step=len(allresults) this_gal =", "in interp_fields: if mainarr[field].ndim == 2: for i in range(3): r_p = mainarr[field][:,i]", "\"nout\", \"nstep\") interp_fields = list(this_gal.main_arr.dtype.names) for field in [\"nout\", \"nstep\"]: interp_fields.remove(field) lbt_org =", "# Begining of merger if mainarr[field].ndim == 2: for i in range(3): if", "recf.append_fields(adp[0][0], \"time\", lbt) max_step=len(allresults) this_gal = serialize_results.Serial_result(adp) cnt_merger=0 bad_main=False for i, this_sats in", "# needed for result_sub_sample_**.pickle import utils.match as mtc import pickle #from utils import", "clip_tail_zeros=False) # odd number results in +1 element in the smoothed array. else:", "serialize(allresults, all_final_idxs, nnza_all, nnza_cell, istep_max = 50, prg_dir=\"./\", out_base=\"./\", nstep_too_short_main = 100): nouts", "results and dump. # Chunks of results in each nout (lambda_results/{nout}/result_sub_sample_{nout}) Allallidxs=[] for", "unknown reason, some of galaxies are repeatedly analized, and found in the result_lambda", "print(\"Too short main tree. SKIP\") continue # Append age to maintree and mainresult.", "bad_main: print(\"Bad main. Break\") break #print(i) this_gal.data.append(nout_results) pickle.dump(this_gal, open(serial_out_dir+\"serial_result{}.pickle\".format(this_idx), \"wb\")) all_fidx_ok.append(this_idx) #break np.savetxt(serial_out_dir+\"all_fidx_ok.txt\",", "100): nouts = nnza_cell.nnza[\"nout\"][:istep_max] print(\"Considering nouts: \", nouts) \"\"\" For an unknown reason,", "\"all_sample_idxs.pickle\", \"rb\")) serial_out_dir = out_base+\"result_serial/\" if not os.path.isdir(serial_out_dir): os.mkdir(serial_out_dir) # Build serial results", "\"{}_adp.pickle\".format(this_idx) if not os.path.isfile(fname): # dump_prgs broken in the middle. 
continue #print(i, \"IDX=\",this_idx)", "serialize_results.galresult2rec(sat_results, is_main=True) #print(len(this_gal.main_arr.nstep), this_gal.main_arr.nstep.ptp()) if len(this_gal.main_arr.nstep) <= this_gal.main_arr.nstep.ptp(): #bad_main=True this_gal.main_arr = fill_main(this_gal.main_arr, nnza_cell,", "= nnza_cell.nnza[\"nout\"][:mainarr[\"nstep\"].ptp()+1] newarr = np.zeros(len(new_nouts), dtype=mainarr.dtype) # It's easy to fill nouts and", "ss[\"idx\"])[0] #print(\"len results\", len(allresults_now), \"i_result\", i_result) if len(i_result) > 0: sat_results.append(allresults_now[i_result[0]]) sat_results[-1].nout=int(nout) sat_results[-1].nstep=nnza_cell.nnza[\"nstep\"][istep_cell]", "# Merger properties if i == 0: this_gal.main_arr = serialize_results.galresult2rec(sat_results, is_main=True) #print(len(this_gal.main_arr.nstep), this_gal.main_arr.nstep.ptp())", "is_main=True) #print(len(this_gal.main_arr.nstep), this_gal.main_arr.nstep.ptp()) if len(this_gal.main_arr.nstep) <= this_gal.main_arr.nstep.ptp(): #bad_main=True this_gal.main_arr = fill_main(this_gal.main_arr, nnza_cell, tc)", "result_thisnout in allresults: Allallidxs.append(np.array([agal.idx for agal in result_thisnout])) Allallids=[] for result_thisnout in allresults:", "finetree=this_gal.maintree mainarr = this_gal.main_arr finearr = np.zeros(len(finetree),dtype=mainarr.dtype) fields_interp = list(mainarr.dtype.names) finearr[\"nstep\"]=finetree[\"nstep\"] finearr[\"id\"] =", "numpy as np import galaxymodule # needed for result_sub_sample_**.pickle import utils.match as mtc", "list(this_gal.main_arr.dtype.names) for field in [\"nout\", \"nstep\"]: interp_fields.remove(field) lbt_org = tc.zred2gyr(nnza_cell.a2b(mainarr[\"nstep\"],\"nstep\",\"zred\"), z_now=0) lbt_new =", "#print(\"merger2\") this_gal.add_merger(sat_results, sat) cnt_merger+=1 #this_gal.mergers.append(serialize_results.get_merger_props(this_gal.main_arr, # serialize_results.galresult2rec(sat_results))) if bad_main: 
print(\"Bad main. Break\") break", "main tree. SKIP\") continue # Append age to maintree and mainresult. lbt =", "lbt = tc.zred2gyr(nnza_all.a2b(finetree[\"nstep\"],\"nstep\",\"zred\"), z_now=0) lbt_cell = tc.zred2gyr(nnza_cell.a2b(mainarr[\"nstep\"],\"nstep\",\"zred\"), z_now=0) for mar in mainarr: finearr[\"pos\"][finearr[\"nout\"]", "= smooth(mainarr[field][:,i], window_len=5, clip_tail_zeros=False) finearr[field][:,i] = np.interp(lbt, lbt_cell, r_p) else: r_p = mainarr[field][:,i]", "#print(\"len results\", len(allresults_now), \"i_result\", i_result) if len(i_result) > 0: sat_results.append(allresults_now[i_result[0]]) sat_results[-1].nout=int(nout) sat_results[-1].nstep=nnza_cell.nnza[\"nstep\"][istep_cell] #print(len(sat_results))", "out_base=\"./\", nstep_too_short_main = 100): nouts = nnza_cell.nnza[\"nout\"][:istep_max] print(\"Considering nouts: \", nouts) \"\"\" For", "can be overwritten if a better measurement from galaxy proeprty exist. finearr[\"vel\"] =", "repeatition must be removed from the begining. 
Until then, go through one more", "result_thisnout])) Allallids=[] for result_thisnout in allresults: Allallids.append(np.array([agal.id for agal in result_thisnout])) info =", "rot2.analysis import * from rot2 import serialize_results #import tree.halomodule as hmo #from rot2", "step to remove duplicates \"\"\" all_sample_idxs=pickle.load(open(prg_dir + \"all_sample_idxs.pickle\", \"rb\")) serial_out_dir = out_base+\"result_serial/\" if", "finearr[\"id\"] = finetree[\"id\"] finearr[\"idx\"] = finetree[\"idx\"] finearr[\"pos\"] = finetree[\"pos\"] # Pos and vel", "in result_thisnout])) info = Info(nout=nouts[0]) tc = cosmology.Timeconvert(info, zred_now=0) all_fid_ok=[] all_fidx_ok=[] for i,", "== 2: for i in range(3): r_p = mainarr[field][:,i] newarr[field][:,i] = np.interp(lbt_new, lbt_org,", "through one more step to remove duplicates \"\"\" all_sample_idxs=pickle.load(open(prg_dir + \"all_sample_idxs.pickle\", \"rb\")) serial_out_dir", "2: for i in range(3): r_p = mainarr[field][:,i] newarr[field][:,i] = np.interp(lbt_new, lbt_org, mainarr[field][:,i])", "elif len(sat_results) > 0 and sat_results[0].mstar > 0.0: #print(\"merger2\") this_gal.add_merger(sat_results, sat) cnt_merger+=1 #this_gal.mergers.append(serialize_results.get_merger_props(this_gal.main_arr,", "Break\") break #print(i) this_gal.data.append(nout_results) pickle.dump(this_gal, open(serial_out_dir+\"serial_result{}.pickle\".format(this_idx), \"wb\")) all_fidx_ok.append(this_idx) #break np.savetxt(serial_out_dir+\"all_fidx_ok.txt\", all_fidx_ok, fmt=\"%d\") return", "in nouts: #print(nout, ss[\"idx\"]) istep_cell = np.where(nnza_cell.nnza[\"nout\"] == nout)[0][0] allresults_now=allresults[istep_cell] allresults_now_idx=Allallidxs[istep_cell] i_result =", "= tc.zred2gyr(nnza_cell.a2b(newarr[\"nstep\"],\"nstep\",\"zred\"), z_now=0) for field in [\"id\", \"idx\"]: newarr[field][mtc.match_list_ind(newarr[\"nout\"], mainarr[\"nout\"])] = mainarr[field] interp_fields.remove(field)", "fields_interp: # 
Begining of merger if mainarr[field].ndim == 2: for i in range(3):", "\"IDX=\",this_idx) adp = pickle.load(open(fname, \"rb\")) if min(adp[0][0][\"nstep\"][adp[0][0][\"nstep\"] > 0]) > nstep_too_short_main: print(\"Too short", "np.interp(lbt, lbt_cell, r_p) return finearr def serialize(allresults, all_final_idxs, nnza_all, nnza_cell, istep_max = 50,", "for mar in mainarr: finearr[\"pos\"][finearr[\"nout\"] == mar[\"nout\"]] = mar[\"pos\"] finearr[\"vel\"][finearr[\"nout\"] == mar[\"nout\"]] =", "#from utils import hagn import os from rot2.analysis import * from rot2 import", "i in range(3): if do_smooth: r_p = smooth(mainarr[field][:,i], window_len=5, clip_tail_zeros=False) finearr[field][:,i] = np.interp(lbt,", "istep_max = 50, prg_dir=\"./\", out_base=\"./\", nstep_too_short_main = 100): nouts = nnza_cell.nnza[\"nout\"][:istep_max] print(\"Considering nouts:", "import galaxymodule # needed for result_sub_sample_**.pickle import utils.match as mtc import pickle #from", "nnza_all, tc, do_smooth=True) #print(\"BAD\", bad_main) elif len(sat_results) > 0 and sat_results[0].mstar > 0.0:", "nstep_too_short_main = 100): nouts = nnza_cell.nnza[\"nout\"][:istep_max] print(\"Considering nouts: \", nouts) \"\"\" For an", "\"vel\", \"nstep\", \"nout\"]: fields_interp.remove(field) lbt = tc.zred2gyr(nnza_all.a2b(finetree[\"nstep\"],\"nstep\",\"zred\"), z_now=0) lbt_cell = tc.zred2gyr(nnza_cell.a2b(mainarr[\"nstep\"],\"nstep\",\"zred\"), z_now=0) for", "nouts: \", nouts) \"\"\" For an unknown reason, some of galaxies are repeatedly", "z_now=0) lbt_cell = tc.zred2gyr(nnza_cell.a2b(mainarr[\"nstep\"],\"nstep\",\"zred\"), z_now=0) for mar in mainarr: finearr[\"pos\"][finearr[\"nout\"] == mar[\"nout\"]] =", "Set up a new array. 
new_nouts = nnza_cell.nnza[\"nout\"][:mainarr[\"nstep\"].ptp()+1] newarr = np.zeros(len(new_nouts), dtype=mainarr.dtype) #", "= tc.zred2gyr(nnza_cell.a2b(mainarr[\"nstep\"],\"nstep\",\"zred\"), z_now=0) lbt_new = tc.zred2gyr(nnza_cell.a2b(newarr[\"nstep\"],\"nstep\",\"zred\"), z_now=0) for field in [\"id\", \"idx\"]: newarr[field][mtc.match_list_ind(newarr[\"nout\"],", "= 50, prg_dir=\"./\", out_base=\"./\", nstep_too_short_main = 100): nouts = nnza_cell.nnza[\"nout\"][:istep_max] print(\"Considering nouts: \",", "interpolate main galaxy results on finetree. def interpol_fine(this_gal, nnza_cell, nnza_all, tc, do_smooth=True): finetree=this_gal.maintree", "= mar[\"vel\"] for field in fields_interp: # Begining of merger if mainarr[field].ndim ==", "out_base+\"result_serial/\" if not os.path.isdir(serial_out_dir): os.mkdir(serial_out_dir) # Build serial results and dump. # Chunks", "Info def fill_main(mainarr, nnza_cell, tc): # Set up a new array. new_nouts =", "> 0.0: #print(\"merger2\") this_gal.add_merger(sat_results, sat) cnt_merger+=1 #this_gal.mergers.append(serialize_results.get_merger_props(this_gal.main_arr, # serialize_results.galresult2rec(sat_results))) if bad_main: print(\"Bad main.", "= mainarr[field] finearr[field] = np.interp(lbt, lbt_cell, r_p) return finearr def serialize(allresults, all_final_idxs, nnza_all,", "if bad_main: print(\"Bad main. 
Break\") break #print(i) this_gal.data.append(nout_results) pickle.dump(this_gal, open(serial_out_dir+\"serial_result{}.pickle\".format(this_idx), \"wb\")) all_fidx_ok.append(this_idx) #break", "= np.interp(lbt, lbt_cell, r_p) else: r_p = mainarr[field][:,i] finearr[field][:,i] = np.interp(lbt, lbt_cell, mainarr[field][:,i])", "r_p = mainarr[field] newarr[field] = np.interp(lbt_new, lbt_org, r_p) return newarr # interpolate main", "= tc.zred2gyr(nnza_all.a2b(adp[0][0][\"nstep\"],\"nstep\",\"zred\"),z_now=0) adp[0][0] = recf.append_fields(adp[0][0], \"time\", lbt) max_step=len(allresults) this_gal = serialize_results.Serial_result(adp) cnt_merger=0 bad_main=False", "# NOT nnza_cell. #print(nout) if nout in nouts: #print(nout, ss[\"idx\"]) istep_cell = np.where(nnza_cell.nnza[\"nout\"]", "Allallids=[] for result_thisnout in allresults: Allallids.append(np.array([agal.id for agal in result_thisnout])) info = Info(nout=nouts[0])", "mainarr[\"nout\"])] = mainarr[field] interp_fields.remove(field) for field in interp_fields: if mainarr[field].ndim == 2: for", "galaxies are repeatedly analized, and found in the result_lambda pickle. 
Those repeatition must", "in enumerate(all_final_idxs): fname = prg_dir + \"{}_adp.pickle\".format(this_idx) if not os.path.isfile(fname): # dump_prgs broken", "allresults: Allallids.append(np.array([agal.id for agal in result_thisnout])) info = Info(nout=nouts[0]) tc = cosmology.Timeconvert(info, zred_now=0)", "#print(len(this_gal.main_arr.nstep), this_gal.main_arr.nstep.ptp()) if len(this_gal.main_arr.nstep) <= this_gal.main_arr.nstep.ptp(): #bad_main=True this_gal.main_arr = fill_main(this_gal.main_arr, nnza_cell, tc) this_gal.finearr", "len(this_gal.main_arr.nstep) <= this_gal.main_arr.nstep.ptp(): #bad_main=True this_gal.main_arr = fill_main(this_gal.main_arr, nnza_cell, tc) this_gal.finearr = interpol_fine(this_gal, nnza_cell,", "utils import hagn import os from rot2.analysis import * from rot2 import serialize_results", "pickle.dump(this_gal, open(serial_out_dir+\"serial_result{}.pickle\".format(this_idx), \"wb\")) all_fidx_ok.append(this_idx) #break np.savetxt(serial_out_dir+\"all_fidx_ok.txt\", all_fidx_ok, fmt=\"%d\") return [pickle.load(open(serial_out_dir+\"serial_result{}.pickle\".format(this_idx), \"rb\")) for this_idx", "interp_fields.remove(field) for field in interp_fields: if mainarr[field].ndim == 2: for i in range(3):", "for agal in result_thisnout])) Allallids=[] for result_thisnout in allresults: Allallids.append(np.array([agal.id for agal in", "in mainarr: finearr[\"pos\"][finearr[\"nout\"] == mar[\"nout\"]] = mar[\"pos\"] finearr[\"vel\"][finearr[\"nout\"] == mar[\"nout\"]] = mar[\"vel\"] for", "and sat_results[0].mstar > 0.0: #print(\"merger2\") this_gal.add_merger(sat_results, sat) cnt_merger+=1 #this_gal.mergers.append(serialize_results.get_merger_props(this_gal.main_arr, # serialize_results.galresult2rec(sat_results))) if bad_main:", "mainarr[field] interp_fields.remove(field) for field in interp_fields: if mainarr[field].ndim == 2: for i in", "#print(nout, ss[\"idx\"]) istep_cell = np.where(nnza_cell.nnza[\"nout\"] == nout)[0][0] 
allresults_now=allresults[istep_cell] allresults_now_idx=Allallidxs[istep_cell] i_result = np.where(allresults_now_idx ==", "lbt_cell = tc.zred2gyr(nnza_cell.a2b(mainarr[\"nstep\"],\"nstep\",\"zred\"), z_now=0) for mar in mainarr: finearr[\"pos\"][finearr[\"nout\"] == mar[\"nout\"]] = mar[\"pos\"]", "if len(i_result) > 0: sat_results.append(allresults_now[i_result[0]]) sat_results[-1].nout=int(nout) sat_results[-1].nstep=nnza_cell.nnza[\"nstep\"][istep_cell] #print(len(sat_results)) nout_results.append(sat_results) # Merger properties if", "r_p = mainarr[field][:,i] finearr[field][:,i] = np.interp(lbt, lbt_cell, mainarr[field][:,i]) else: if do_smooth: r_p =", "mar[\"pos\"] finearr[\"vel\"][finearr[\"nout\"] == mar[\"nout\"]] = mar[\"vel\"] for field in fields_interp: # Begining of", "\", nouts) \"\"\" For an unknown reason, some of galaxies are repeatedly analized,", "serialize_results #import tree.halomodule as hmo #from rot2 import cell_chunk_module as ccm import numpy.lib.recfunctions", "field in [\"id\", \"idx\"]: newarr[field][mtc.match_list_ind(newarr[\"nout\"], mainarr[\"nout\"])] = mainarr[field] interp_fields.remove(field) for field in interp_fields:", "= cosmology.Timeconvert(info, zred_now=0) all_fid_ok=[] all_fidx_ok=[] for i, this_idx in enumerate(all_final_idxs): fname = prg_dir", "lbt_org = tc.zred2gyr(nnza_cell.a2b(mainarr[\"nstep\"],\"nstep\",\"zred\"), z_now=0) lbt_new = tc.zred2gyr(nnza_cell.a2b(newarr[\"nstep\"],\"nstep\",\"zred\"), z_now=0) for field in [\"id\", \"idx\"]:", "ss[\"idx\"]) istep_cell = np.where(nnza_cell.nnza[\"nout\"] == nout)[0][0] allresults_now=allresults[istep_cell] allresults_now_idx=Allallidxs[istep_cell] i_result = np.where(allresults_now_idx == ss[\"idx\"])[0]", "one more step to remove duplicates \"\"\" all_sample_idxs=pickle.load(open(prg_dir + \"all_sample_idxs.pickle\", \"rb\")) serial_out_dir =", "> 0: sat_results.append(allresults_now[i_result[0]]) sat_results[-1].nout=int(nout) 
sat_results[-1].nstep=nnza_cell.nnza[\"nstep\"][istep_cell] #print(len(sat_results)) nout_results.append(sat_results) # Merger properties if i ==", "\"nout\"]: fields_interp.remove(field) lbt = tc.zred2gyr(nnza_all.a2b(finetree[\"nstep\"],\"nstep\",\"zred\"), z_now=0) lbt_cell = tc.zred2gyr(nnza_cell.a2b(mainarr[\"nstep\"],\"nstep\",\"zred\"), z_now=0) for mar in", "= finetree[\"idx\"] finearr[\"pos\"] = finetree[\"pos\"] # Pos and vel can be overwritten if", "new_nouts = nnza_cell.nnza[\"nout\"][:mainarr[\"nstep\"].ptp()+1] newarr = np.zeros(len(new_nouts), dtype=mainarr.dtype) # It's easy to fill nouts", "measurement from galaxy proeprty exist. finearr[\"vel\"] = finetree[\"vel\"] # finearr[\"nout\"]=nnza_all.a2b(finetree[\"nstep\"],\"nstep\",\"nout\") for field in", "allresults_now=allresults[istep_cell] allresults_now_idx=Allallidxs[istep_cell] i_result = np.where(allresults_now_idx == ss[\"idx\"])[0] #print(\"len results\", len(allresults_now), \"i_result\", i_result) if", "fill nouts and nsteps. newarr[\"nout\"]=new_nouts newarr[\"nstep\"]=nnza_cell.a2b(newarr[\"nout\"], \"nout\", \"nstep\") interp_fields = list(this_gal.main_arr.dtype.names) for field", "tree.halomodule as hmo #from rot2 import cell_chunk_module as ccm import numpy.lib.recfunctions as recf", "serialize_results.galresult2rec(sat_results))) if bad_main: print(\"Bad main. 
Break\") break #print(i) this_gal.data.append(nout_results) pickle.dump(this_gal, open(serial_out_dir+\"serial_result{}.pickle\".format(this_idx), \"wb\")) all_fidx_ok.append(this_idx)", "results in each nout (lambda_results/{nout}/result_sub_sample_{nout}) Allallidxs=[] for result_thisnout in allresults: Allallidxs.append(np.array([agal.idx for agal", "= finetree[\"vel\"] # finearr[\"nout\"]=nnza_all.a2b(finetree[\"nstep\"],\"nstep\",\"nout\") for field in [\"id\", \"idx\", \"pos\", \"vel\", \"nstep\", \"nout\"]:", "+ \"all_sample_idxs.pickle\", \"rb\")) serial_out_dir = out_base+\"result_serial/\" if not os.path.isdir(serial_out_dir): os.mkdir(serial_out_dir) # Build serial", "nouts) \"\"\" For an unknown reason, some of galaxies are repeatedly analized, and", "tc.zred2gyr(nnza_cell.a2b(mainarr[\"nstep\"],\"nstep\",\"zred\"), z_now=0) lbt_new = tc.zred2gyr(nnza_cell.a2b(newarr[\"nstep\"],\"nstep\",\"zred\"), z_now=0) for field in [\"id\", \"idx\"]: newarr[field][mtc.match_list_ind(newarr[\"nout\"], mainarr[\"nout\"])]", "finearr[\"pos\"] = finetree[\"pos\"] # Pos and vel can be overwritten if a better", "> 0]) > nstep_too_short_main: print(\"Too short main tree. SKIP\") continue # Append age", "return finearr def serialize(allresults, all_final_idxs, nnza_all, nnza_cell, istep_max = 50, prg_dir=\"./\", out_base=\"./\", nstep_too_short_main", "tc = cosmology.Timeconvert(info, zred_now=0) all_fid_ok=[] all_fidx_ok=[] for i, this_idx in enumerate(all_final_idxs): fname =", "def interpol_fine(this_gal, nnza_cell, nnza_all, tc, do_smooth=True): finetree=this_gal.maintree mainarr = this_gal.main_arr finearr = np.zeros(len(finetree),dtype=mainarr.dtype)", "bad_main=False for i, this_sats in enumerate(adp): nout_results=[] for sat in this_sats: sat_results=[] for", "a new array. 
new_nouts = nnza_cell.nnza[\"nout\"][:mainarr[\"nstep\"].ptp()+1] newarr = np.zeros(len(new_nouts), dtype=mainarr.dtype) # It's easy", "range(3): r_p = mainarr[field][:,i] newarr[field][:,i] = np.interp(lbt_new, lbt_org, mainarr[field][:,i]) else: r_p = mainarr[field]", "def serialize(allresults, all_final_idxs, nnza_all, nnza_cell, istep_max = 50, prg_dir=\"./\", out_base=\"./\", nstep_too_short_main = 100):", "# It's easy to fill nouts and nsteps. newarr[\"nout\"]=new_nouts newarr[\"nstep\"]=nnza_cell.a2b(newarr[\"nout\"], \"nout\", \"nstep\") interp_fields", "smooth(mainarr[field], window_len=5, clip_tail_zeros=False) # odd number results in +1 element in the smoothed", "0.0: #print(\"merger2\") this_gal.add_merger(sat_results, sat) cnt_merger+=1 #this_gal.mergers.append(serialize_results.get_merger_props(this_gal.main_arr, # serialize_results.galresult2rec(sat_results))) if bad_main: print(\"Bad main. Break\")", "else: r_p = mainarr[field] finearr[field] = np.interp(lbt, lbt_cell, r_p) return finearr def serialize(allresults,", "sat_results=[] for ss in sat: nout=nnza_all.step2out([ss[\"nstep\"]]) # NOT nnza_cell. #print(nout) if nout in", "in [\"nout\", \"nstep\"]: interp_fields.remove(field) lbt_org = tc.zred2gyr(nnza_cell.a2b(mainarr[\"nstep\"],\"nstep\",\"zred\"), z_now=0) lbt_new = tc.zred2gyr(nnza_cell.a2b(newarr[\"nstep\"],\"nstep\",\"zred\"), z_now=0) for", "min(adp[0][0][\"nstep\"][adp[0][0][\"nstep\"] > 0]) > nstep_too_short_main: print(\"Too short main tree. SKIP\") continue # Append", "lbt_org, r_p) return newarr # interpolate main galaxy results on finetree. 
def interpol_fine(this_gal,", "i_result = np.where(allresults_now_idx == ss[\"idx\"])[0] #print(\"len results\", len(allresults_now), \"i_result\", i_result) if len(i_result) >", "interpol_fine(this_gal, nnza_cell, nnza_all, tc, do_smooth=True): finetree=this_gal.maintree mainarr = this_gal.main_arr finearr = np.zeros(len(finetree),dtype=mainarr.dtype) fields_interp", "this_gal.main_arr finearr = np.zeros(len(finetree),dtype=mainarr.dtype) fields_interp = list(mainarr.dtype.names) finearr[\"nstep\"]=finetree[\"nstep\"] finearr[\"id\"] = finetree[\"id\"] finearr[\"idx\"] =", "finearr def serialize(allresults, all_final_idxs, nnza_all, nnza_cell, istep_max = 50, prg_dir=\"./\", out_base=\"./\", nstep_too_short_main =", "finearr[field][:,i] = np.interp(lbt, lbt_cell, mainarr[field][:,i]) else: if do_smooth: r_p = smooth(mainarr[field], window_len=5, clip_tail_zeros=False)", "\"nstep\"]: interp_fields.remove(field) lbt_org = tc.zred2gyr(nnza_cell.a2b(mainarr[\"nstep\"],\"nstep\",\"zred\"), z_now=0) lbt_new = tc.zred2gyr(nnza_cell.a2b(newarr[\"nstep\"],\"nstep\",\"zred\"), z_now=0) for field in", "tc) this_gal.finearr = interpol_fine(this_gal, nnza_cell, nnza_all, tc, do_smooth=True) #print(\"BAD\", bad_main) elif len(sat_results) >", "sat) cnt_merger+=1 #this_gal.mergers.append(serialize_results.get_merger_props(this_gal.main_arr, # serialize_results.galresult2rec(sat_results))) if bad_main: print(\"Bad main. 
Break\") break #print(i) this_gal.data.append(nout_results)", "mainarr[field].ndim == 2: for i in range(3): r_p = mainarr[field][:,i] newarr[field][:,i] = np.interp(lbt_new,", "= list(mainarr.dtype.names) finearr[\"nstep\"]=finetree[\"nstep\"] finearr[\"id\"] = finetree[\"id\"] finearr[\"idx\"] = finetree[\"idx\"] finearr[\"pos\"] = finetree[\"pos\"] #", "needed for result_sub_sample_**.pickle import utils.match as mtc import pickle #from utils import hagn", "as np import galaxymodule # needed for result_sub_sample_**.pickle import utils.match as mtc import", "[\"id\", \"idx\", \"pos\", \"vel\", \"nstep\", \"nout\"]: fields_interp.remove(field) lbt = tc.zred2gyr(nnza_all.a2b(finetree[\"nstep\"],\"nstep\",\"zred\"), z_now=0) lbt_cell =", "= pickle.load(open(fname, \"rb\")) if min(adp[0][0][\"nstep\"][adp[0][0][\"nstep\"] > 0]) > nstep_too_short_main: print(\"Too short main tree.", "results\", len(allresults_now), \"i_result\", i_result) if len(i_result) > 0: sat_results.append(allresults_now[i_result[0]]) sat_results[-1].nout=int(nout) sat_results[-1].nstep=nnza_cell.nnza[\"nstep\"][istep_cell] #print(len(sat_results)) nout_results.append(sat_results)", "lbt_cell, r_p) else: r_p = mainarr[field][:,i] finearr[field][:,i] = np.interp(lbt, lbt_cell, mainarr[field][:,i]) else: if", "nout_results=[] for sat in this_sats: sat_results=[] for ss in sat: nout=nnza_all.step2out([ss[\"nstep\"]]) # NOT", "np.interp(lbt_new, lbt_org, r_p) return newarr # interpolate main galaxy results on finetree. def", "np import galaxymodule # needed for result_sub_sample_**.pickle import utils.match as mtc import pickle", "do_smooth=True) #print(\"BAD\", bad_main) elif len(sat_results) > 0 and sat_results[0].mstar > 0.0: #print(\"merger2\") this_gal.add_merger(sat_results,", "in the middle. 
continue #print(i, \"IDX=\",this_idx) adp = pickle.load(open(fname, \"rb\")) if min(adp[0][0][\"nstep\"][adp[0][0][\"nstep\"] >", "in range(3): if do_smooth: r_p = smooth(mainarr[field][:,i], window_len=5, clip_tail_zeros=False) finearr[field][:,i] = np.interp(lbt, lbt_cell,", "<= this_gal.main_arr.nstep.ptp(): #bad_main=True this_gal.main_arr = fill_main(this_gal.main_arr, nnza_cell, tc) this_gal.finearr = interpol_fine(this_gal, nnza_cell, nnza_all,", "mar[\"nout\"]] = mar[\"vel\"] for field in fields_interp: # Begining of merger if mainarr[field].ndim", "continue #print(i, \"IDX=\",this_idx) adp = pickle.load(open(fname, \"rb\")) if min(adp[0][0][\"nstep\"][adp[0][0][\"nstep\"] > 0]) > nstep_too_short_main:", "array. new_nouts = nnza_cell.nnza[\"nout\"][:mainarr[\"nstep\"].ptp()+1] newarr = np.zeros(len(new_nouts), dtype=mainarr.dtype) # It's easy to fill", "of results in each nout (lambda_results/{nout}/result_sub_sample_{nout}) Allallidxs=[] for result_thisnout in allresults: Allallidxs.append(np.array([agal.idx for", "# odd number results in +1 element in the smoothed array. else: r_p", "this_idx in enumerate(all_final_idxs): fname = prg_dir + \"{}_adp.pickle\".format(this_idx) if not os.path.isfile(fname): # dump_prgs", "= np.interp(lbt, lbt_cell, r_p) return finearr def serialize(allresults, all_final_idxs, nnza_all, nnza_cell, istep_max =", "# Build serial results and dump. # Chunks of results in each nout", "from load.info import Info def fill_main(mainarr, nnza_cell, tc): # Set up a new", "reason, some of galaxies are repeatedly analized, and found in the result_lambda pickle.", "allresults_now_idx=Allallidxs[istep_cell] i_result = np.where(allresults_now_idx == ss[\"idx\"])[0] #print(\"len results\", len(allresults_now), \"i_result\", i_result) if len(i_result)", "continue # Append age to maintree and mainresult. 
lbt = tc.zred2gyr(nnza_all.a2b(adp[0][0][\"nstep\"],\"nstep\",\"zred\"),z_now=0) adp[0][0] =", "pickle.load(open(fname, \"rb\")) if min(adp[0][0][\"nstep\"][adp[0][0][\"nstep\"] > 0]) > nstep_too_short_main: print(\"Too short main tree. SKIP\")", "cosmology from load.info import Info def fill_main(mainarr, nnza_cell, tc): # Set up a", "interp_fields: if mainarr[field].ndim == 2: for i in range(3): r_p = mainarr[field][:,i] newarr[field][:,i]", "better measurement from galaxy proeprty exist. finearr[\"vel\"] = finetree[\"vel\"] # finearr[\"nout\"]=nnza_all.a2b(finetree[\"nstep\"],\"nstep\",\"nout\") for field", "for agal in result_thisnout])) info = Info(nout=nouts[0]) tc = cosmology.Timeconvert(info, zred_now=0) all_fid_ok=[] all_fidx_ok=[]", "mainresult. lbt = tc.zred2gyr(nnza_all.a2b(adp[0][0][\"nstep\"],\"nstep\",\"zred\"),z_now=0) adp[0][0] = recf.append_fields(adp[0][0], \"time\", lbt) max_step=len(allresults) this_gal = serialize_results.Serial_result(adp)", "vel can be overwritten if a better measurement from galaxy proeprty exist. 
finearr[\"vel\"]", "nnza_all, tc, do_smooth=True): finetree=this_gal.maintree mainarr = this_gal.main_arr finearr = np.zeros(len(finetree),dtype=mainarr.dtype) fields_interp = list(mainarr.dtype.names)", "result_thisnout])) info = Info(nout=nouts[0]) tc = cosmology.Timeconvert(info, zred_now=0) all_fid_ok=[] all_fidx_ok=[] for i, this_idx", "sat_results[-1].nout=int(nout) sat_results[-1].nstep=nnza_cell.nnza[\"nstep\"][istep_cell] #print(len(sat_results)) nout_results.append(sat_results) # Merger properties if i == 0: this_gal.main_arr =", "nout in nouts: #print(nout, ss[\"idx\"]) istep_cell = np.where(nnza_cell.nnza[\"nout\"] == nout)[0][0] allresults_now=allresults[istep_cell] allresults_now_idx=Allallidxs[istep_cell] i_result", "rot2 import serialize_results #import tree.halomodule as hmo #from rot2 import cell_chunk_module as ccm", "field in [\"id\", \"idx\", \"pos\", \"vel\", \"nstep\", \"nout\"]: fields_interp.remove(field) lbt = tc.zred2gyr(nnza_all.a2b(finetree[\"nstep\"],\"nstep\",\"zred\"), z_now=0)", "this_sats: sat_results=[] for ss in sat: nout=nnza_all.step2out([ss[\"nstep\"]]) # NOT nnza_cell. #print(nout) if nout", "mainarr[field][:,i] newarr[field][:,i] = np.interp(lbt_new, lbt_org, mainarr[field][:,i]) else: r_p = mainarr[field] newarr[field] = np.interp(lbt_new,", "from galaxy proeprty exist. finearr[\"vel\"] = finetree[\"vel\"] # finearr[\"nout\"]=nnza_all.a2b(finetree[\"nstep\"],\"nstep\",\"nout\") for field in [\"id\",", "Begining of merger if mainarr[field].ndim == 2: for i in range(3): if do_smooth:", "are repeatedly analized, and found in the result_lambda pickle. Those repeatition must be", "on finetree. 
def interpol_fine(this_gal, nnza_cell, nnza_all, tc, do_smooth=True): finetree=this_gal.maintree mainarr = this_gal.main_arr finearr", "fname = prg_dir + \"{}_adp.pickle\".format(this_idx) if not os.path.isfile(fname): # dump_prgs broken in the", "mainarr[field].ndim == 2: for i in range(3): if do_smooth: r_p = smooth(mainarr[field][:,i], window_len=5,", "== 2: for i in range(3): if do_smooth: r_p = smooth(mainarr[field][:,i], window_len=5, clip_tail_zeros=False)", "finearr = np.zeros(len(finetree),dtype=mainarr.dtype) fields_interp = list(mainarr.dtype.names) finearr[\"nstep\"]=finetree[\"nstep\"] finearr[\"id\"] = finetree[\"id\"] finearr[\"idx\"] = finetree[\"idx\"]", "#from rot2 import cell_chunk_module as ccm import numpy.lib.recfunctions as recf from utils import", "os.mkdir(serial_out_dir) # Build serial results and dump. # Chunks of results in each", "Allallidxs.append(np.array([agal.idx for agal in result_thisnout])) Allallids=[] for result_thisnout in allresults: Allallids.append(np.array([agal.id for agal", "open(serial_out_dir+\"serial_result{}.pickle\".format(this_idx), \"wb\")) all_fidx_ok.append(this_idx) #break np.savetxt(serial_out_dir+\"all_fidx_ok.txt\", all_fidx_ok, fmt=\"%d\") return [pickle.load(open(serial_out_dir+\"serial_result{}.pickle\".format(this_idx), \"rb\")) for this_idx in", "this_gal.data.append(nout_results) pickle.dump(this_gal, open(serial_out_dir+\"serial_result{}.pickle\".format(this_idx), \"wb\")) all_fidx_ok.append(this_idx) #break np.savetxt(serial_out_dir+\"all_fidx_ok.txt\", all_fidx_ok, fmt=\"%d\") return [pickle.load(open(serial_out_dir+\"serial_result{}.pickle\".format(this_idx), \"rb\")) for", "for field in [\"id\", \"idx\", \"pos\", \"vel\", \"nstep\", \"nout\"]: fields_interp.remove(field) lbt = tc.zred2gyr(nnza_all.a2b(finetree[\"nstep\"],\"nstep\",\"zred\"),", "= finetree[\"pos\"] # Pos and vel can be overwritten if a better measurement" ]
[ "def delete_repositories(ids): credential_groups = [] with LocalSession() as session: for id in ids:", "= credential_list.pop('repo_password') respository_interface = ResticRepositoryFormatted(repository.address, repo_password, credential_list if len(credential_list) > 0 else None,", "item.pop(\"atime\") if item.get('ctime'): try: item['ctime'] = parser.parse(item['ctime']) except ValueError: item['ctime'] = None item['created_time']", "= SnapshotObject( name=item.get('name'), type=item.get('type'), path=item.get('path'), uid=item.get('uid'), gid=item.get('gid'), size=item.get('size'), mode=item.get('mode'), struct_type=item.get('struct_type'), modified_time=item.get('modified_time'), accessed_time=item.get('accessed_time'), created_time=item.get('created_time'),", "might take a bit of time def get_info(id, repository_interface=None, use_cache=False, repo_status=True): info_dict =", "= None item['accessed_time'] = item.pop(\"atime\") if item.get('ctime'): try: item['ctime'] = parser.parse(item['ctime']) except ValueError:", "def get_formatted_repository_interface_from_id(id): try: with LocalSession() as session: repository = session.query(Repository).filter_by(id=id).first() if repository: credential_list", "as session: snapshot = session.query(Snapshot).filter_by(repository_id=repo_id, snap_short_id=snapshot_id).first() session.delete(snapshot) session.commit() def get_snapshot_objects(snap_id, use_cache=False): with LocalSession()", "credential_groups: credential_manager.remove_credentials(id) def get_repository_from_snap_id(snap_id): with LocalSession() as session: snapshot = session.query(Snapshot).filter_by(snap_id=snap_id).first() repository =", "LocalSession() as session: repository = session.query(Repository).filter_by(id=repo_id).first() if repository.name != info['name']: credential_manager.set_service_id(repository.credential_group_id, info['name']) repository.name", "session.query(Snapshot).filter_by(repository_id=repo_id, 
snap_short_id=snapshot_id).first() return snapshot def insert_snapshots(items, repo_id): with LocalSession() as session: for item", "repository_type_id=info['repository_type_id'], concurrent_uses=info.get('concurrent_uses'), timeout=info.get('timeout') ) session.add(repository) session.commit() return repository.id def update_repository(info, repo_id, sync_db=False, unsync_db=False):", "Returns list of snapshots from # the database if use_cache is set to", "from resticweb.dictionary.resticweb_constants import Repository as Rep from resticweb.models.general import Repository, Snapshot, SnapshotObject, JobParameter,", "info['address'] repository.cache_repo = info['cache_repo'] repository.concurrent_uses = info['concurrent_uses'] repository.timeout = info['timeout'] repository.parameters = json.dumps(info['parameters'])", "snapshots = [] if not use_cache and repository_interface.is_online(): snapshots = repository_interface.get_snapshots() return snapshots", "extra # item['snap_time'] = datetime.strptime(main_time, \"%Y-%m-%dT%H:%M:%S.%f%z\") item['snap_time'] = parser.parse(main_time) new_snapshot = Snapshot( snap_id=item.get('snap_id'),", "from resticweb.misc.credential_manager import credential_manager # from .repository import ResticRepository from .repository_formatted import ResticRepositoryFormatted", "item['snap_id'] = item.pop('id') item['snap_short_id'] = item.pop('short_id') item['snap_time'] = item.pop('time') if item['snap_time']: main_time =", "snapshot = session.query(Snapshot).filter_by(snap_id=snap_id).first() repository = session.query(Repository).filter_by(id=snapshot.repository_id).first() return repository # gets basic info about", "to False. 
Returns list of snapshots from # the database if use_cache is", "= None item['created_time'] = item.pop(\"ctime\") new_item = SnapshotObject( name=item.get('name'), type=item.get('type'), path=item.get('path'), uid=item.get('uid'), gid=item.get('gid'),", "database from the # repository if use_cache is set to False. Returns list", "= repository_interface.get_stats() with LocalSession() as session: repository = session.query(Repository).filter_by(id=id).first() repository_type = session.query(RepositoryType).filter_by(id=repository.repository_type_id).first() if", "session.query(RepositoryType).filter_by(id=repository.repository_type_id).first() if misc_data: repository.data = json.dumps(misc_data) session.commit() else: try: misc_data = json.loads(repository.data) except", "except ValueError: item['mtime'] = None item['modified_time'] = item.pop(\"mtime\") if item.get('atime'): try: item['atime'] =", "is set to False. Returns list of snapshots from # the database if", "try: with LocalSession() as session: repository = session.query(Repository).filter_by(id=id).first() if repository: credential_list = credential_manager.get_group_credentials(repository.credential_group_id)", "= None if repo_status: if not repository_interface: repository_interface = get_formatted_repository_interface_from_id(id) repo_status = repository_interface.is_offline()", "repositories = session.query(Repository).filter_by() for repository in repositories: repository_list.append((repository.id, repository.name)) return repository_list def get_snapshot_info(id):", "def get_snapshot(repo_id, snapshot_id, use_cache=False): repository_interface = get_formatted_repository_interface_from_id(repo_id) if not use_cache and repository_interface.is_online(): snapshot", "= parser.parse(main_time) new_snapshot = Snapshot( snap_id=item.get('snap_id'), snap_short_id=item.get('snap_short_id'), snap_time=item.get('snap_time'), hostname=item.get('hostname'), username=item.get('username'), tree=item.get('tree'), 
repository_id=repo_id, paths=json.dumps(item.get('paths')),", "snapshots else {} else: with LocalSession() as session: snapshots = session.query(Snapshot).filter_by(repository_id=id).all() return snapshots", "{} misc_data = None if repo_status: if not repository_interface: repository_interface = get_formatted_repository_interface_from_id(id) repo_status", "bit of time def get_info(id, repository_interface=None, use_cache=False, repo_status=True): info_dict = {} misc_data =", "item['atime'] = None item['accessed_time'] = item.pop(\"atime\") if item.get('ctime'): try: item['ctime'] = parser.parse(item['ctime']) except", "with LocalSession() as session: snapshot = session.query(Snapshot).filter_by(snap_id=snap_id).first() repository = session.query(Repository).filter_by(id=snapshot.repository_id).first() repository_interface = get_formatted_repository_interface_from_id(snapshot.repository_id)", "misc_data['status'] = repo_status info_dict = dict( id=repository.id, name=repository.name, description=repository.description, repo_id=repository.repo_id, address=repository.address, repository_data=repository.data, concurrent_uses=repository.concurrent_uses,", "''' for snapshot in repository.snapshots: snapshot.snapshot_objects = [] session.commit() ''' job_builder = JobBuilder(job_name=f'Clear", "repo is online, we can purge the snapshots from db as we will", "= [] if not use_cache and repository_interface.is_online(): snapshots = repository_interface.get_snapshots() return snapshots if", "snap_short_id=item.get('snap_short_id'), snap_time=item.get('snap_time'), hostname=item.get('hostname'), username=item.get('username'), tree=item.get('tree'), repository_id=repo_id, paths=json.dumps(item.get('paths')), tags=json.dumps(item.get('tags')) ) session.add(new_snapshot) session.commit() def delete_snapshot(repo_id,", "to False then the repo stats are grabbed from repo itself # which", "repository_interface.get_snapshots() return snapshots if snapshots else {} else: with 
LocalSession() as session: snapshots", "LocalSession() as session: repository = session.query(Repository).filter_by(id=id).first() if repository: return credential_manager.get_credential(repository.credential_group_id, \"repo_password\") else: return", "misc_data = None if repo_status: if not repository_interface: repository_interface = get_formatted_repository_interface_from_id(id) repo_status =", "of snapshots from # the database if use_cache is set to True def", "repository = session.query(Repository).filter_by(id=snapshot.repository_id).first() repository_interface = get_formatted_repository_interface_from_id(snapshot.repository_id) if not use_cache and repository_interface.is_online(): # if", "sync_single_snapshot import json import traceback from datetime import datetime from resticweb.dateutil import parser", ") session.add(repository) session.commit() return repository.id def update_repository(info, repo_id, sync_db=False, unsync_db=False): with LocalSession() as", "return repository.id def update_repository(info, repo_id, sync_db=False, unsync_db=False): with LocalSession() as session: repository =", "with LocalSession() as session: for item in items: if item.get('mtime'): try: item['mtime'] =", "info['name']: credential_manager.set_service_id(repository.credential_group_id, info['name']) repository.name = info['name'] repository.description = info.get('description') repository.address = info['address'] repository.cache_repo", "LocalSession() as session: repository = Repository( name=info['name'], description=info.get('description'), repo_id=info.get('repo_id'), address=info['address'], parameters=info['parameters'], data=info.get('data'), credential_group_id=info.get('credential_group_id'),", "files. 
# if use_cache is set to False then the repo stats are", "# if repository.cache_repo: # sync_snapshot_objects(repository.id, snap_id, repository_interface=repository_interface) return object_list else: with LocalSession() as", "try: item['atime'] = parser.parse(item['atime']) except ValueError: item['atime'] = None item['accessed_time'] = item.pop(\"atime\") if", "use_cache=False): repository_interface = get_formatted_repository_interface_from_id(id) snapshots = [] if not use_cache and repository_interface.is_online(): snapshots", "with LocalSession() as session: repository = session.query(Repository).filter_by(id=id).first() repository_type = session.query(RepositoryType).filter_by(id=repository.repository_type_id).first() if misc_data: repository.data", "LocalSession() as session: snapshots = session.query(Snapshot).filter_by(repository_id=id).all() return snapshots def get_snapshot(repo_id, snapshot_id, use_cache=False): repository_interface", "= session.query(Repository).filter_by(id=id).first() if repository: return repository.address else: return None def get_repository_password(id): with LocalSession()", "get_repository_from_snap_id(snap_id): with LocalSession() as session: snapshot = session.query(Snapshot).filter_by(snap_id=snap_id).first() repository = session.query(Repository).filter_by(id=snapshot.repository_id).first() return repository", "repository.description = info.get('description') repository.address = info['address'] repository.cache_repo = info['cache_repo'] repository.concurrent_uses = info['concurrent_uses'] repository.timeout", "if item.get('ctime'): try: item['ctime'] = parser.parse(item['ctime']) except ValueError: item['ctime'] = None item['created_time'] =", "from resticweb.models.general import Repository, Snapshot, SnapshotObject, JobParameter, RepositoryType from resticweb.tools.local_session import LocalSession from", "''' job_builder = JobBuilder(job_name=f'Clear db from repo {repository.name}', job_class='clear_snapshot_objects', 
parameters=dict(repo_id=repository.id)) job_builder.run_job() return repo_id", "snapshots = repository_interface.get_snapshots() return snapshots if snapshots else {} else: with LocalSession() as", "delete_repositories(ids): credential_groups = [] with LocalSession() as session: for id in ids: repo_to_remove", "= item.pop(\"ctime\") new_item = SnapshotObject( name=item.get('name'), type=item.get('type'), path=item.get('path'), uid=item.get('uid'), gid=item.get('gid'), size=item.get('size'), mode=item.get('mode'), struct_type=item.get('struct_type'),", "> 0 else None, id) return respository_interface except Exception as e: logger.error(e) logger.error(\"trace:\"", "snapshot if snapshot else {} else: with LocalSession() as session: snapshot = session.query(Snapshot).filter_by(repository_id=repo_id,", "= Repository( name=info['name'], description=info.get('description'), repo_id=info.get('repo_id'), address=info['address'], parameters=info['parameters'], data=info.get('data'), credential_group_id=info.get('credential_group_id'), repository_type_id=info['repository_type_id'], concurrent_uses=info.get('concurrent_uses'), timeout=info.get('timeout') )", "used instead of the following method # it's located under resticweb.tools.job_callbacks def add_repository(info):", "try: snapshot.paths = json.loads(snapshot.paths) except ValueError: pass if snapshot.tags: try: snapshot.tags = json.loads(snapshot.tags)", "= get_formatted_repository_interface_from_id(id) repo_status = repository_interface.is_offline() if not use_cache: if not repo_status: misc_data =", "= info['name'] repository.description = info.get('description') repository.address = info['address'] repository.cache_repo = info['cache_repo'] repository.concurrent_uses =", "with LocalSession() as session: snapshot = session.query(Snapshot).filter_by(repository_id=repo_id, snap_short_id=snapshot_id).first() session.delete(snapshot) session.commit() def get_snapshot_objects(snap_id, use_cache=False):", 
"repository_interface.is_offline() if not use_cache: if not repo_status: misc_data = repository_interface.get_stats() with LocalSession() as", "repo_status = repository_interface.is_offline() if not use_cache: if not repo_status: misc_data = repository_interface.get_stats() with", "if not use_cache and repository_interface.is_online(): snapshots = repository_interface.get_snapshots() return snapshots if snapshots else", "= repository_interface.is_offline() if not use_cache: if not repo_status: misc_data = repository_interface.get_stats() with LocalSession()", "# repository_add_to_db is used instead of the following method # it's located under", "fresh from the actual repo object_list = repository_interface.get_snapshot_ls(snap_id) # if repository.cache_repo: # sync_snapshot_objects(repository.id,", "[] with LocalSession() as session: repositories = session.query(Repository).filter_by() for repository in repositories: repository_list.append((repository.id,", "def add_repository(info): with LocalSession() as session: repository = Repository( name=info['name'], description=info.get('description'), repo_id=info.get('repo_id'), address=info['address'],", "job_builder = JobBuilder(job_name=f'Clear db from repo {repository.name}', job_class='clear_snapshot_objects', parameters=dict(repo_id=repository.id)) job_builder.run_job() return repo_id def", "from resticweb.tools.repository_tools import sync_snapshots, sync_snapshot_objects, sync_single_snapshot import json import traceback from datetime import", "item.pop('time') if item['snap_time']: main_time = item['snap_time'][:-7] extra = item['snap_time'][-6:] main_time = main_time +", "tags=json.dumps(item.get('tags')) ) session.add(new_snapshot) session.commit() def delete_snapshot(repo_id, snapshot_id): with LocalSession() as session: snapshot =", "if repository.name != info['name']: credential_manager.set_service_id(repository.credential_group_id, info['name']) repository.name = info['name'] repository.description = 
info.get('description') repository.address", "# it's located under resticweb.tools.job_callbacks def add_repository(info): with LocalSession() as session: repository =", "repository_id=repo_id, paths=json.dumps(item.get('paths')), tags=json.dumps(item.get('tags')) ) session.add(new_snapshot) session.commit() def delete_snapshot(repo_id, snapshot_id): with LocalSession() as session:", "it's located under resticweb.tools.job_callbacks def add_repository(info): with LocalSession() as session: repository = Repository(", "Snapshot( snap_id=item.get('snap_id'), snap_short_id=item.get('snap_short_id'), snap_time=item.get('snap_time'), hostname=item.get('hostname'), username=item.get('username'), tree=item.get('tree'), repository_id=repo_id, paths=json.dumps(item.get('paths')), tags=json.dumps(item.get('tags')) ) session.add(new_snapshot) session.commit()", "get_formatted_repository_interface_from_id(id): try: with LocalSession() as session: repository = session.query(Repository).filter_by(id=id).first() if repository: credential_list =", "return \"Offline\" def get_repository_name(id): with LocalSession() as session: repository = session.query(Repository).filter_by(id=id).first() if repository:", "if repository: return repository.address else: return None def get_repository_password(id): with LocalSession() as session:", "get status\" else: if status: return \"Online\" else: return \"Offline\" def get_repository_name(id): with", "def get_repository_password(id): with LocalSession() as session: repository = session.query(Repository).filter_by(id=id).first() if repository: return credential_manager.get_credential(repository.credential_group_id,", "grabbed from repo itself # which might take a bit of time def", "as we will # just re-add them fresh from the actual repo object_list", "repository.name = info['name'] repository.description = info.get('description') repository.address = info['address'] repository.cache_repo = info['cache_repo'] repository.concurrent_uses", 
"data=misc_data, cache_repo=repository.cache_repo, repository_type=repository_type.name ) return info_dict # returns a list of snapshots and", "with LocalSession() as session: for item in items: item['snap_id'] = item.pop('id') item['snap_short_id'] =", "else None, id) return respository_interface except Exception as e: logger.error(e) logger.error(\"trace:\" + traceback.format_exc())", "as session: snapshots = session.query(Snapshot).filter_by(repository_id=id).all() return snapshots def get_snapshot(repo_id, snapshot_id, use_cache=False): repository_interface =", "not use_cache and repository_interface.is_online(): snapshot = repository_interface.get_snapshots(snapshot_id)[0] return snapshot if snapshot else {}", "resticweb.dateutil import parser import logging logger = logging.getLogger('debugLogger') # repository_add_to_db is used instead", "\"Couldn't get status\" else: if status: return \"Online\" else: return \"Offline\" def get_repository_name(id):", "# if use_cache is set to False then the repo stats are grabbed", "sync_snapshot_objects(repository.id, snap_id, repository_interface=repository_interface) return object_list else: with LocalSession() as session: snapshot_object_list = session.query(SnapshotObject).filter_by(snapshot_id=snap_id).all()", "# item['snap_time'] = datetime.strptime(main_time, \"%Y-%m-%dT%H:%M:%S.%f%z\") item['snap_time'] = parser.parse(main_time) new_snapshot = Snapshot( snap_id=item.get('snap_id'), snap_short_id=item.get('snap_short_id'),", "repository.cache_repo: # sync_snapshot_objects(repository.id, snap_id, repository_interface=repository_interface) return object_list else: with LocalSession() as session: snapshot_object_list", "main_time + extra # item['snap_time'] = datetime.strptime(main_time, \"%Y-%m-%dT%H:%M:%S.%f%z\") item['snap_time'] = parser.parse(main_time) new_snapshot =", "use_cache: if not repo_status: misc_data = repository_interface.get_stats() with LocalSession() as session: repository =", "import LocalSession 
from resticweb.misc.credential_manager import credential_manager # from .repository import ResticRepository from .repository_formatted", "repository_list def get_snapshot_info(id): with LocalSession() as session: snapshot = session.query(Snapshot).filter_by(snap_id=id).first() if snapshot.paths: try:", "JobBuilder(job_name=f\"Sync repo {repository.name}\", job_class='repository_sync', parameters=dict(repository=repository.id, sync_type='full')) job_builder.run_job() if unsync_db: ''' for snapshot in", "return repository # gets basic info about the repository from the database. Also", "with LocalSession() as session: repository = session.query(Repository).filter_by(id=repo_id).first() if repository.name != info['name']: credential_manager.set_service_id(repository.credential_group_id, info['name'])", "if item['snap_time']: main_time = item['snap_time'][:-7] extra = item['snap_time'][-6:] main_time = main_time + extra", "else: if status: return \"Online\" else: return \"Offline\" def get_repository_name(id): with LocalSession() as", "try: item['mtime'] = parser.parse(item['mtime']) except ValueError: item['mtime'] = None item['modified_time'] = item.pop(\"mtime\") if", "= session.query(Repository).filter_by(id=id).first() # credential_manager.remove_credentials(repo_to_remove.credential_group_id) credential_groups.append(repo_to_remove.credential_group_id) job_parameters = session.query(JobParameter).filter_by(param_name='repository', param_value=id).all() for parameter in job_parameters:", "def get_snapshot_info(id): with LocalSession() as session: snapshot = session.query(Snapshot).filter_by(snap_id=id).first() if snapshot.paths: try: snapshot.paths", "if status is None: return \"Couldn't get status\" else: if status: return \"Online\"", "status is None: return \"Couldn't get status\" else: if status: return \"Online\" else:", "as Rep from resticweb.models.general import Repository, Snapshot, SnapshotObject, JobParameter, RepositoryType from 
resticweb.tools.local_session import", "from resticweb.tools.local_session import LocalSession from resticweb.misc.credential_manager import credential_manager # from .repository import ResticRepository", "hostname=item.get('hostname'), username=item.get('username'), tree=item.get('tree'), repository_id=repo_id, paths=json.dumps(item.get('paths')), tags=json.dumps(item.get('tags')) ) session.add(new_snapshot) session.commit() def delete_snapshot(repo_id, snapshot_id): with", "def insert_snapshots(items, repo_id): with LocalSession() as session: for item in items: item['snap_id'] =", "session.commit() ''' job_builder = JobBuilder(job_name=f'Clear db from repo {repository.name}', job_class='clear_snapshot_objects', parameters=dict(repo_id=repository.id)) job_builder.run_job() return", "try: snapshot.tags = json.loads(snapshot.tags) except ValueError: pass return snapshot def get_repository_status(id): repository_interface =", "repository = session.query(Repository).filter_by(id=repo_id).first() if repository.name != info['name']: credential_manager.set_service_id(repository.credential_group_id, info['name']) repository.name = info['name'] repository.description", "with LocalSession() as session: snapshot = session.query(Snapshot).filter_by(repository_id=repo_id, snap_short_id=snapshot_id).first() return snapshot def insert_snapshots(items, repo_id):", "repository from the database. Also grabs the stats # from the repository itself", "\"%Y-%m-%dT%H:%M:%S.%f%z\") item['snap_time'] = parser.parse(main_time) new_snapshot = Snapshot( snap_id=item.get('snap_id'), snap_short_id=item.get('snap_short_id'), snap_time=item.get('snap_time'), hostname=item.get('hostname'), username=item.get('username'), tree=item.get('tree'),", "repository_interface = get_formatted_repository_interface_from_id(snapshot.repository_id) if not use_cache and repository_interface.is_online(): # if the repo is", "repository itself like the total size and number of files. 
# if use_cache", "from resticweb.dateutil import parser import logging logger = logging.getLogger('debugLogger') # repository_add_to_db is used", "# sync_snapshot_objects(repository.id, snap_id, repository_interface=repository_interface) return object_list else: with LocalSession() as session: snapshot_object_list =", "id in credential_groups: credential_manager.remove_credentials(id) def get_repository_from_snap_id(snap_id): with LocalSession() as session: snapshot = session.query(Snapshot).filter_by(snap_id=snap_id).first()", "number of files. # if use_cache is set to False then the repo", "parser.parse(item['ctime']) except ValueError: item['ctime'] = None item['created_time'] = item.pop(\"ctime\") new_item = SnapshotObject( name=item.get('name'),", "in repository.snapshots: snapshot.snapshot_objects = [] session.commit() ''' job_builder = JobBuilder(job_name=f'Clear db from repo", "None if repo_status: if not repository_interface: repository_interface = get_formatted_repository_interface_from_id(id) repo_status = repository_interface.is_offline() if", "repository_interface = get_formatted_repository_interface_from_id(id) snapshots = [] if not use_cache and repository_interface.is_online(): snapshots =", "repository_interface = get_formatted_repository_interface_from_id(repo_id) if not use_cache and repository_interface.is_online(): snapshot = repository_interface.get_snapshots(snapshot_id)[0] return snapshot", "created_time=item.get('created_time'), snapshot_id=snap_id ) session.add(new_item) session.commit() def get_engine_repositories(): repository_list = [] with LocalSession() as", "session.query(Repository).filter_by(id=id).first() if repository: return repository.name else: return None def get_repository_address(id): with LocalSession() as", "= session.query(Repository).filter_by(id=id).first() if repository: credential_list = credential_manager.get_group_credentials(repository.credential_group_id) if credential_list: repo_password = 
credential_list.pop('repo_password') respository_interface", "info about the repository from the database. Also grabs the stats # from", "and number of files. # if use_cache is set to False then the", "in job_parameters: parameter.param_value = None session.delete(repo_to_remove) session.commit() for id in credential_groups: credential_manager.remove_credentials(id) def", "credential_manager.remove_credentials(id) def get_repository_from_snap_id(snap_id): with LocalSession() as session: snapshot = session.query(Snapshot).filter_by(snap_id=snap_id).first() repository = session.query(Repository).filter_by(id=snapshot.repository_id).first()", "path=item.get('path'), uid=item.get('uid'), gid=item.get('gid'), size=item.get('size'), mode=item.get('mode'), struct_type=item.get('struct_type'), modified_time=item.get('modified_time'), accessed_time=item.get('accessed_time'), created_time=item.get('created_time'), snapshot_id=snap_id ) session.add(new_item) session.commit()", "modified_time=item.get('modified_time'), accessed_time=item.get('accessed_time'), created_time=item.get('created_time'), snapshot_id=snap_id ) session.add(new_item) session.commit() def get_engine_repositories(): repository_list = [] with", "import parser import logging logger = logging.getLogger('debugLogger') # repository_add_to_db is used instead of", "as session: snapshot_object_list = session.query(SnapshotObject).filter_by(snapshot_id=snap_id).all() snapshot_dict_list = [snapshot_object.to_dict() for snapshot_object in snapshot_object_list] return", "time def get_info(id, repository_interface=None, use_cache=False, repo_status=True): info_dict = {} misc_data = None if", "= item.pop('short_id') item['snap_time'] = item.pop('time') if item['snap_time']: main_time = item['snap_time'][:-7] extra = item['snap_time'][-6:]", "# repository if use_cache is set to False. 
Returns list of snapshots from", "repository.name != info['name']: credential_manager.set_service_id(repository.credential_group_id, info['name']) repository.name = info['name'] repository.description = info.get('description') repository.address =", "sync_snapshots, sync_snapshot_objects, sync_single_snapshot import json import traceback from datetime import datetime from resticweb.dateutil", "session: snapshot = session.query(Snapshot).filter_by(snap_id=snap_id).first() repository = session.query(Repository).filter_by(id=snapshot.repository_id).first() return repository # gets basic info", "session: snapshot = session.query(Snapshot).filter_by(repository_id=repo_id, snap_short_id=snapshot_id).first() session.delete(snapshot) session.commit() def get_snapshot_objects(snap_id, use_cache=False): with LocalSession() as", "if not repo_status: misc_data = repository_interface.get_stats() with LocalSession() as session: repository = session.query(Repository).filter_by(id=id).first()", "return repo_id def delete_repositories(ids): credential_groups = [] with LocalSession() as session: for id", "extra = item['snap_time'][-6:] main_time = main_time + extra # item['snap_time'] = datetime.strptime(main_time, \"%Y-%m-%dT%H:%M:%S.%f%z\")", "db as we will # just re-add them fresh from the actual repo", "repository # gets basic info about the repository from the database. 
Also grabs", "else: return None def get_formatted_repository_interface_from_id(id): try: with LocalSession() as session: repository = session.query(Repository).filter_by(id=id).first()", "Repository( name=info['name'], description=info.get('description'), repo_id=info.get('repo_id'), address=info['address'], parameters=info['parameters'], data=info.get('data'), credential_group_id=info.get('credential_group_id'), repository_type_id=info['repository_type_id'], concurrent_uses=info.get('concurrent_uses'), timeout=info.get('timeout') ) session.add(repository)", "session.query(Repository).filter_by(id=snapshot.repository_id).first() repository_interface = get_formatted_repository_interface_from_id(snapshot.repository_id) if not use_cache and repository_interface.is_online(): # if the repo", "get_engine_repositories(): repository_list = [] with LocalSession() as session: repositories = session.query(Repository).filter_by() for repository", "snapshot.tags: try: snapshot.tags = json.loads(snapshot.tags) except ValueError: pass return snapshot def get_repository_status(id): repository_interface", "traceback from datetime import datetime from resticweb.dateutil import parser import logging logger =", "session: repository = session.query(Repository).filter_by(id=id).first() if repository: return repository.name else: return None def get_repository_address(id):", "ValueError: item['atime'] = None item['accessed_time'] = item.pop(\"atime\") if item.get('ctime'): try: item['ctime'] = parser.parse(item['ctime'])", "get_snapshots(id, use_cache=False): repository_interface = get_formatted_repository_interface_from_id(id) snapshots = [] if not use_cache and repository_interface.is_online():", "= json.loads(repository.data) except TypeError: misc_data = dict(data=repository.data) misc_data['status'] = repo_status info_dict = dict(", "parameter.param_value = None session.delete(repo_to_remove) session.commit() for id in credential_groups: credential_manager.remove_credentials(id) def 
get_repository_from_snap_id(snap_id): with", "database. Also grabs the stats # from the repository itself like the total", "parameters=dict(repository=repository.id, sync_type='full')) job_builder.run_job() if unsync_db: ''' for snapshot in repository.snapshots: snapshot.snapshot_objects = []", "TypeError: misc_data = dict(data=repository.data) misc_data['status'] = repo_status info_dict = dict( id=repository.id, name=repository.name, description=repository.description,", "snapshot_object_list = session.query(SnapshotObject).filter_by(snapshot_id=snap_id).all() snapshot_dict_list = [snapshot_object.to_dict() for snapshot_object in snapshot_object_list] return snapshot_dict_list def", "if status: return \"Online\" else: return \"Offline\" def get_repository_name(id): with LocalSession() as session:", "{repository.name}', job_class='clear_snapshot_objects', parameters=dict(repo_id=repository.id)) job_builder.run_job() return repo_id def delete_repositories(ids): credential_groups = [] with LocalSession()", "size and number of files. # if use_cache is set to False then", "item in items: if item.get('mtime'): try: item['mtime'] = parser.parse(item['mtime']) except ValueError: item['mtime'] =", "concurrent_uses=info.get('concurrent_uses'), timeout=info.get('timeout') ) session.add(repository) session.commit() return repository.id def update_repository(info, repo_id, sync_db=False, unsync_db=False): with", "to True def get_snapshots(id, use_cache=False): repository_interface = get_formatted_repository_interface_from_id(id) snapshots = [] if not", "LocalSession() as session: repository = session.query(Repository).filter_by(id=id).first() if repository: return repository.name else: return None", "LocalSession() as session: for item in items: item['snap_id'] = item.pop('id') item['snap_short_id'] = item.pop('short_id')", "False. 
Returns list of snapshots from # the database if use_cache is set", "snapshot_dict_list = [snapshot_object.to_dict() for snapshot_object in snapshot_object_list] return snapshot_dict_list def delete_snapshot_objects(snap_id): pass def", "def get_repository_from_snap_id(snap_id): with LocalSession() as session: snapshot = session.query(Snapshot).filter_by(snap_id=snap_id).first() repository = session.query(Repository).filter_by(id=snapshot.repository_id).first() return", "snapshot else {} else: with LocalSession() as session: snapshot = session.query(Snapshot).filter_by(repository_id=repo_id, snap_short_id=snapshot_id).first() return", "None: return \"Couldn't get status\" else: if status: return \"Online\" else: return \"Offline\"", "into the database from the # repository if use_cache is set to False.", "unsync_db=False): with LocalSession() as session: repository = session.query(Repository).filter_by(id=repo_id).first() if repository.name != info['name']: credential_manager.set_service_id(repository.credential_group_id,", "snapshot def get_repository_status(id): repository_interface = get_formatted_repository_interface_from_id(id) status = repository_interface.is_online() if status is None:", "= dict(data=repository.data) misc_data['status'] = repo_status info_dict = dict( id=repository.id, name=repository.name, description=repository.description, repo_id=repository.repo_id, address=repository.address,", "from the actual repo object_list = repository_interface.get_snapshot_ls(snap_id) # if repository.cache_repo: # sync_snapshot_objects(repository.id, snap_id,", "under resticweb.tools.job_callbacks def add_repository(info): with LocalSession() as session: repository = Repository( name=info['name'], description=info.get('description'),", "info['name'] repository.description = info.get('description') repository.address = info['address'] repository.cache_repo = info['cache_repo'] repository.concurrent_uses = info['concurrent_uses']", "in items: item['snap_id'] = 
item.pop('id') item['snap_short_id'] = item.pop('short_id') item['snap_time'] = item.pop('time') if item['snap_time']:", "main_time = main_time + extra # item['snap_time'] = datetime.strptime(main_time, \"%Y-%m-%dT%H:%M:%S.%f%z\") item['snap_time'] = parser.parse(main_time)", "repository_add_to_db is used instead of the following method # it's located under resticweb.tools.job_callbacks", "import credential_manager # from .repository import ResticRepository from .repository_formatted import ResticRepositoryFormatted from resticweb.tools.repository_tools", "session.query(Repository).filter_by(id=id).first() if repository: return repository.address else: return None def get_repository_password(id): with LocalSession() as", "set to True def get_snapshots(id, use_cache=False): repository_interface = get_formatted_repository_interface_from_id(id) snapshots = [] if", "if unsync_db: ''' for snapshot in repository.snapshots: snapshot.snapshot_objects = [] session.commit() ''' job_builder", "LocalSession from resticweb.misc.credential_manager import credential_manager # from .repository import ResticRepository from .repository_formatted import", "repo_status info_dict = dict( id=repository.id, name=repository.name, description=repository.description, repo_id=repository.repo_id, address=repository.address, repository_data=repository.data, concurrent_uses=repository.concurrent_uses, timeout=repository.timeout, data=misc_data,", "item.pop('short_id') item['snap_time'] = item.pop('time') if item['snap_time']: main_time = item['snap_time'][:-7] extra = item['snap_time'][-6:] main_time", "itself like the total size and number of files. 
# if use_cache is", "if sync_db: job_builder = JobBuilder(job_name=f\"Sync repo {repository.name}\", job_class='repository_sync', parameters=dict(repository=repository.id, sync_type='full')) job_builder.run_job() if unsync_db:", "return repository.address else: return None def get_repository_password(id): with LocalSession() as session: repository =", "# which might take a bit of time def get_info(id, repository_interface=None, use_cache=False, repo_status=True):", "session: repository = session.query(Repository).filter_by(id=id).first() repository_type = session.query(RepositoryType).filter_by(id=repository.repository_type_id).first() if misc_data: repository.data = json.dumps(misc_data) session.commit()", "def get_info(id, repository_interface=None, use_cache=False, repo_status=True): info_dict = {} misc_data = None if repo_status:", "delete_snapshot_objects(snap_id): pass def insert_snapshot_objects(items, snap_id): with LocalSession() as session: for item in items:", "= info['concurrent_uses'] repository.timeout = info['timeout'] repository.parameters = json.dumps(info['parameters']) session.commit() from resticweb.tools.job_build import JobBuilder", "snap_id): with LocalSession() as session: for item in items: if item.get('mtime'): try: item['mtime']", "repo_id=repository.repo_id, address=repository.address, repository_data=repository.data, concurrent_uses=repository.concurrent_uses, timeout=repository.timeout, data=misc_data, cache_repo=repository.cache_repo, repository_type=repository_type.name ) return info_dict # returns", "pass return snapshot def get_repository_status(id): repository_interface = get_formatted_repository_interface_from_id(id) status = repository_interface.is_online() if status", "repository_interface.get_snapshot_ls(snap_id) # if repository.cache_repo: # sync_snapshot_objects(repository.id, snap_id, repository_interface=repository_interface) return object_list else: with LocalSession()", "item.get('ctime'): try: item['ctime'] = 
parser.parse(item['ctime']) except ValueError: item['ctime'] = None item['created_time'] = item.pop(\"ctime\")", "repository_interface.is_online(): snapshots = repository_interface.get_snapshots() return snapshots if snapshots else {} else: with LocalSession()", "item['mtime'] = parser.parse(item['mtime']) except ValueError: item['mtime'] = None item['modified_time'] = item.pop(\"mtime\") if item.get('atime'):", "resticweb.tools.repository_tools import sync_snapshots, sync_snapshot_objects, sync_single_snapshot import json import traceback from datetime import datetime", "import ResticRepository from .repository_formatted import ResticRepositoryFormatted from resticweb.tools.repository_tools import sync_snapshots, sync_snapshot_objects, sync_single_snapshot import", "from the # repository if use_cache is set to False. Returns list of", "repo {repository.name}\", job_class='repository_sync', parameters=dict(repository=repository.id, sync_type='full')) job_builder.run_job() if unsync_db: ''' for snapshot in repository.snapshots:", "if repository.cache_repo: # sync_snapshot_objects(repository.id, snap_id, repository_interface=repository_interface) return object_list else: with LocalSession() as session:", "resticweb.tools.job_build import JobBuilder if sync_db: job_builder = JobBuilder(job_name=f\"Sync repo {repository.name}\", job_class='repository_sync', parameters=dict(repository=repository.id, sync_type='full'))", "with LocalSession() as session: snapshot = session.query(Snapshot).filter_by(snap_id=snap_id).first() repository = session.query(Repository).filter_by(id=snapshot.repository_id).first() return repository #", "try: misc_data = json.loads(repository.data) except TypeError: misc_data = dict(data=repository.data) misc_data['status'] = repo_status info_dict", "LocalSession() as session: repository = session.query(Repository).filter_by(id=id).first() if repository: credential_list = credential_manager.get_group_credentials(repository.credential_group_id) if 
def get_formatted_repository_interface_from_id(id):
    """Build a ResticRepositoryFormatted for the repository row ``id``.

    Returns None when the repository row is missing, when no credentials
    are stored for it, or when interface construction fails.
    """
    try:
        with LocalSession() as session:
            repository = session.query(Repository).filter_by(id=id).first()
            if repository:
                credential_list = credential_manager.get_group_credentials(
                    repository.credential_group_id)
                if credential_list:
                    # The repo password travels separately from the rest of
                    # the credential set.
                    repo_password = credential_list.pop('repo_password')
                    repository_interface = ResticRepositoryFormatted(
                        repository.address,
                        repo_password,
                        credential_list if len(credential_list) > 0 else None,
                        id)
                    return repository_interface
    except Exception as e:
        logger.error(e)
        return None
def get_repository_password(id):
    """Return the stored restic password for repository ``id``.

    Returns None when the repository row or its credential group is absent.
    """
    with LocalSession() as session:
        repository = session.query(Repository).filter_by(id=id).first()
        if repository:
            credential_list = credential_manager.get_group_credentials(
                repository.credential_group_id)
            if credential_list:
                return credential_list.pop('repo_password')
    return None
Returns list of snapshots", "SnapshotObject( name=item.get('name'), type=item.get('type'), path=item.get('path'), uid=item.get('uid'), gid=item.get('gid'), size=item.get('size'), mode=item.get('mode'), struct_type=item.get('struct_type'), modified_time=item.get('modified_time'), accessed_time=item.get('accessed_time'), created_time=item.get('created_time'), snapshot_id=snap_id", "logging logger = logging.getLogger('debugLogger') # repository_add_to_db is used instead of the following method", "repository.cache_repo = info['cache_repo'] repository.concurrent_uses = info['concurrent_uses'] repository.timeout = info['timeout'] repository.parameters = json.dumps(info['parameters']) session.commit()", "def update_repository(info, repo_id, sync_db=False, unsync_db=False): with LocalSession() as session: repository = session.query(Repository).filter_by(id=repo_id).first() if", "# the database if use_cache is set to True def get_snapshots(id, use_cache=False): repository_interface", "not use_cache and repository_interface.is_online(): # if the repo is online, we can purge", "credential_groups.append(repo_to_remove.credential_group_id) job_parameters = session.query(JobParameter).filter_by(param_name='repository', param_value=id).all() for parameter in job_parameters: parameter.param_value = None session.delete(repo_to_remove)", "for item in items: item['snap_id'] = item.pop('id') item['snap_short_id'] = item.pop('short_id') item['snap_time'] = item.pop('time')", "= session.query(Repository).filter_by(id=repo_id).first() if repository.name != info['name']: credential_manager.set_service_id(repository.credential_group_id, info['name']) repository.name = info['name'] repository.description =", "\"Offline\" def get_repository_name(id): with LocalSession() as session: repository = session.query(Repository).filter_by(id=id).first() if repository: return", "else: return \"Offline\" def get_repository_name(id): with LocalSession() as session: repository = 
session.query(Repository).filter_by(id=id).first() if", "repo_to_remove = session.query(Repository).filter_by(id=id).first() # credential_manager.remove_credentials(repo_to_remove.credential_group_id) credential_groups.append(repo_to_remove.credential_group_id) job_parameters = session.query(JobParameter).filter_by(param_name='repository', param_value=id).all() for parameter in", "= session.query(Snapshot).filter_by(snap_id=snap_id).first() repository = session.query(Repository).filter_by(id=snapshot.repository_id).first() return repository # gets basic info about the", "= repo_status info_dict = dict( id=repository.id, name=repository.name, description=repository.description, repo_id=repository.repo_id, address=repository.address, repository_data=repository.data, concurrent_uses=repository.concurrent_uses, timeout=repository.timeout,", "use_cache is set to False then the repo stats are grabbed from repo", "take a bit of time def get_info(id, repository_interface=None, use_cache=False, repo_status=True): info_dict = {}", "= item.pop(\"atime\") if item.get('ctime'): try: item['ctime'] = parser.parse(item['ctime']) except ValueError: item['ctime'] = None", "= repository_interface.get_snapshot_ls(snap_id) # if repository.cache_repo: # sync_snapshot_objects(repository.id, snap_id, repository_interface=repository_interface) return object_list else: with", "snapshots from # the database if use_cache is set to True def get_snapshots(id,", "type=item.get('type'), path=item.get('path'), uid=item.get('uid'), gid=item.get('gid'), size=item.get('size'), mode=item.get('mode'), struct_type=item.get('struct_type'), modified_time=item.get('modified_time'), accessed_time=item.get('accessed_time'), created_time=item.get('created_time'), snapshot_id=snap_id ) session.add(new_item)", "{} else: with LocalSession() as session: snapshot = session.query(Snapshot).filter_by(repository_id=repo_id, snap_short_id=snapshot_id).first() return snapshot def", "else {} else: with LocalSession() as 
session: snapshot = session.query(Snapshot).filter_by(repository_id=repo_id, snap_short_id=snapshot_id).first() return snapshot", "import ResticRepositoryFormatted from resticweb.tools.repository_tools import sync_snapshots, sync_snapshot_objects, sync_single_snapshot import json import traceback from", "repository.parameters = json.dumps(info['parameters']) session.commit() from resticweb.tools.job_build import JobBuilder if sync_db: job_builder = JobBuilder(job_name=f\"Sync", "# returns a list of snapshots and places them into the database from", "None item['accessed_time'] = item.pop(\"atime\") if item.get('ctime'): try: item['ctime'] = parser.parse(item['ctime']) except ValueError: item['ctime']", "else: return None def get_repository_password(id): with LocalSession() as session: repository = session.query(Repository).filter_by(id=id).first() if", "name=info['name'], description=info.get('description'), repo_id=info.get('repo_id'), address=info['address'], parameters=info['parameters'], data=info.get('data'), credential_group_id=info.get('credential_group_id'), repository_type_id=info['repository_type_id'], concurrent_uses=info.get('concurrent_uses'), timeout=info.get('timeout') ) session.add(repository) session.commit()", "id in ids: repo_to_remove = session.query(Repository).filter_by(id=id).first() # credential_manager.remove_credentials(repo_to_remove.credential_group_id) credential_groups.append(repo_to_remove.credential_group_id) job_parameters = session.query(JobParameter).filter_by(param_name='repository', param_value=id).all()", "repository_list = [] with LocalSession() as session: repositories = session.query(Repository).filter_by() for repository in", "object_list else: with LocalSession() as session: snapshot_object_list = session.query(SnapshotObject).filter_by(snapshot_id=snap_id).all() snapshot_dict_list = [snapshot_object.to_dict() for", "if snapshot.tags: try: snapshot.tags = json.loads(snapshot.tags) except ValueError: pass return snapshot def 
get_repository_status(id):", "not repository_interface: repository_interface = get_formatted_repository_interface_from_id(id) repo_status = repository_interface.is_offline() if not use_cache: if not", "and places them into the database from the # repository if use_cache is", "name=repository.name, description=repository.description, repo_id=repository.repo_id, address=repository.address, repository_data=repository.data, concurrent_uses=repository.concurrent_uses, timeout=repository.timeout, data=misc_data, cache_repo=repository.cache_repo, repository_type=repository_type.name ) return info_dict", "else: with LocalSession() as session: snapshots = session.query(Snapshot).filter_by(repository_id=id).all() return snapshots def get_snapshot(repo_id, snapshot_id,", "= json.dumps(info['parameters']) session.commit() from resticweb.tools.job_build import JobBuilder if sync_db: job_builder = JobBuilder(job_name=f\"Sync repo", "= session.query(Snapshot).filter_by(repository_id=id).all() return snapshots def get_snapshot(repo_id, snapshot_id, use_cache=False): repository_interface = get_formatted_repository_interface_from_id(repo_id) if not", "session.commit() def delete_snapshot(repo_id, snapshot_id): with LocalSession() as session: snapshot = session.query(Snapshot).filter_by(repository_id=repo_id, snap_short_id=snapshot_id).first() session.delete(snapshot)", "repo_id, sync_db=False, unsync_db=False): with LocalSession() as session: repository = session.query(Repository).filter_by(id=repo_id).first() if repository.name !=", "repository_interface.is_online(): snapshot = repository_interface.get_snapshots(snapshot_id)[0] return snapshot if snapshot else {} else: with LocalSession()", "get_snapshot_info(id): with LocalSession() as session: snapshot = session.query(Snapshot).filter_by(snap_id=id).first() if snapshot.paths: try: snapshot.paths =", "repository.name else: return None def get_repository_address(id): with LocalSession() as session: repository = 
session.query(Repository).filter_by(id=id).first()", "credential_group_id=info.get('credential_group_id'), repository_type_id=info['repository_type_id'], concurrent_uses=info.get('concurrent_uses'), timeout=info.get('timeout') ) session.add(repository) session.commit() return repository.id def update_repository(info, repo_id, sync_db=False,", "= item['snap_time'][-6:] main_time = main_time + extra # item['snap_time'] = datetime.strptime(main_time, \"%Y-%m-%dT%H:%M:%S.%f%z\") item['snap_time']", "credential_list if len(credential_list) > 0 else None, id) return respository_interface except Exception as", "session: repository = session.query(Repository).filter_by(id=id).first() if repository: credential_list = credential_manager.get_group_credentials(repository.credential_group_id) if credential_list: repo_password =", "returns a list of snapshots and places them into the database from the", "if repository: return repository.name else: return None def get_repository_address(id): with LocalSession() as session:", "if snapshot.paths: try: snapshot.paths = json.loads(snapshot.paths) except ValueError: pass if snapshot.tags: try: snapshot.tags", "snapshot_dict_list def delete_snapshot_objects(snap_id): pass def insert_snapshot_objects(items, snap_id): with LocalSession() as session: for item", "repo stats are grabbed from repo itself # which might take a bit", "for id in credential_groups: credential_manager.remove_credentials(id) def get_repository_from_snap_id(snap_id): with LocalSession() as session: snapshot =", "use_cache=False): with LocalSession() as session: snapshot = session.query(Snapshot).filter_by(snap_id=snap_id).first() repository = session.query(Repository).filter_by(id=snapshot.repository_id).first() repository_interface =", "address=repository.address, repository_data=repository.data, concurrent_uses=repository.concurrent_uses, timeout=repository.timeout, data=misc_data, cache_repo=repository.cache_repo, repository_type=repository_type.name ) return 
# gets basic info about the repository from the database. Also grabs the stats
# from the repository itself like the total size and number of files.
# if use_cache is set to False then the repo stats are grabbed from repo itself
# which might take a bit of time
def get_info(id, repository_interface=None, use_cache=False, repo_status=True):
    """Assemble an info dict for repository ``id``.

    Stats are refreshed from the live repository when ``use_cache`` is False
    and the repo is reachable; otherwise the JSON cached on the row is used.
    NOTE(review): ``repo_status`` is rebound to ``is_offline()``'s result and
    exposed as ``misc_data['status']`` — truthy apparently means offline;
    confirm what consumers expect before changing.
    """
    info_dict = {}
    misc_data = None
    if repo_status:
        if not repository_interface:
            repository_interface = get_formatted_repository_interface_from_id(id)
        repo_status = repository_interface.is_offline()
    if not use_cache:
        if not repo_status:
            # Only hit the live repository when it is not offline.
            misc_data = repository_interface.get_stats()
    with LocalSession() as session:
        repository = session.query(Repository).filter_by(id=id).first()
        repository_type = session.query(RepositoryType).filter_by(
            id=repository.repository_type_id).first()
        if misc_data:
            # Fresh stats: cache them back onto the row.
            repository.data = json.dumps(misc_data)
            session.commit()
        else:
            # Fall back to the cached JSON; a non-string/None payload is
            # wrapped as-is.
            try:
                misc_data = json.loads(repository.data)
            except TypeError:
                misc_data = dict(data=repository.data)
        misc_data['status'] = repo_status
        info_dict = dict(
            id=repository.id,
            name=repository.name,
            description=repository.description,
            repo_id=repository.repo_id,
            address=repository.address,
            repository_data=repository.data,
            concurrent_uses=repository.concurrent_uses,
            timeout=repository.timeout,
            data=misc_data,
            cache_repo=repository.cache_repo,
            repository_type=repository_type.name
        )
        return info_dict
#", "session.query(SnapshotObject).filter_by(snapshot_id=snap_id).all() snapshot_dict_list = [snapshot_object.to_dict() for snapshot_object in snapshot_object_list] return snapshot_dict_list def delete_snapshot_objects(snap_id): pass", "= session.query(Repository).filter_by(id=snapshot.repository_id).first() return repository # gets basic info about the repository from the", "object_list = repository_interface.get_snapshot_ls(snap_id) # if repository.cache_repo: # sync_snapshot_objects(repository.id, snap_id, repository_interface=repository_interface) return object_list else:", "snapshot_object_list] return snapshot_dict_list def delete_snapshot_objects(snap_id): pass def insert_snapshot_objects(items, snap_id): with LocalSession() as session:", "return None def get_formatted_repository_interface_from_id(id): try: with LocalSession() as session: repository = session.query(Repository).filter_by(id=id).first() if", "resticweb.models.general import Repository, Snapshot, SnapshotObject, JobParameter, RepositoryType from resticweb.tools.local_session import LocalSession from resticweb.misc.credential_manager", "\"Online\" else: return \"Offline\" def get_repository_name(id): with LocalSession() as session: repository = session.query(Repository).filter_by(id=id).first()", "not use_cache: if not repo_status: misc_data = repository_interface.get_stats() with LocalSession() as session: repository", "parameters=info['parameters'], data=info.get('data'), credential_group_id=info.get('credential_group_id'), repository_type_id=info['repository_type_id'], concurrent_uses=info.get('concurrent_uses'), timeout=info.get('timeout') ) session.add(repository) session.commit() return repository.id def update_repository(info,", "itself # which might take a bit of time def get_info(id, repository_interface=None, use_cache=False,", "snapshot = session.query(Snapshot).filter_by(repository_id=repo_id, snap_short_id=snapshot_id).first() session.delete(snapshot) session.commit() def 
get_snapshot_objects(snap_id, use_cache=False): with LocalSession() as session:", "= datetime.strptime(main_time, \"%Y-%m-%dT%H:%M:%S.%f%z\") item['snap_time'] = parser.parse(main_time) new_snapshot = Snapshot( snap_id=item.get('snap_id'), snap_short_id=item.get('snap_short_id'), snap_time=item.get('snap_time'), hostname=item.get('hostname'),", "parser import logging logger = logging.getLogger('debugLogger') # repository_add_to_db is used instead of the", "repo {repository.name}', job_class='clear_snapshot_objects', parameters=dict(repo_id=repository.id)) job_builder.run_job() return repo_id def delete_repositories(ids): credential_groups = [] with", "JobBuilder(job_name=f'Clear db from repo {repository.name}', job_class='clear_snapshot_objects', parameters=dict(repo_id=repository.id)) job_builder.run_job() return repo_id def delete_repositories(ids): credential_groups", "database if use_cache is set to True def get_snapshots(id, use_cache=False): repository_interface = get_formatted_repository_interface_from_id(id)", "and repository_interface.is_online(): snapshot = repository_interface.get_snapshots(snapshot_id)[0] return snapshot if snapshot else {} else: with", "get_repository_name(id): with LocalSession() as session: repository = session.query(Repository).filter_by(id=id).first() if repository: return repository.name else:", "use_cache is set to True def get_snapshots(id, use_cache=False): repository_interface = get_formatted_repository_interface_from_id(id) snapshots =", "= repository_interface.get_snapshots() return snapshots if snapshots else {} else: with LocalSession() as session:", "info_dict = dict( id=repository.id, name=repository.name, description=repository.description, repo_id=repository.repo_id, address=repository.address, repository_data=repository.data, concurrent_uses=repository.concurrent_uses, timeout=repository.timeout, data=misc_data, cache_repo=repository.cache_repo,", "with LocalSession() as session: repository = 
session.query(Repository).filter_by(id=id).first() if repository: return repository.address else: return", "with LocalSession() as session: repository = Repository( name=info['name'], description=info.get('description'), repo_id=info.get('repo_id'), address=info['address'], parameters=info['parameters'], data=info.get('data'),", "= session.query(Snapshot).filter_by(snap_id=snap_id).first() repository = session.query(Repository).filter_by(id=snapshot.repository_id).first() repository_interface = get_formatted_repository_interface_from_id(snapshot.repository_id) if not use_cache and repository_interface.is_online():", "repository_interface=None, use_cache=False, repo_status=True): info_dict = {} misc_data = None if repo_status: if not", "get_info(id, repository_interface=None, use_cache=False, repo_status=True): info_dict = {} misc_data = None if repo_status: if", "session.delete(snapshot) session.commit() def get_snapshot_objects(snap_id, use_cache=False): with LocalSession() as session: snapshot = session.query(Snapshot).filter_by(snap_id=snap_id).first() repository", "def get_repository_name(id): with LocalSession() as session: repository = session.query(Repository).filter_by(id=id).first() if repository: return repository.name", "snapshot = session.query(Snapshot).filter_by(repository_id=repo_id, snap_short_id=snapshot_id).first() return snapshot def insert_snapshots(items, repo_id): with LocalSession() as session:", "dict(data=repository.data) misc_data['status'] = repo_status info_dict = dict( id=repository.id, name=repository.name, description=repository.description, repo_id=repository.repo_id, address=repository.address, repository_data=repository.data,", "re-add them fresh from the actual repo object_list = repository_interface.get_snapshot_ls(snap_id) # if repository.cache_repo:", "def get_engine_repositories(): repository_list = [] with LocalSession() as session: repositories = session.query(Repository).filter_by() for", "is None: return \"Couldn't get 
status\" else: if status: return \"Online\" else: return", "job_class='clear_snapshot_objects', parameters=dict(repo_id=repository.id)) job_builder.run_job() return repo_id def delete_repositories(ids): credential_groups = [] with LocalSession() as", "grabs the stats # from the repository itself like the total size and", "in items: if item.get('mtime'): try: item['mtime'] = parser.parse(item['mtime']) except ValueError: item['mtime'] = None", "return None def get_repository_password(id): with LocalSession() as session: repository = session.query(Repository).filter_by(id=id).first() if repository:", "len(credential_list) > 0 else None, id) return respository_interface except Exception as e: logger.error(e)", "repo_status: if not repository_interface: repository_interface = get_formatted_repository_interface_from_id(id) repo_status = repository_interface.is_offline() if not use_cache:", "size=item.get('size'), mode=item.get('mode'), struct_type=item.get('struct_type'), modified_time=item.get('modified_time'), accessed_time=item.get('accessed_time'), created_time=item.get('created_time'), snapshot_id=snap_id ) session.add(new_item) session.commit() def get_engine_repositories(): repository_list", "main_time = item['snap_time'][:-7] extra = item['snap_time'][-6:] main_time = main_time + extra # item['snap_time']", "stats are grabbed from repo itself # which might take a bit of", "resticweb.misc.credential_manager import credential_manager # from .repository import ResticRepository from .repository_formatted import ResticRepositoryFormatted from", "session: snapshot = session.query(Snapshot).filter_by(repository_id=repo_id, snap_short_id=snapshot_id).first() return snapshot def insert_snapshots(items, repo_id): with LocalSession() as", "+ extra # item['snap_time'] = datetime.strptime(main_time, \"%Y-%m-%dT%H:%M:%S.%f%z\") item['snap_time'] = parser.parse(main_time) new_snapshot = Snapshot(", "as session: snapshot = session.query(Snapshot).filter_by(snap_id=id).first() 
if snapshot.paths: try: snapshot.paths = json.loads(snapshot.paths) except ValueError:", "session: snapshot = session.query(Snapshot).filter_by(snap_id=snap_id).first() repository = session.query(Repository).filter_by(id=snapshot.repository_id).first() repository_interface = get_formatted_repository_interface_from_id(snapshot.repository_id) if not use_cache", "session.query(Snapshot).filter_by(repository_id=repo_id, snap_short_id=snapshot_id).first() session.delete(snapshot) session.commit() def get_snapshot_objects(snap_id, use_cache=False): with LocalSession() as session: snapshot =", "import datetime from resticweb.dateutil import parser import logging logger = logging.getLogger('debugLogger') # repository_add_to_db", "repository_list.append((repository.id, repository.name)) return repository_list def get_snapshot_info(id): with LocalSession() as session: snapshot = session.query(Snapshot).filter_by(snap_id=id).first()", "use_cache=False, repo_status=True): info_dict = {} misc_data = None if repo_status: if not repository_interface:", ".repository_formatted import ResticRepositoryFormatted from resticweb.tools.repository_tools import sync_snapshots, sync_snapshot_objects, sync_single_snapshot import json import traceback", "# from .repository import ResticRepository from .repository_formatted import ResticRepositoryFormatted from resticweb.tools.repository_tools import sync_snapshots,", "address=info['address'], parameters=info['parameters'], data=info.get('data'), credential_group_id=info.get('credential_group_id'), repository_type_id=info['repository_type_id'], concurrent_uses=info.get('concurrent_uses'), timeout=info.get('timeout') ) session.add(repository) session.commit() return repository.id def", "return snapshot_dict_list def delete_snapshot_objects(snap_id): pass def insert_snapshot_objects(items, snap_id): with LocalSession() as session: for", "snapshots and places them into the database from the # repository if use_cache", "if snapshots else {} else: 
with LocalSession() as session: snapshots = session.query(Snapshot).filter_by(repository_id=id).all() return", "with LocalSession() as session: repository = session.query(Repository).filter_by(id=id).first() if repository: return repository.name else: return", "from the database. Also grabs the stats # from the repository itself like", "job_parameters = session.query(JobParameter).filter_by(param_name='repository', param_value=id).all() for parameter in job_parameters: parameter.param_value = None session.delete(repo_to_remove) session.commit()", "the repo stats are grabbed from repo itself # which might take a", "= session.query(Snapshot).filter_by(snap_id=id).first() if snapshot.paths: try: snapshot.paths = json.loads(snapshot.paths) except ValueError: pass if snapshot.tags:", "session.query(Repository).filter_by(id=id).first() if repository: credential_list = credential_manager.get_group_credentials(repository.credential_group_id) if credential_list: repo_password = credential_list.pop('repo_password') respository_interface =", "of time def get_info(id, repository_interface=None, use_cache=False, repo_status=True): info_dict = {} misc_data = None", "resticweb.tools.job_callbacks def add_repository(info): with LocalSession() as session: repository = Repository( name=info['name'], description=info.get('description'), repo_id=info.get('repo_id'),", "session.query(Repository).filter_by(id=id).first() # credential_manager.remove_credentials(repo_to_remove.credential_group_id) credential_groups.append(repo_to_remove.credential_group_id) job_parameters = session.query(JobParameter).filter_by(param_name='repository', param_value=id).all() for parameter in job_parameters: parameter.param_value", "= JobBuilder(job_name=f\"Sync repo {repository.name}\", job_class='repository_sync', parameters=dict(repository=repository.id, sync_type='full')) job_builder.run_job() if unsync_db: ''' for snapshot", "just re-add them fresh from the actual repo object_list = 
repository_interface.get_snapshot_ls(snap_id) # if", "use_cache and repository_interface.is_online(): # if the repo is online, we can purge the", "return snapshot def insert_snapshots(items, repo_id): with LocalSession() as session: for item in items:", "as session: for id in ids: repo_to_remove = session.query(Repository).filter_by(id=id).first() # credential_manager.remove_credentials(repo_to_remove.credential_group_id) credential_groups.append(repo_to_remove.credential_group_id) job_parameters", "= session.query(Snapshot).filter_by(repository_id=repo_id, snap_short_id=snapshot_id).first() return snapshot def insert_snapshots(items, repo_id): with LocalSession() as session: for", "mode=item.get('mode'), struct_type=item.get('struct_type'), modified_time=item.get('modified_time'), accessed_time=item.get('accessed_time'), created_time=item.get('created_time'), snapshot_id=snap_id ) session.add(new_item) session.commit() def get_engine_repositories(): repository_list =", "from datetime import datetime from resticweb.dateutil import parser import logging logger = logging.getLogger('debugLogger')", "[snapshot_object.to_dict() for snapshot_object in snapshot_object_list] return snapshot_dict_list def delete_snapshot_objects(snap_id): pass def insert_snapshot_objects(items, snap_id):", "ValueError: pass if snapshot.tags: try: snapshot.tags = json.loads(snapshot.tags) except ValueError: pass return snapshot", "repository_type=repository_type.name ) return info_dict # returns a list of snapshots and places them", "ResticRepository from .repository_formatted import ResticRepositoryFormatted from resticweb.tools.repository_tools import sync_snapshots, sync_snapshot_objects, sync_single_snapshot import json", "else: with LocalSession() as session: snapshot_object_list = session.query(SnapshotObject).filter_by(snapshot_id=snap_id).all() snapshot_dict_list = [snapshot_object.to_dict() for snapshot_object", "in credential_groups: credential_manager.remove_credentials(id) def 
get_repository_from_snap_id(snap_id): with LocalSession() as session: snapshot = session.query(Snapshot).filter_by(snap_id=snap_id).first() repository", "if len(credential_list) > 0 else None, id) return respository_interface except Exception as e:", "items: if item.get('mtime'): try: item['mtime'] = parser.parse(item['mtime']) except ValueError: item['mtime'] = None item['modified_time']", "with LocalSession() as session: repository = session.query(Repository).filter_by(id=id).first() if repository: return credential_manager.get_credential(repository.credential_group_id, \"repo_password\") else:", "= None item['modified_time'] = item.pop(\"mtime\") if item.get('atime'): try: item['atime'] = parser.parse(item['atime']) except ValueError:", "for parameter in job_parameters: parameter.param_value = None session.delete(repo_to_remove) session.commit() for id in credential_groups:", "get_repository_password(id): with LocalSession() as session: repository = session.query(Repository).filter_by(id=id).first() if repository: return credential_manager.get_credential(repository.credential_group_id, \"repo_password\")", "description=repository.description, repo_id=repository.repo_id, address=repository.address, repository_data=repository.data, concurrent_uses=repository.concurrent_uses, timeout=repository.timeout, data=misc_data, cache_repo=repository.cache_repo, repository_type=repository_type.name ) return info_dict #", "new_snapshot = Snapshot( snap_id=item.get('snap_id'), snap_short_id=item.get('snap_short_id'), snap_time=item.get('snap_time'), hostname=item.get('hostname'), username=item.get('username'), tree=item.get('tree'), repository_id=repo_id, paths=json.dumps(item.get('paths')), tags=json.dumps(item.get('tags')) )", "repo_id def delete_repositories(ids): credential_groups = [] with LocalSession() as session: for id in", "with LocalSession() as session: snapshots = session.query(Snapshot).filter_by(repository_id=id).all() return snapshots def get_snapshot(repo_id, 
snapshot_id, use_cache=False):", "snapshot_id=snap_id ) session.add(new_item) session.commit() def get_engine_repositories(): repository_list = [] with LocalSession() as session:", "parser.parse(item['mtime']) except ValueError: item['mtime'] = None item['modified_time'] = item.pop(\"mtime\") if item.get('atime'): try: item['atime']", "places them into the database from the # repository if use_cache is set", "from .repository_formatted import ResticRepositoryFormatted from resticweb.tools.repository_tools import sync_snapshots, sync_snapshot_objects, sync_single_snapshot import json import", "item.pop(\"ctime\") new_item = SnapshotObject( name=item.get('name'), type=item.get('type'), path=item.get('path'), uid=item.get('uid'), gid=item.get('gid'), size=item.get('size'), mode=item.get('mode'), struct_type=item.get('struct_type'), modified_time=item.get('modified_time'),", "with LocalSession() as session: for id in ids: repo_to_remove = session.query(Repository).filter_by(id=id).first() # credential_manager.remove_credentials(repo_to_remove.credential_group_id)", "session: repository = session.query(Repository).filter_by(id=id).first() if repository: return repository.address else: return None def get_repository_password(id):", "[] if not use_cache and repository_interface.is_online(): snapshots = repository_interface.get_snapshots() return snapshots if snapshots", "id=repository.id, name=repository.name, description=repository.description, repo_id=repository.repo_id, address=repository.address, repository_data=repository.data, concurrent_uses=repository.concurrent_uses, timeout=repository.timeout, data=misc_data, cache_repo=repository.cache_repo, repository_type=repository_type.name ) return", "then the repo stats are grabbed from repo itself # which might take", "# credential_manager.remove_credentials(repo_to_remove.credential_group_id) credential_groups.append(repo_to_remove.credential_group_id) job_parameters = 
session.query(JobParameter).filter_by(param_name='repository', param_value=id).all() for parameter in job_parameters: parameter.param_value =", "item['snap_time'][:-7] extra = item['snap_time'][-6:] main_time = main_time + extra # item['snap_time'] = datetime.strptime(main_time,", "set to False then the repo stats are grabbed from repo itself #", "repository_interface=repository_interface) return object_list else: with LocalSession() as session: snapshot_object_list = session.query(SnapshotObject).filter_by(snapshot_id=snap_id).all() snapshot_dict_list =", "= [] with LocalSession() as session: repositories = session.query(Repository).filter_by() for repository in repositories:", "id) return respository_interface except Exception as e: logger.error(e) logger.error(\"trace:\" + traceback.format_exc()) return None", "json.dumps(misc_data) session.commit() else: try: misc_data = json.loads(repository.data) except TypeError: misc_data = dict(data=repository.data) misc_data['status']", "LocalSession() as session: repository = session.query(Repository).filter_by(id=id).first() repository_type = session.query(RepositoryType).filter_by(id=repository.repository_type_id).first() if misc_data: repository.data =", "get_repository_address(id): with LocalSession() as session: repository = session.query(Repository).filter_by(id=id).first() if repository: return repository.address else:", "following method # it's located under resticweb.tools.job_callbacks def add_repository(info): with LocalSession() as session:", "status = repository_interface.is_online() if status is None: return \"Couldn't get status\" else: if", "for item in items: if item.get('mtime'): try: item['mtime'] = parser.parse(item['mtime']) except ValueError: item['mtime']", "None def get_repository_address(id): with LocalSession() as session: repository = session.query(Repository).filter_by(id=id).first() if repository: return", "for snapshot_object in snapshot_object_list] return snapshot_dict_list def 
delete_snapshot_objects(snap_id): pass def insert_snapshot_objects(items, snap_id): with", "repository_interface.is_online() if status is None: return \"Couldn't get status\" else: if status: return", "LocalSession() as session: snapshot_object_list = session.query(SnapshotObject).filter_by(snapshot_id=snap_id).all() snapshot_dict_list = [snapshot_object.to_dict() for snapshot_object in snapshot_object_list]", "the stats # from the repository itself like the total size and number", "as session: repositories = session.query(Repository).filter_by() for repository in repositories: repository_list.append((repository.id, repository.name)) return repository_list", "= None session.delete(repo_to_remove) session.commit() for id in credential_groups: credential_manager.remove_credentials(id) def get_repository_from_snap_id(snap_id): with LocalSession()", "None def get_repository_password(id): with LocalSession() as session: repository = session.query(Repository).filter_by(id=id).first() if repository: return", "def get_snapshot_objects(snap_id, use_cache=False): with LocalSession() as session: snapshot = session.query(Snapshot).filter_by(snap_id=snap_id).first() repository = session.query(Repository).filter_by(id=snapshot.repository_id).first()", "session.query(Snapshot).filter_by(snap_id=id).first() if snapshot.paths: try: snapshot.paths = json.loads(snapshot.paths) except ValueError: pass if snapshot.tags: try:", "resticweb.tools.local_session import LocalSession from resticweb.misc.credential_manager import credential_manager # from .repository import ResticRepository from", "repository: return repository.address else: return None def get_repository_password(id): with LocalSession() as session: repository", "item['atime'] = parser.parse(item['atime']) except ValueError: item['atime'] = None item['accessed_time'] = item.pop(\"atime\") if item.get('ctime'):", "return object_list else: with LocalSession() as session: snapshot_object_list = 
session.query(SnapshotObject).filter_by(snapshot_id=snap_id).all() snapshot_dict_list = [snapshot_object.to_dict()", "is set to False then the repo stats are grabbed from repo itself", "logging.getLogger('debugLogger') # repository_add_to_db is used instead of the following method # it's located", "repository_interface.is_online(): # if the repo is online, we can purge the snapshots from", "repository = session.query(Repository).filter_by(id=id).first() if repository: return repository.name else: return None def get_repository_address(id): with", "snap_time=item.get('snap_time'), hostname=item.get('hostname'), username=item.get('username'), tree=item.get('tree'), repository_id=repo_id, paths=json.dumps(item.get('paths')), tags=json.dumps(item.get('tags')) ) session.add(new_snapshot) session.commit() def delete_snapshot(repo_id, snapshot_id):", "db from repo {repository.name}', job_class='clear_snapshot_objects', parameters=dict(repo_id=repository.id)) job_builder.run_job() return repo_id def delete_repositories(ids): credential_groups =", "we will # just re-add them fresh from the actual repo object_list =", "session.query(Repository).filter_by(id=id).first() if repository: return credential_manager.get_credential(repository.credential_group_id, \"repo_password\") else: return None def get_formatted_repository_interface_from_id(id): try: with", "logger = logging.getLogger('debugLogger') # repository_add_to_db is used instead of the following method #", "LocalSession() as session: snapshot = session.query(Snapshot).filter_by(repository_id=repo_id, snap_short_id=snapshot_id).first() session.delete(snapshot) session.commit() def get_snapshot_objects(snap_id, use_cache=False): with", "as session: repository = Repository( name=info['name'], description=info.get('description'), repo_id=info.get('repo_id'), address=info['address'], parameters=info['parameters'], data=info.get('data'), credential_group_id=info.get('credential_group_id'), 
repository_type_id=info['repository_type_id'],", "session: repository = Repository( name=info['name'], description=info.get('description'), repo_id=info.get('repo_id'), address=info['address'], parameters=info['parameters'], data=info.get('data'), credential_group_id=info.get('credential_group_id'), repository_type_id=info['repository_type_id'], concurrent_uses=info.get('concurrent_uses'),", "JobBuilder if sync_db: job_builder = JobBuilder(job_name=f\"Sync repo {repository.name}\", job_class='repository_sync', parameters=dict(repository=repository.id, sync_type='full')) job_builder.run_job() if", "except ValueError: pass return snapshot def get_repository_status(id): repository_interface = get_formatted_repository_interface_from_id(id) status = repository_interface.is_online()", "the snapshots from db as we will # just re-add them fresh from", "= json.loads(snapshot.paths) except ValueError: pass if snapshot.tags: try: snapshot.tags = json.loads(snapshot.tags) except ValueError:", "list of snapshots and places them into the database from the # repository", "as session: repository = session.query(Repository).filter_by(id=id).first() repository_type = session.query(RepositoryType).filter_by(id=repository.repository_type_id).first() if misc_data: repository.data = json.dumps(misc_data)", "get_formatted_repository_interface_from_id(id) snapshots = [] if not use_cache and repository_interface.is_online(): snapshots = repository_interface.get_snapshots() return", "snapshot def insert_snapshots(items, repo_id): with LocalSession() as session: for item in items: item['snap_id']", "insert_snapshots(items, repo_id): with LocalSession() as session: for item in items: item['snap_id'] = item.pop('id')", "def insert_snapshot_objects(items, snap_id): with LocalSession() as session: for item in items: if item.get('mtime'):", "= repository_interface.get_snapshots(snapshot_id)[0] return snapshot if snapshot else {} else: with LocalSession() as session:", "we can purge the snapshots 
from db as we will # just re-add", "snapshot.snapshot_objects = [] session.commit() ''' job_builder = JobBuilder(job_name=f'Clear db from repo {repository.name}', job_class='clear_snapshot_objects',", "username=item.get('username'), tree=item.get('tree'), repository_id=repo_id, paths=json.dumps(item.get('paths')), tags=json.dumps(item.get('tags')) ) session.add(new_snapshot) session.commit() def delete_snapshot(repo_id, snapshot_id): with LocalSession()", "= main_time + extra # item['snap_time'] = datetime.strptime(main_time, \"%Y-%m-%dT%H:%M:%S.%f%z\") item['snap_time'] = parser.parse(main_time) new_snapshot", "session: repository = session.query(Repository).filter_by(id=id).first() if repository: return credential_manager.get_credential(repository.credential_group_id, \"repo_password\") else: return None def", "datetime import datetime from resticweb.dateutil import parser import logging logger = logging.getLogger('debugLogger') #", "job_parameters: parameter.param_value = None session.delete(repo_to_remove) session.commit() for id in credential_groups: credential_manager.remove_credentials(id) def get_repository_from_snap_id(snap_id):", "datetime from resticweb.dateutil import parser import logging logger = logging.getLogger('debugLogger') # repository_add_to_db is", ") session.add(new_snapshot) session.commit() def delete_snapshot(repo_id, snapshot_id): with LocalSession() as session: snapshot = session.query(Snapshot).filter_by(repository_id=repo_id,", "session.commit() def get_engine_repositories(): repository_list = [] with LocalSession() as session: repositories = session.query(Repository).filter_by()", "job_builder.run_job() if unsync_db: ''' for snapshot in repository.snapshots: snapshot.snapshot_objects = [] session.commit() '''", "LocalSession() as session: for id in ids: repo_to_remove = session.query(Repository).filter_by(id=id).first() # credential_manager.remove_credentials(repo_to_remove.credential_group_id) 
credential_groups.append(repo_to_remove.credential_group_id)", "use_cache=False): repository_interface = get_formatted_repository_interface_from_id(repo_id) if not use_cache and repository_interface.is_online(): snapshot = repository_interface.get_snapshots(snapshot_id)[0] return", "= session.query(Snapshot).filter_by(repository_id=repo_id, snap_short_id=snapshot_id).first() session.delete(snapshot) session.commit() def get_snapshot_objects(snap_id, use_cache=False): with LocalSession() as session: snapshot", "get_snapshot_objects(snap_id, use_cache=False): with LocalSession() as session: snapshot = session.query(Snapshot).filter_by(snap_id=snap_id).first() repository = session.query(Repository).filter_by(id=snapshot.repository_id).first() repository_interface", "return snapshot def get_repository_status(id): repository_interface = get_formatted_repository_interface_from_id(id) status = repository_interface.is_online() if status is", "session.query(Repository).filter_by(id=snapshot.repository_id).first() return repository # gets basic info about the repository from the database.", "item['ctime'] = None item['created_time'] = item.pop(\"ctime\") new_item = SnapshotObject( name=item.get('name'), type=item.get('type'), path=item.get('path'), uid=item.get('uid'),", "repository = session.query(Repository).filter_by(id=snapshot.repository_id).first() return repository # gets basic info about the repository from", "tree=item.get('tree'), repository_id=repo_id, paths=json.dumps(item.get('paths')), tags=json.dumps(item.get('tags')) ) session.add(new_snapshot) session.commit() def delete_snapshot(repo_id, snapshot_id): with LocalSession() as", "repository_data=repository.data, concurrent_uses=repository.concurrent_uses, timeout=repository.timeout, data=misc_data, cache_repo=repository.cache_repo, repository_type=repository_type.name ) return info_dict # returns a list", "repository = session.query(Repository).filter_by(id=id).first() repository_type = 
session.query(RepositoryType).filter_by(id=repository.repository_type_id).first() if misc_data: repository.data = json.dumps(misc_data) session.commit() else:", "cache_repo=repository.cache_repo, repository_type=repository_type.name ) return info_dict # returns a list of snapshots and places", ".repository import ResticRepository from .repository_formatted import ResticRepositoryFormatted from resticweb.tools.repository_tools import sync_snapshots, sync_snapshot_objects, sync_single_snapshot", "if repository: return credential_manager.get_credential(repository.credential_group_id, \"repo_password\") else: return None def get_formatted_repository_interface_from_id(id): try: with LocalSession()", "item['snap_time'][-6:] main_time = main_time + extra # item['snap_time'] = datetime.strptime(main_time, \"%Y-%m-%dT%H:%M:%S.%f%z\") item['snap_time'] =", "ResticRepositoryFormatted from resticweb.tools.repository_tools import sync_snapshots, sync_snapshot_objects, sync_single_snapshot import json import traceback from datetime", "method # it's located under resticweb.tools.job_callbacks def add_repository(info): with LocalSession() as session: repository", "RepositoryType from resticweb.tools.local_session import LocalSession from resticweb.misc.credential_manager import credential_manager # from .repository import", "is set to True def get_snapshots(id, use_cache=False): repository_interface = get_formatted_repository_interface_from_id(id) snapshots = []", "items: item['snap_id'] = item.pop('id') item['snap_short_id'] = item.pop('short_id') item['snap_time'] = item.pop('time') if item['snap_time']: main_time", "LocalSession() as session: repository = session.query(Repository).filter_by(id=id).first() if repository: return repository.address else: return None", "= get_formatted_repository_interface_from_id(id) status = repository_interface.is_online() if status is None: return \"Couldn't get status\"", "item['ctime'] = parser.parse(item['ctime']) except ValueError: 
item['ctime'] = None item['created_time'] = item.pop(\"ctime\") new_item =", "new_item = SnapshotObject( name=item.get('name'), type=item.get('type'), path=item.get('path'), uid=item.get('uid'), gid=item.get('gid'), size=item.get('size'), mode=item.get('mode'), struct_type=item.get('struct_type'), modified_time=item.get('modified_time'), accessed_time=item.get('accessed_time'),", "= item.pop(\"mtime\") if item.get('atime'): try: item['atime'] = parser.parse(item['atime']) except ValueError: item['atime'] = None", "data=info.get('data'), credential_group_id=info.get('credential_group_id'), repository_type_id=info['repository_type_id'], concurrent_uses=info.get('concurrent_uses'), timeout=info.get('timeout') ) session.add(repository) session.commit() return repository.id def update_repository(info, repo_id,", "None, id) return respository_interface except Exception as e: logger.error(e) logger.error(\"trace:\" + traceback.format_exc()) return", "LocalSession() as session: snapshot = session.query(Snapshot).filter_by(snap_id=id).first() if snapshot.paths: try: snapshot.paths = json.loads(snapshot.paths) except", "return \"Couldn't get status\" else: if status: return \"Online\" else: return \"Offline\" def", "item['snap_short_id'] = item.pop('short_id') item['snap_time'] = item.pop('time') if item['snap_time']: main_time = item['snap_time'][:-7] extra =", "item.get('atime'): try: item['atime'] = parser.parse(item['atime']) except ValueError: item['atime'] = None item['accessed_time'] = item.pop(\"atime\")", "= ResticRepositoryFormatted(repository.address, repo_password, credential_list if len(credential_list) > 0 else None, id) return respository_interface", "[] with LocalSession() as session: for id in ids: repo_to_remove = session.query(Repository).filter_by(id=id).first() #", "info_dict # returns a list of snapshots and places them into the database", "list of snapshots from # the database if use_cache is set to True", "item.get('mtime'): try: item['mtime'] = 
parser.parse(item['mtime']) except ValueError: item['mtime'] = None item['modified_time'] = item.pop(\"mtime\")", "if not repository_interface: repository_interface = get_formatted_repository_interface_from_id(id) repo_status = repository_interface.is_offline() if not use_cache: if", "Rep from resticweb.models.general import Repository, Snapshot, SnapshotObject, JobParameter, RepositoryType from resticweb.tools.local_session import LocalSession", "about the repository from the database. Also grabs the stats # from the", "a bit of time def get_info(id, repository_interface=None, use_cache=False, repo_status=True): info_dict = {} misc_data", "= item.pop('time') if item['snap_time']: main_time = item['snap_time'][:-7] extra = item['snap_time'][-6:] main_time = main_time", "# gets basic info about the repository from the database. Also grabs the", "snap_id, repository_interface=repository_interface) return object_list else: with LocalSession() as session: snapshot_object_list = session.query(SnapshotObject).filter_by(snapshot_id=snap_id).all() snapshot_dict_list", "= repository_interface.is_online() if status is None: return \"Couldn't get status\" else: if status:", "snapshots if snapshots else {} else: with LocalSession() as session: snapshots = session.query(Snapshot).filter_by(repository_id=id).all()", "= parser.parse(item['atime']) except ValueError: item['atime'] = None item['accessed_time'] = item.pop(\"atime\") if item.get('ctime'): try:", "sync_type='full')) job_builder.run_job() if unsync_db: ''' for snapshot in repository.snapshots: snapshot.snapshot_objects = [] session.commit()", "= parser.parse(item['mtime']) except ValueError: item['mtime'] = None item['modified_time'] = item.pop(\"mtime\") if item.get('atime'): try:", "insert_snapshot_objects(items, snap_id): with LocalSession() as session: for item in items: if item.get('mtime'): try:", "credential_list: repo_password = credential_list.pop('repo_password') respository_interface = 
ResticRepositoryFormatted(repository.address, repo_password, credential_list if len(credential_list) > 0", "will # just re-add them fresh from the actual repo object_list = repository_interface.get_snapshot_ls(snap_id)", "repository if use_cache is set to False. Returns list of snapshots from #", "= session.query(Repository).filter_by(id=id).first() repository_type = session.query(RepositoryType).filter_by(id=repository.repository_type_id).first() if misc_data: repository.data = json.dumps(misc_data) session.commit() else: try:", "the repository itself like the total size and number of files. # if", "parameter in job_parameters: parameter.param_value = None session.delete(repo_to_remove) session.commit() for id in credential_groups: credential_manager.remove_credentials(id)", "sync_snapshot_objects, sync_single_snapshot import json import traceback from datetime import datetime from resticweb.dateutil import", "job_class='repository_sync', parameters=dict(repository=repository.id, sync_type='full')) job_builder.run_job() if unsync_db: ''' for snapshot in repository.snapshots: snapshot.snapshot_objects =", "LocalSession() as session: snapshot = session.query(Snapshot).filter_by(snap_id=snap_id).first() repository = session.query(Repository).filter_by(id=snapshot.repository_id).first() return repository # gets", "return info_dict # returns a list of snapshots and places them into the", "accessed_time=item.get('accessed_time'), created_time=item.get('created_time'), snapshot_id=snap_id ) session.add(new_item) session.commit() def get_engine_repositories(): repository_list = [] with LocalSession()", "repo_password = credential_list.pop('repo_password') respository_interface = ResticRepositoryFormatted(repository.address, repo_password, credential_list if len(credential_list) > 0 else", "# if the repo is online, we can purge the snapshots from db", "json.loads(snapshot.tags) except ValueError: pass return snapshot def get_repository_status(id): repository_interface = 
get_formatted_repository_interface_from_id(id) status =", "actual repo object_list = repository_interface.get_snapshot_ls(snap_id) # if repository.cache_repo: # sync_snapshot_objects(repository.id, snap_id, repository_interface=repository_interface) return", "Repository, Snapshot, SnapshotObject, JobParameter, RepositoryType from resticweb.tools.local_session import LocalSession from resticweb.misc.credential_manager import credential_manager", "= session.query(SnapshotObject).filter_by(snapshot_id=snap_id).all() snapshot_dict_list = [snapshot_object.to_dict() for snapshot_object in snapshot_object_list] return snapshot_dict_list def delete_snapshot_objects(snap_id):", "pass if snapshot.tags: try: snapshot.tags = json.loads(snapshot.tags) except ValueError: pass return snapshot def", "of the following method # it's located under resticweb.tools.job_callbacks def add_repository(info): with LocalSession()", "uid=item.get('uid'), gid=item.get('gid'), size=item.get('size'), mode=item.get('mode'), struct_type=item.get('struct_type'), modified_time=item.get('modified_time'), accessed_time=item.get('accessed_time'), created_time=item.get('created_time'), snapshot_id=snap_id ) session.add(new_item) session.commit() def", "json import traceback from datetime import datetime from resticweb.dateutil import parser import logging", "else: return None def get_repository_address(id): with LocalSession() as session: repository = session.query(Repository).filter_by(id=id).first() if", "if use_cache is set to True def get_snapshots(id, use_cache=False): repository_interface = get_formatted_repository_interface_from_id(id) snapshots", "except ValueError: pass if snapshot.tags: try: snapshot.tags = json.loads(snapshot.tags) except ValueError: pass return", "sync_db: job_builder = JobBuilder(job_name=f\"Sync repo {repository.name}\", job_class='repository_sync', parameters=dict(repository=repository.id, sync_type='full')) job_builder.run_job() if unsync_db: '''", "session: snapshot = 
session.query(Snapshot).filter_by(snap_id=id).first() if snapshot.paths: try: snapshot.paths = json.loads(snapshot.paths) except ValueError: pass", "item['snap_time'] = datetime.strptime(main_time, \"%Y-%m-%dT%H:%M:%S.%f%z\") item['snap_time'] = parser.parse(main_time) new_snapshot = Snapshot( snap_id=item.get('snap_id'), snap_short_id=item.get('snap_short_id'), snap_time=item.get('snap_time'),", "snapshot = session.query(Snapshot).filter_by(snap_id=snap_id).first() repository = session.query(Repository).filter_by(id=snapshot.repository_id).first() repository_interface = get_formatted_repository_interface_from_id(snapshot.repository_id) if not use_cache and", "which might take a bit of time def get_info(id, repository_interface=None, use_cache=False, repo_status=True): info_dict", "repository.id def update_repository(info, repo_id, sync_db=False, unsync_db=False): with LocalSession() as session: repository = session.query(Repository).filter_by(id=repo_id).first()", "credential_list.pop('repo_password') respository_interface = ResticRepositoryFormatted(repository.address, repo_password, credential_list if len(credential_list) > 0 else None, id)", "the database. Also grabs the stats # from the repository itself like the", "the total size and number of files. 
# if use_cache is set to", "info.get('description') repository.address = info['address'] repository.cache_repo = info['cache_repo'] repository.concurrent_uses = info['concurrent_uses'] repository.timeout = info['timeout']", "item['snap_time'] = item.pop('time') if item['snap_time']: main_time = item['snap_time'][:-7] extra = item['snap_time'][-6:] main_time =", "name=item.get('name'), type=item.get('type'), path=item.get('path'), uid=item.get('uid'), gid=item.get('gid'), size=item.get('size'), mode=item.get('mode'), struct_type=item.get('struct_type'), modified_time=item.get('modified_time'), accessed_time=item.get('accessed_time'), created_time=item.get('created_time'), snapshot_id=snap_id )", "credential_manager.remove_credentials(repo_to_remove.credential_group_id) credential_groups.append(repo_to_remove.credential_group_id) job_parameters = session.query(JobParameter).filter_by(param_name='repository', param_value=id).all() for parameter in job_parameters: parameter.param_value = None", "in repositories: repository_list.append((repository.id, repository.name)) return repository_list def get_snapshot_info(id): with LocalSession() as session: snapshot", "set to False. 
Returns list of snapshots from # the database if use_cache", "return credential_manager.get_credential(repository.credential_group_id, \"repo_password\") else: return None def get_formatted_repository_interface_from_id(id): try: with LocalSession() as session:", "def get_snapshots(id, use_cache=False): repository_interface = get_formatted_repository_interface_from_id(id) snapshots = [] if not use_cache and", "info['concurrent_uses'] repository.timeout = info['timeout'] repository.parameters = json.dumps(info['parameters']) session.commit() from resticweb.tools.job_build import JobBuilder if", "if not use_cache and repository_interface.is_online(): # if the repo is online, we can", "\"repo_password\") else: return None def get_formatted_repository_interface_from_id(id): try: with LocalSession() as session: repository =", "of snapshots and places them into the database from the # repository if", "= session.query(Repository).filter_by(id=id).first() if repository: return repository.name else: return None def get_repository_address(id): with LocalSession()", "with LocalSession() as session: snapshot_object_list = session.query(SnapshotObject).filter_by(snapshot_id=snap_id).all() snapshot_dict_list = [snapshot_object.to_dict() for snapshot_object in", "datetime.strptime(main_time, \"%Y-%m-%dT%H:%M:%S.%f%z\") item['snap_time'] = parser.parse(main_time) new_snapshot = Snapshot( snap_id=item.get('snap_id'), snap_short_id=item.get('snap_short_id'), snap_time=item.get('snap_time'), hostname=item.get('hostname'), username=item.get('username'),", "misc_data = dict(data=repository.data) misc_data['status'] = repo_status info_dict = dict( id=repository.id, name=repository.name, description=repository.description, repo_id=repository.repo_id,", "def delete_snapshot(repo_id, snapshot_id): with LocalSession() as session: snapshot = session.query(Snapshot).filter_by(repository_id=repo_id, snap_short_id=snapshot_id).first() session.delete(snapshot) session.commit()", "and 
repository_interface.is_online(): snapshots = repository_interface.get_snapshots() return snapshots if snapshots else {} else: with", "from resticweb.tools.job_build import JobBuilder if sync_db: job_builder = JobBuilder(job_name=f\"Sync repo {repository.name}\", job_class='repository_sync', parameters=dict(repository=repository.id,", "session.commit() return repository.id def update_repository(info, repo_id, sync_db=False, unsync_db=False): with LocalSession() as session: repository", "the following method # it's located under resticweb.tools.job_callbacks def add_repository(info): with LocalSession() as", "from db as we will # just re-add them fresh from the actual", "credential_manager # from .repository import ResticRepository from .repository_formatted import ResticRepositoryFormatted from resticweb.tools.repository_tools import", "repo object_list = repository_interface.get_snapshot_ls(snap_id) # if repository.cache_repo: # sync_snapshot_objects(repository.id, snap_id, repository_interface=repository_interface) return object_list", "if misc_data: repository.data = json.dumps(misc_data) session.commit() else: try: misc_data = json.loads(repository.data) except TypeError:", "as session: for item in items: item['snap_id'] = item.pop('id') item['snap_short_id'] = item.pop('short_id') item['snap_time']", "session.query(Repository).filter_by(id=repo_id).first() if repository.name != info['name']: credential_manager.set_service_id(repository.credential_group_id, info['name']) repository.name = info['name'] repository.description = info.get('description')", "session.query(Snapshot).filter_by(repository_id=id).all() return snapshots def get_snapshot(repo_id, snapshot_id, use_cache=False): repository_interface = get_formatted_repository_interface_from_id(repo_id) if not use_cache", "if item.get('mtime'): try: item['mtime'] = parser.parse(item['mtime']) except ValueError: item['mtime'] = None item['modified_time'] =", "repository = 
session.query(Repository).filter_by(id=id).first() if repository: return credential_manager.get_credential(repository.credential_group_id, \"repo_password\") else: return None def get_formatted_repository_interface_from_id(id):", "job_builder = JobBuilder(job_name=f\"Sync repo {repository.name}\", job_class='repository_sync', parameters=dict(repository=repository.id, sync_type='full')) job_builder.run_job() if unsync_db: ''' for", "except ValueError: item['atime'] = None item['accessed_time'] = item.pop(\"atime\") if item.get('ctime'): try: item['ctime'] =", "with LocalSession() as session: repository = session.query(Repository).filter_by(id=id).first() if repository: credential_list = credential_manager.get_group_credentials(repository.credential_group_id) if", "session.commit() for id in credential_groups: credential_manager.remove_credentials(id) def get_repository_from_snap_id(snap_id): with LocalSession() as session: snapshot", "else {} else: with LocalSession() as session: snapshots = session.query(Snapshot).filter_by(repository_id=id).all() return snapshots def", "repository_interface = get_formatted_repository_interface_from_id(id) status = repository_interface.is_online() if status is None: return \"Couldn't get", "session.commit() else: try: misc_data = json.loads(repository.data) except TypeError: misc_data = dict(data=repository.data) misc_data['status'] =", "= {} misc_data = None if repo_status: if not repository_interface: repository_interface = get_formatted_repository_interface_from_id(id)", "repository = session.query(Repository).filter_by(id=id).first() if repository: return repository.address else: return None def get_repository_password(id): with", "if snapshot else {} else: with LocalSession() as session: snapshot = session.query(Snapshot).filter_by(repository_id=repo_id, snap_short_id=snapshot_id).first()", "info['timeout'] repository.parameters = json.dumps(info['parameters']) session.commit() from resticweb.tools.job_build import JobBuilder 
if sync_db: job_builder =", "item.pop(\"mtime\") if item.get('atime'): try: item['atime'] = parser.parse(item['atime']) except ValueError: item['atime'] = None item['accessed_time']", "is online, we can purge the snapshots from db as we will #", "sync_db=False, unsync_db=False): with LocalSession() as session: repository = session.query(Repository).filter_by(id=repo_id).first() if repository.name != info['name']:", "import sync_snapshots, sync_snapshot_objects, sync_single_snapshot import json import traceback from datetime import datetime from", "if item.get('atime'): try: item['atime'] = parser.parse(item['atime']) except ValueError: item['atime'] = None item['accessed_time'] =", "ValueError: pass return snapshot def get_repository_status(id): repository_interface = get_formatted_repository_interface_from_id(id) status = repository_interface.is_online() if", "ValueError: item['mtime'] = None item['modified_time'] = item.pop(\"mtime\") if item.get('atime'): try: item['atime'] = parser.parse(item['atime'])", "with LocalSession() as session: repositories = session.query(Repository).filter_by() for repository in repositories: repository_list.append((repository.id, repository.name))", "get_formatted_repository_interface_from_id(repo_id) if not use_cache and repository_interface.is_online(): snapshot = repository_interface.get_snapshots(snapshot_id)[0] return snapshot if snapshot", "# just re-add them fresh from the actual repo object_list = repository_interface.get_snapshot_ls(snap_id) #", "get_formatted_repository_interface_from_id(id) status = repository_interface.is_online() if status is None: return \"Couldn't get status\" else:", "a list of snapshots and places them into the database from the #", "= session.query(Repository).filter_by() for repository in repositories: repository_list.append((repository.id, repository.name)) return repository_list def get_snapshot_info(id): with", "item['mtime'] = None item['modified_time'] = item.pop(\"mtime\") if 
item.get('atime'): try: item['atime'] = parser.parse(item['atime']) except", "credential_manager.get_group_credentials(repository.credential_group_id) if credential_list: repo_password = credential_list.pop('repo_password') respository_interface = ResticRepositoryFormatted(repository.address, repo_password, credential_list if len(credential_list)", "parser.parse(item['atime']) except ValueError: item['atime'] = None item['accessed_time'] = item.pop(\"atime\") if item.get('ctime'): try: item['ctime']", "if use_cache is set to False then the repo stats are grabbed from", "as session: snapshot = session.query(Snapshot).filter_by(repository_id=repo_id, snap_short_id=snapshot_id).first() return snapshot def insert_snapshots(items, repo_id): with LocalSession()", "= info['address'] repository.cache_repo = info['cache_repo'] repository.concurrent_uses = info['concurrent_uses'] repository.timeout = info['timeout'] repository.parameters =", "param_value=id).all() for parameter in job_parameters: parameter.param_value = None session.delete(repo_to_remove) session.commit() for id in", "not repo_status: misc_data = repository_interface.get_stats() with LocalSession() as session: repository = session.query(Repository).filter_by(id=id).first() repository_type", "repositories: repository_list.append((repository.id, repository.name)) return repository_list def get_snapshot_info(id): with LocalSession() as session: snapshot =", "item['accessed_time'] = item.pop(\"atime\") if item.get('ctime'): try: item['ctime'] = parser.parse(item['ctime']) except ValueError: item['ctime'] =", "resticweb.dictionary.resticweb_constants import Repository as Rep from resticweb.models.general import Repository, Snapshot, SnapshotObject, JobParameter, RepositoryType", "item['snap_time'] = parser.parse(main_time) new_snapshot = Snapshot( snap_id=item.get('snap_id'), snap_short_id=item.get('snap_short_id'), snap_time=item.get('snap_time'), hostname=item.get('hostname'), username=item.get('username'), 
tree=item.get('tree'), repository_id=repo_id,", "the actual repo object_list = repository_interface.get_snapshot_ls(snap_id) # if repository.cache_repo: # sync_snapshot_objects(repository.id, snap_id, repository_interface=repository_interface)", "gets basic info about the repository from the database. Also grabs the stats", "= logging.getLogger('debugLogger') # repository_add_to_db is used instead of the following method # it's", "import JobBuilder if sync_db: job_builder = JobBuilder(job_name=f\"Sync repo {repository.name}\", job_class='repository_sync', parameters=dict(repository=repository.id, sync_type='full')) job_builder.run_job()", "= JobBuilder(job_name=f'Clear db from repo {repository.name}', job_class='clear_snapshot_objects', parameters=dict(repo_id=repository.id)) job_builder.run_job() return repo_id def delete_repositories(ids):", "json.loads(repository.data) except TypeError: misc_data = dict(data=repository.data) misc_data['status'] = repo_status info_dict = dict( id=repository.id,", "item in items: item['snap_id'] = item.pop('id') item['snap_short_id'] = item.pop('short_id') item['snap_time'] = item.pop('time') if", "{repository.name}\", job_class='repository_sync', parameters=dict(repository=repository.id, sync_type='full')) job_builder.run_job() if unsync_db: ''' for snapshot in repository.snapshots: snapshot.snapshot_objects", "[] session.commit() ''' job_builder = JobBuilder(job_name=f'Clear db from repo {repository.name}', job_class='clear_snapshot_objects', parameters=dict(repo_id=repository.id)) job_builder.run_job()", "repository.name)) return repository_list def get_snapshot_info(id): with LocalSession() as session: snapshot = session.query(Snapshot).filter_by(snap_id=id).first() if", "as session: snapshot = session.query(Snapshot).filter_by(snap_id=snap_id).first() repository = session.query(Repository).filter_by(id=snapshot.repository_id).first() return repository # gets basic", "snapshot_id): with LocalSession() as session: snapshot = 
session.query(Snapshot).filter_by(repository_id=repo_id, snap_short_id=snapshot_id).first() session.delete(snapshot) session.commit() def get_snapshot_objects(snap_id,", "snapshot.paths = json.loads(snapshot.paths) except ValueError: pass if snapshot.tags: try: snapshot.tags = json.loads(snapshot.tags) except", "Snapshot, SnapshotObject, JobParameter, RepositoryType from resticweb.tools.local_session import LocalSession from resticweb.misc.credential_manager import credential_manager #", "0 else None, id) return respository_interface except Exception as e: logger.error(e) logger.error(\"trace:\" +", "LocalSession() as session: snapshot = session.query(Snapshot).filter_by(repository_id=repo_id, snap_short_id=snapshot_id).first() return snapshot def insert_snapshots(items, repo_id): with", "import Repository, Snapshot, SnapshotObject, JobParameter, RepositoryType from resticweb.tools.local_session import LocalSession from resticweb.misc.credential_manager import", "update_repository(info, repo_id, sync_db=False, unsync_db=False): with LocalSession() as session: repository = session.query(Repository).filter_by(id=repo_id).first() if repository.name", "from repo {repository.name}', job_class='clear_snapshot_objects', parameters=dict(repo_id=repository.id)) job_builder.run_job() return repo_id def delete_repositories(ids): credential_groups = []", "= info['timeout'] repository.parameters = json.dumps(info['parameters']) session.commit() from resticweb.tools.job_build import JobBuilder if sync_db: job_builder", "session.commit() from resticweb.tools.job_build import JobBuilder if sync_db: job_builder = JobBuilder(job_name=f\"Sync repo {repository.name}\", job_class='repository_sync',", "= dict( id=repository.id, name=repository.name, description=repository.description, repo_id=repository.repo_id, address=repository.address, repository_data=repository.data, concurrent_uses=repository.concurrent_uses, timeout=repository.timeout, data=misc_data, 
cache_repo=repository.cache_repo, repository_type=repository_type.name", "as session: snapshot = session.query(Snapshot).filter_by(snap_id=snap_id).first() repository = session.query(Repository).filter_by(id=snapshot.repository_id).first() repository_interface = get_formatted_repository_interface_from_id(snapshot.repository_id) if not", "basic info about the repository from the database. Also grabs the stats #", "if credential_list: repo_password = credential_list.pop('repo_password') respository_interface = ResticRepositoryFormatted(repository.address, repo_password, credential_list if len(credential_list) >", "import json import traceback from datetime import datetime from resticweb.dateutil import parser import", "timeout=repository.timeout, data=misc_data, cache_repo=repository.cache_repo, repository_type=repository_type.name ) return info_dict # returns a list of snapshots", "= Snapshot( snap_id=item.get('snap_id'), snap_short_id=item.get('snap_short_id'), snap_time=item.get('snap_time'), hostname=item.get('hostname'), username=item.get('username'), tree=item.get('tree'), repository_id=repo_id, paths=json.dumps(item.get('paths')), tags=json.dumps(item.get('tags')) ) session.add(new_snapshot)", "snapshot in repository.snapshots: snapshot.snapshot_objects = [] session.commit() ''' job_builder = JobBuilder(job_name=f'Clear db from", "the repo is online, we can purge the snapshots from db as we", "get_formatted_repository_interface_from_id(id) repo_status = repository_interface.is_offline() if not use_cache: if not repo_status: misc_data = repository_interface.get_stats()", "get_snapshot(repo_id, snapshot_id, use_cache=False): repository_interface = get_formatted_repository_interface_from_id(repo_id) if not use_cache and repository_interface.is_online(): snapshot =", "struct_type=item.get('struct_type'), modified_time=item.get('modified_time'), accessed_time=item.get('accessed_time'), created_time=item.get('created_time'), snapshot_id=snap_id ) 
session.add(new_item) session.commit() def get_engine_repositories(): repository_list = []", "else: try: misc_data = json.loads(repository.data) except TypeError: misc_data = dict(data=repository.data) misc_data['status'] = repo_status", "= session.query(JobParameter).filter_by(param_name='repository', param_value=id).all() for parameter in job_parameters: parameter.param_value = None session.delete(repo_to_remove) session.commit() for", "else: with LocalSession() as session: snapshot = session.query(Snapshot).filter_by(repository_id=repo_id, snap_short_id=snapshot_id).first() return snapshot def insert_snapshots(items,", "parameters=dict(repo_id=repository.id)) job_builder.run_job() return repo_id def delete_repositories(ids): credential_groups = [] with LocalSession() as session:", "= session.query(Repository).filter_by(id=snapshot.repository_id).first() repository_interface = get_formatted_repository_interface_from_id(snapshot.repository_id) if not use_cache and repository_interface.is_online(): # if the", "as session: repository = session.query(Repository).filter_by(id=id).first() if repository: return credential_manager.get_credential(repository.credential_group_id, \"repo_password\") else: return None", "if use_cache is set to False. 
Returns list of snapshots from # the", "as session: for item in items: if item.get('mtime'): try: item['mtime'] = parser.parse(item['mtime']) except", "repository.snapshots: snapshot.snapshot_objects = [] session.commit() ''' job_builder = JobBuilder(job_name=f'Clear db from repo {repository.name}',", "them fresh from the actual repo object_list = repository_interface.get_snapshot_ls(snap_id) # if repository.cache_repo: #", "repository_interface: repository_interface = get_formatted_repository_interface_from_id(id) repo_status = repository_interface.is_offline() if not use_cache: if not repo_status:", "if not use_cache: if not repo_status: misc_data = repository_interface.get_stats() with LocalSession() as session:", "repository in repositories: repository_list.append((repository.id, repository.name)) return repository_list def get_snapshot_info(id): with LocalSession() as session:", "= [] with LocalSession() as session: for id in ids: repo_to_remove = session.query(Repository).filter_by(id=id).first()", "as session: repository = session.query(Repository).filter_by(id=repo_id).first() if repository.name != info['name']: credential_manager.set_service_id(repository.credential_group_id, info['name']) repository.name =", "try: item['ctime'] = parser.parse(item['ctime']) except ValueError: item['ctime'] = None item['created_time'] = item.pop(\"ctime\") new_item", "LocalSession() as session: snapshot = session.query(Snapshot).filter_by(snap_id=snap_id).first() repository = session.query(Repository).filter_by(id=snapshot.repository_id).first() repository_interface = get_formatted_repository_interface_from_id(snapshot.repository_id) if", "session.query(Snapshot).filter_by(snap_id=snap_id).first() repository = session.query(Repository).filter_by(id=snapshot.repository_id).first() return repository # gets basic info about the repository", "get_repository_status(id): repository_interface = get_formatted_repository_interface_from_id(id) status = 
repository_interface.is_online() if status is None: return \"Couldn't", "status: return \"Online\" else: return \"Offline\" def get_repository_name(id): with LocalSession() as session: repository", "repository.concurrent_uses = info['concurrent_uses'] repository.timeout = info['timeout'] repository.parameters = json.dumps(info['parameters']) session.commit() from resticweb.tools.job_build import", "except TypeError: misc_data = dict(data=repository.data) misc_data['status'] = repo_status info_dict = dict( id=repository.id, name=repository.name,", "located under resticweb.tools.job_callbacks def add_repository(info): with LocalSession() as session: repository = Repository( name=info['name'],", "for id in ids: repo_to_remove = session.query(Repository).filter_by(id=id).first() # credential_manager.remove_credentials(repo_to_remove.credential_group_id) credential_groups.append(repo_to_remove.credential_group_id) job_parameters = session.query(JobParameter).filter_by(param_name='repository',", "def get_repository_address(id): with LocalSession() as session: repository = session.query(Repository).filter_by(id=id).first() if repository: return repository.address", "info_dict = {} misc_data = None if repo_status: if not repository_interface: repository_interface =", "JobParameter, RepositoryType from resticweb.tools.local_session import LocalSession from resticweb.misc.credential_manager import credential_manager # from .repository", "status\" else: if status: return \"Online\" else: return \"Offline\" def get_repository_name(id): with LocalSession()", "session.delete(repo_to_remove) session.commit() for id in credential_groups: credential_manager.remove_credentials(id) def get_repository_from_snap_id(snap_id): with LocalSession() as session:", "repository_interface.get_stats() with LocalSession() as session: repository = session.query(Repository).filter_by(id=id).first() repository_type = session.query(RepositoryType).filter_by(id=repository.repository_type_id).first() 
if misc_data:", "None item['created_time'] = item.pop(\"ctime\") new_item = SnapshotObject( name=item.get('name'), type=item.get('type'), path=item.get('path'), uid=item.get('uid'), gid=item.get('gid'), size=item.get('size'),", "= get_formatted_repository_interface_from_id(snapshot.repository_id) if not use_cache and repository_interface.is_online(): # if the repo is online,", "repository_type = session.query(RepositoryType).filter_by(id=repository.repository_type_id).first() if misc_data: repository.data = json.dumps(misc_data) session.commit() else: try: misc_data =", "repository: credential_list = credential_manager.get_group_credentials(repository.credential_group_id) if credential_list: repo_password = credential_list.pop('repo_password') respository_interface = ResticRepositoryFormatted(repository.address, repo_password,", "total size and number of files. # if use_cache is set to False", "concurrent_uses=repository.concurrent_uses, timeout=repository.timeout, data=misc_data, cache_repo=repository.cache_repo, repository_type=repository_type.name ) return info_dict # returns a list of", "ValueError: item['ctime'] = None item['created_time'] = item.pop(\"ctime\") new_item = SnapshotObject( name=item.get('name'), type=item.get('type'), path=item.get('path'),", "session: repositories = session.query(Repository).filter_by() for repository in repositories: repository_list.append((repository.id, repository.name)) return repository_list def", "the database from the # repository if use_cache is set to False. 
Returns", "return \"Online\" else: return \"Offline\" def get_repository_name(id): with LocalSession() as session: repository =", "return None def get_repository_address(id): with LocalSession() as session: repository = session.query(Repository).filter_by(id=id).first() if repository:", "= info['cache_repo'] repository.concurrent_uses = info['concurrent_uses'] repository.timeout = info['timeout'] repository.parameters = json.dumps(info['parameters']) session.commit() from", "# from the repository itself like the total size and number of files.", "def get_repository_status(id): repository_interface = get_formatted_repository_interface_from_id(id) status = repository_interface.is_online() if status is None: return", "session.query(Repository).filter_by() for repository in repositories: repository_list.append((repository.id, repository.name)) return repository_list def get_snapshot_info(id): with LocalSession()", "session.add(new_item) session.commit() def get_engine_repositories(): repository_list = [] with LocalSession() as session: repositories =", "= item['snap_time'][:-7] extra = item['snap_time'][-6:] main_time = main_time + extra # item['snap_time'] =", "repository.address = info['address'] repository.cache_repo = info['cache_repo'] repository.concurrent_uses = info['concurrent_uses'] repository.timeout = info['timeout'] repository.parameters", "return repository.name else: return None def get_repository_address(id): with LocalSession() as session: repository =", "snapshot.tags = json.loads(snapshot.tags) except ValueError: pass return snapshot def get_repository_status(id): repository_interface = get_formatted_repository_interface_from_id(id)", "credential_manager.get_credential(repository.credential_group_id, \"repo_password\") else: return None def get_formatted_repository_interface_from_id(id): try: with LocalSession() as session: repository", "dict( id=repository.id, name=repository.name, description=repository.description, repo_id=repository.repo_id, 
address=repository.address, repository_data=repository.data, concurrent_uses=repository.concurrent_uses, timeout=repository.timeout, data=misc_data, cache_repo=repository.cache_repo, repository_type=repository_type.name )", "repository = Repository( name=info['name'], description=info.get('description'), repo_id=info.get('repo_id'), address=info['address'], parameters=info['parameters'], data=info.get('data'), credential_group_id=info.get('credential_group_id'), repository_type_id=info['repository_type_id'], concurrent_uses=info.get('concurrent_uses'), timeout=info.get('timeout')", "respository_interface = ResticRepositoryFormatted(repository.address, repo_password, credential_list if len(credential_list) > 0 else None, id) return", "session: snapshots = session.query(Snapshot).filter_by(repository_id=id).all() return snapshots def get_snapshot(repo_id, snapshot_id, use_cache=False): repository_interface = get_formatted_repository_interface_from_id(repo_id)", "= json.loads(snapshot.tags) except ValueError: pass return snapshot def get_repository_status(id): repository_interface = get_formatted_repository_interface_from_id(id) status", "!= info['name']: credential_manager.set_service_id(repository.credential_group_id, info['name']) repository.name = info['name'] repository.description = info.get('description') repository.address = info['address']", "return repository_list def get_snapshot_info(id): with LocalSession() as session: snapshot = session.query(Snapshot).filter_by(snap_id=id).first() if snapshot.paths:", "= info.get('description') repository.address = info['address'] repository.cache_repo = info['cache_repo'] repository.concurrent_uses = info['concurrent_uses'] repository.timeout =", "session.add(new_snapshot) session.commit() def delete_snapshot(repo_id, snapshot_id): with LocalSession() as session: snapshot = session.query(Snapshot).filter_by(repository_id=repo_id, snap_short_id=snapshot_id).first()", "import Repository as Rep from 
resticweb.models.general import Repository, Snapshot, SnapshotObject, JobParameter, RepositoryType from", "None def get_formatted_repository_interface_from_id(id): try: with LocalSession() as session: repository = session.query(Repository).filter_by(id=id).first() if repository:", "if repository: credential_list = credential_manager.get_group_credentials(repository.credential_group_id) if credential_list: repo_password = credential_list.pop('repo_password') respository_interface = ResticRepositoryFormatted(repository.address,", "get_formatted_repository_interface_from_id(snapshot.repository_id) if not use_cache and repository_interface.is_online(): # if the repo is online, we", "in snapshot_object_list] return snapshot_dict_list def delete_snapshot_objects(snap_id): pass def insert_snapshot_objects(items, snap_id): with LocalSession() as", "None item['modified_time'] = item.pop(\"mtime\") if item.get('atime'): try: item['atime'] = parser.parse(item['atime']) except ValueError: item['atime']", "return snapshots def get_snapshot(repo_id, snapshot_id, use_cache=False): repository_interface = get_formatted_repository_interface_from_id(repo_id) if not use_cache and", "= get_formatted_repository_interface_from_id(repo_id) if not use_cache and repository_interface.is_online(): snapshot = repository_interface.get_snapshots(snapshot_id)[0] return snapshot if", "snapshots from db as we will # just re-add them fresh from the", "from .repository import ResticRepository from .repository_formatted import ResticRepositoryFormatted from resticweb.tools.repository_tools import sync_snapshots, sync_snapshot_objects,", "return snapshots if snapshots else {} else: with LocalSession() as session: snapshots =", "True def get_snapshots(id, use_cache=False): repository_interface = get_formatted_repository_interface_from_id(id) snapshots = [] if not use_cache", "snap_short_id=snapshot_id).first() return snapshot def insert_snapshots(items, repo_id): with LocalSession() as session: for item 
in", "LocalSession() as session: repositories = session.query(Repository).filter_by() for repository in repositories: repository_list.append((repository.id, repository.name)) return", "are grabbed from repo itself # which might take a bit of time", "misc_data = repository_interface.get_stats() with LocalSession() as session: repository = session.query(Repository).filter_by(id=id).first() repository_type = session.query(RepositoryType).filter_by(id=repository.repository_type_id).first()", "is used instead of the following method # it's located under resticweb.tools.job_callbacks def", "repository.data = json.dumps(misc_data) session.commit() else: try: misc_data = json.loads(repository.data) except TypeError: misc_data =", "repository_interface.get_snapshots(snapshot_id)[0] return snapshot if snapshot else {} else: with LocalSession() as session: snapshot", "json.dumps(info['parameters']) session.commit() from resticweb.tools.job_build import JobBuilder if sync_db: job_builder = JobBuilder(job_name=f\"Sync repo {repository.name}\",", ") session.add(new_item) session.commit() def get_engine_repositories(): repository_list = [] with LocalSession() as session: repositories", "timeout=info.get('timeout') ) session.add(repository) session.commit() return repository.id def update_repository(info, repo_id, sync_db=False, unsync_db=False): with LocalSession()", "purge the snapshots from db as we will # just re-add them fresh", "delete_snapshot(repo_id, snapshot_id): with LocalSession() as session: snapshot = session.query(Snapshot).filter_by(repository_id=repo_id, snap_short_id=snapshot_id).first() session.delete(snapshot) session.commit() def", "repo_status: misc_data = repository_interface.get_stats() with LocalSession() as session: repository = session.query(Repository).filter_by(id=id).first() repository_type =", "unsync_db: ''' for snapshot in repository.snapshots: snapshot.snapshot_objects = [] session.commit() ''' job_builder =", "session.commit() def 
get_snapshot_objects(snap_id, use_cache=False): with LocalSession() as session: snapshot = session.query(Snapshot).filter_by(snap_id=snap_id).first() repository =", "repository: return credential_manager.get_credential(repository.credential_group_id, \"repo_password\") else: return None def get_formatted_repository_interface_from_id(id): try: with LocalSession() as", "if not use_cache and repository_interface.is_online(): snapshot = repository_interface.get_snapshots(snapshot_id)[0] return snapshot if snapshot else", "snapshot.paths: try: snapshot.paths = json.loads(snapshot.paths) except ValueError: pass if snapshot.tags: try: snapshot.tags =", "json.loads(snapshot.paths) except ValueError: pass if snapshot.tags: try: snapshot.tags = json.loads(snapshot.tags) except ValueError: pass", "= session.query(Repository).filter_by(id=id).first() if repository: return credential_manager.get_credential(repository.credential_group_id, \"repo_password\") else: return None def get_formatted_repository_interface_from_id(id): try:", "credential_list = credential_manager.get_group_credentials(repository.credential_group_id) if credential_list: repo_password = credential_list.pop('repo_password') respository_interface = ResticRepositoryFormatted(repository.address, repo_password, credential_list", "as session: repository = session.query(Repository).filter_by(id=id).first() if repository: return repository.name else: return None def", "online, we can purge the snapshots from db as we will # just", "ResticRepositoryFormatted(repository.address, repo_password, credential_list if len(credential_list) > 0 else None, id) return respository_interface except", "use_cache and repository_interface.is_online(): snapshots = repository_interface.get_snapshots() return snapshots if snapshots else {} else:", "import os # from resticweb.dictionary.resticweb_constants import Repository as Rep from resticweb.models.general import Repository,", "for snapshot in repository.snapshots: 
snapshot.snapshot_objects = [] session.commit() ''' job_builder = JobBuilder(job_name=f'Clear db", "job_builder.run_job() return repo_id def delete_repositories(ids): credential_groups = [] with LocalSession() as session: for", "the database if use_cache is set to True def get_snapshots(id, use_cache=False): repository_interface =", "can purge the snapshots from db as we will # just re-add them", "them into the database from the # repository if use_cache is set to", "if the repo is online, we can purge the snapshots from db as", "session: for item in items: item['snap_id'] = item.pop('id') item['snap_short_id'] = item.pop('short_id') item['snap_time'] =", "misc_data: repository.data = json.dumps(misc_data) session.commit() else: try: misc_data = json.loads(repository.data) except TypeError: misc_data", "session: snapshot_object_list = session.query(SnapshotObject).filter_by(snapshot_id=snap_id).all() snapshot_dict_list = [snapshot_object.to_dict() for snapshot_object in snapshot_object_list] return snapshot_dict_list", "description=info.get('description'), repo_id=info.get('repo_id'), address=info['address'], parameters=info['parameters'], data=info.get('data'), credential_group_id=info.get('credential_group_id'), repository_type_id=info['repository_type_id'], concurrent_uses=info.get('concurrent_uses'), timeout=info.get('timeout') ) session.add(repository) session.commit() return", "= [] session.commit() ''' job_builder = JobBuilder(job_name=f'Clear db from repo {repository.name}', job_class='clear_snapshot_objects', parameters=dict(repo_id=repository.id))", "snapshot_id, use_cache=False): repository_interface = get_formatted_repository_interface_from_id(repo_id) if not use_cache and repository_interface.is_online(): snapshot = repository_interface.get_snapshots(snapshot_id)[0]", ") return info_dict # returns a list of snapshots and places them into", "= item.pop('id') item['snap_short_id'] = item.pop('short_id') item['snap_time'] = item.pop('time') if 
item['snap_time']: main_time = item['snap_time'][:-7]", "paths=json.dumps(item.get('paths')), tags=json.dumps(item.get('tags')) ) session.add(new_snapshot) session.commit() def delete_snapshot(repo_id, snapshot_id): with LocalSession() as session: snapshot", "import traceback from datetime import datetime from resticweb.dateutil import parser import logging logger", "session.query(JobParameter).filter_by(param_name='repository', param_value=id).all() for parameter in job_parameters: parameter.param_value = None session.delete(repo_to_remove) session.commit() for id", "= get_formatted_repository_interface_from_id(id) snapshots = [] if not use_cache and repository_interface.is_online(): snapshots = repository_interface.get_snapshots()", "None session.delete(repo_to_remove) session.commit() for id in credential_groups: credential_manager.remove_credentials(id) def get_repository_from_snap_id(snap_id): with LocalSession() as", "the repository from the database. Also grabs the stats # from the repository", "pass def insert_snapshot_objects(items, snap_id): with LocalSession() as session: for item in items: if", "snap_short_id=snapshot_id).first() session.delete(snapshot) session.commit() def get_snapshot_objects(snap_id, use_cache=False): with LocalSession() as session: snapshot = session.query(Snapshot).filter_by(snap_id=snap_id).first()", "misc_data = json.loads(repository.data) except TypeError: misc_data = dict(data=repository.data) misc_data['status'] = repo_status info_dict =", "credential_groups = [] with LocalSession() as session: for id in ids: repo_to_remove =", "import logging logger = logging.getLogger('debugLogger') # repository_add_to_db is used instead of the following", "item.pop('id') item['snap_short_id'] = item.pop('short_id') item['snap_time'] = item.pop('time') if item['snap_time']: main_time = item['snap_time'][:-7] extra", "snapshots def get_snapshot(repo_id, snapshot_id, use_cache=False): repository_interface = 
get_formatted_repository_interface_from_id(repo_id) if not use_cache and repository_interface.is_online():", "LocalSession() as session: for item in items: if item.get('mtime'): try: item['mtime'] = parser.parse(item['mtime'])", "repository.timeout = info['timeout'] repository.parameters = json.dumps(info['parameters']) session.commit() from resticweb.tools.job_build import JobBuilder if sync_db:", "False then the repo stats are grabbed from repo itself # which might", "repo_id=info.get('repo_id'), address=info['address'], parameters=info['parameters'], data=info.get('data'), credential_group_id=info.get('credential_group_id'), repository_type_id=info['repository_type_id'], concurrent_uses=info.get('concurrent_uses'), timeout=info.get('timeout') ) session.add(repository) session.commit() return repository.id", "repo itself # which might take a bit of time def get_info(id, repository_interface=None," ]
[ "pull') os.system('git add .') os.system('git commit -m update') foo = pexpect.spawn('git push') foo.expect('.*Username.*:')", "import os import sys def git_expect(repodir, u, p): os.chdir(repodir) os.system('git pull') os.system('git add", "os.system('git commit -m update') foo = pexpect.spawn('git push') foo.expect('.*Username.*:') foo.sendline(u) foo.expect('.*ssword:*') foo.sendline(p) print", "foo.expect('.*ssword:*') foo.sendline(p) print foo.read() def main(argv): git_expect(argv[1], argv[2], argv[3]) if __name__ == '__main__':", "foo.expect('.*Username.*:') foo.sendline(u) foo.expect('.*ssword:*') foo.sendline(p) print foo.read() def main(argv): git_expect(argv[1], argv[2], argv[3]) if __name__", "= pexpect.spawn('git push') foo.expect('.*Username.*:') foo.sendline(u) foo.expect('.*ssword:*') foo.sendline(p) print foo.read() def main(argv): git_expect(argv[1], argv[2],", "import sys def git_expect(repodir, u, p): os.chdir(repodir) os.system('git pull') os.system('git add .') os.system('git", "os.system('git pull') os.system('git add .') os.system('git commit -m update') foo = pexpect.spawn('git push')", "os.chdir(repodir) os.system('git pull') os.system('git add .') os.system('git commit -m update') foo = pexpect.spawn('git", "pexpect.spawn('git push') foo.expect('.*Username.*:') foo.sendline(u) foo.expect('.*ssword:*') foo.sendline(p) print foo.read() def main(argv): git_expect(argv[1], argv[2], argv[3])", "p): os.chdir(repodir) os.system('git pull') os.system('git add .') os.system('git commit -m update') foo =", "commit -m update') foo = pexpect.spawn('git push') foo.expect('.*Username.*:') foo.sendline(u) foo.expect('.*ssword:*') foo.sendline(p) print foo.read()", "update') foo = pexpect.spawn('git push') foo.expect('.*Username.*:') foo.sendline(u) foo.expect('.*ssword:*') foo.sendline(p) print foo.read() def main(argv):", "os import sys def git_expect(repodir, u, p): os.chdir(repodir) os.system('git pull') os.system('git add .')", "add .') 
os.system('git commit -m update') foo = pexpect.spawn('git push') foo.expect('.*Username.*:') foo.sendline(u) foo.expect('.*ssword:*')", "foo = pexpect.spawn('git push') foo.expect('.*Username.*:') foo.sendline(u) foo.expect('.*ssword:*') foo.sendline(p) print foo.read() def main(argv): git_expect(argv[1],", "foo.sendline(u) foo.expect('.*ssword:*') foo.sendline(p) print foo.read() def main(argv): git_expect(argv[1], argv[2], argv[3]) if __name__ ==", "git_expect(repodir, u, p): os.chdir(repodir) os.system('git pull') os.system('git add .') os.system('git commit -m update')", ".') os.system('git commit -m update') foo = pexpect.spawn('git push') foo.expect('.*Username.*:') foo.sendline(u) foo.expect('.*ssword:*') foo.sendline(p)", "-m update') foo = pexpect.spawn('git push') foo.expect('.*Username.*:') foo.sendline(u) foo.expect('.*ssword:*') foo.sendline(p) print foo.read() def", "#!/usr/bin/python import pexpect import os import sys def git_expect(repodir, u, p): os.chdir(repodir) os.system('git", "u, p): os.chdir(repodir) os.system('git pull') os.system('git add .') os.system('git commit -m update') foo", "import pexpect import os import sys def git_expect(repodir, u, p): os.chdir(repodir) os.system('git pull')", "os.system('git add .') os.system('git commit -m update') foo = pexpect.spawn('git push') foo.expect('.*Username.*:') foo.sendline(u)", "foo.sendline(p) print foo.read() def main(argv): git_expect(argv[1], argv[2], argv[3]) if __name__ == '__main__': main(sys.argv)", "push') foo.expect('.*Username.*:') foo.sendline(u) foo.expect('.*ssword:*') foo.sendline(p) print foo.read() def main(argv): git_expect(argv[1], argv[2], argv[3]) if", "pexpect import os import sys def git_expect(repodir, u, p): os.chdir(repodir) os.system('git pull') os.system('git", "sys def git_expect(repodir, u, p): os.chdir(repodir) os.system('git pull') os.system('git add .') os.system('git commit", "def git_expect(repodir, u, p): os.chdir(repodir) os.system('git pull') 
os.system('git add .') os.system('git commit -m" ]
[ ") self._seq_len = seq_len @property def seq_len(self): return self._seq_len def _check_inputs(self, obs_seq, skill):", "skill=skill, ) @abc.abstractmethod def train_forwardpass( self, obs_seq, skill, ): raise NotImplementedError @abc.abstractmethod def", "obs_seq=obs_seq, skill=skill, ) @abc.abstractmethod def train_forwardpass( self, obs_seq, skill, ): raise NotImplementedError @abc.abstractmethod", "self, obs_seq, skill, ): raise NotImplementedError @abc.abstractmethod def eval_forwardpass( self, obs_seq, skill, **kwargs,", "obs_dim, seq_len, obs_dims_used=None, obs_dims_used_except=None, ): super(SplitSeqClassifierBase, self).__init__() self.used_dims = get_obs_dims_used_df( obs_dim=obs_dim, obs_dims_used=obs_dims_used, obs_dims_used_except=obs_dims_used_except,", "if self.training: return self.train_forwardpass( obs_seq=obs_seq, skill=skill, ) else: with torch.no_grad(): return self.eval_forwardpass( obs_seq=obs_seq,", "return self.eval_forwardpass( obs_seq=obs_seq, skill=skill, ) @abc.abstractmethod def train_forwardpass( self, obs_seq, skill, ): raise", "def train_forwardpass( self, obs_seq, skill, ): raise NotImplementedError @abc.abstractmethod def eval_forwardpass( self, obs_seq,", "== self.skill_dim assert len(skill.shape) == 2 assert len(obs_seq.shape) == 3 def forward(self, obs_seq,", ") @abc.abstractmethod def train_forwardpass( self, obs_seq, skill, ): raise NotImplementedError @abc.abstractmethod def eval_forwardpass(", "skill=skill ) obs_seq = obs_seq[..., self.used_dims] if self.training: return self.train_forwardpass( obs_seq=obs_seq, skill=skill, )", "if skill is not None: assert skill.size(batch_dim) == obs_seq.size(batch_dim) assert skill.size(data_dim) == self.skill_dim", "= 0 seq_dim = 1 data_dim = -1 if skill is not None:", "): batch_dim = 0 seq_dim = 1 data_dim = -1 self._check_inputs( obs_seq=obs_seq, skill=skill", "self.training: return self.train_forwardpass( obs_seq=obs_seq, skill=skill, ) else: with torch.no_grad(): return 
self.eval_forwardpass( obs_seq=obs_seq, skill=skill,", "len(obs_seq.shape) == 3 def forward(self, obs_seq, skill=None ): batch_dim = 0 seq_dim =", "seq_len @property def seq_len(self): return self._seq_len def _check_inputs(self, obs_seq, skill): batch_dim = 0", "import BaseNetwork from latent_with_splitseqs.config.fun.get_obs_dims_used_df import get_obs_dims_used_df class SplitSeqClassifierBase(BaseNetwork, metaclass=abc.ABCMeta): def __init__(self, obs_dim, seq_len,", "1 data_dim = -1 if skill is not None: assert skill.size(batch_dim) == obs_seq.size(batch_dim)", "import abc import torch from code_slac.network.base import BaseNetwork from latent_with_splitseqs.config.fun.get_obs_dims_used_df import get_obs_dims_used_df class", "else: with torch.no_grad(): return self.eval_forwardpass( obs_seq=obs_seq, skill=skill, ) @abc.abstractmethod def train_forwardpass( self, obs_seq,", "batch_dim = 0 seq_dim = 1 data_dim = -1 if skill is not", "obs_dims_used_except=None, ): super(SplitSeqClassifierBase, self).__init__() self.used_dims = get_obs_dims_used_df( obs_dim=obs_dim, obs_dims_used=obs_dims_used, obs_dims_used_except=obs_dims_used_except, ) self._seq_len =", "obs_seq=obs_seq, skill=skill ) obs_seq = obs_seq[..., self.used_dims] if self.training: return self.train_forwardpass( obs_seq=obs_seq, skill=skill,", "get_obs_dims_used_df class SplitSeqClassifierBase(BaseNetwork, metaclass=abc.ABCMeta): def __init__(self, obs_dim, seq_len, obs_dims_used=None, obs_dims_used_except=None, ): super(SplitSeqClassifierBase, self).__init__()", "super(SplitSeqClassifierBase, self).__init__() self.used_dims = get_obs_dims_used_df( obs_dim=obs_dim, obs_dims_used=obs_dims_used, obs_dims_used_except=obs_dims_used_except, ) self._seq_len = seq_len @property", "0 seq_dim = 1 data_dim = -1 self._check_inputs( obs_seq=obs_seq, skill=skill ) obs_seq =", "obs_dims_used=None, obs_dims_used_except=None, ): super(SplitSeqClassifierBase, self).__init__() self.used_dims = get_obs_dims_used_df( 
obs_dim=obs_dim, obs_dims_used=obs_dims_used, obs_dims_used_except=obs_dims_used_except, ) self._seq_len", "skill is not None: assert skill.size(batch_dim) == obs_seq.size(batch_dim) assert skill.size(data_dim) == self.skill_dim assert", "torch.no_grad(): return self.eval_forwardpass( obs_seq=obs_seq, skill=skill, ) @abc.abstractmethod def train_forwardpass( self, obs_seq, skill, ):", "-1 if skill is not None: assert skill.size(batch_dim) == obs_seq.size(batch_dim) assert skill.size(data_dim) ==", "obs_dims_used=obs_dims_used, obs_dims_used_except=obs_dims_used_except, ) self._seq_len = seq_len @property def seq_len(self): return self._seq_len def _check_inputs(self,", "skill.size(data_dim) == self.skill_dim assert len(skill.shape) == 2 assert len(obs_seq.shape) == 3 def forward(self,", "0 seq_dim = 1 data_dim = -1 if skill is not None: assert", "self).__init__() self.used_dims = get_obs_dims_used_df( obs_dim=obs_dim, obs_dims_used=obs_dims_used, obs_dims_used_except=obs_dims_used_except, ) self._seq_len = seq_len @property def", "data_dim = -1 self._check_inputs( obs_seq=obs_seq, skill=skill ) obs_seq = obs_seq[..., self.used_dims] if self.training:", "return self.train_forwardpass( obs_seq=obs_seq, skill=skill, ) else: with torch.no_grad(): return self.eval_forwardpass( obs_seq=obs_seq, skill=skill, )", "= 1 data_dim = -1 if skill is not None: assert skill.size(batch_dim) ==", "self.eval_forwardpass( obs_seq=obs_seq, skill=skill, ) @abc.abstractmethod def train_forwardpass( self, obs_seq, skill, ): raise NotImplementedError", "= get_obs_dims_used_df( obs_dim=obs_dim, obs_dims_used=obs_dims_used, obs_dims_used_except=obs_dims_used_except, ) self._seq_len = seq_len @property def seq_len(self): return", "@abc.abstractmethod def train_forwardpass( self, obs_seq, skill, ): raise NotImplementedError @abc.abstractmethod def eval_forwardpass( self,", "SplitSeqClassifierBase(BaseNetwork, metaclass=abc.ABCMeta): def __init__(self, obs_dim, seq_len, obs_dims_used=None, 
obs_dims_used_except=None, ): super(SplitSeqClassifierBase, self).__init__() self.used_dims =", "@property def seq_len(self): return self._seq_len def _check_inputs(self, obs_seq, skill): batch_dim = 0 seq_dim", "== 2 assert len(obs_seq.shape) == 3 def forward(self, obs_seq, skill=None ): batch_dim =", "): raise NotImplementedError @abc.abstractmethod def eval_forwardpass( self, obs_seq, skill, **kwargs, ): raise NotImplementedError", "latent_with_splitseqs.config.fun.get_obs_dims_used_df import get_obs_dims_used_df class SplitSeqClassifierBase(BaseNetwork, metaclass=abc.ABCMeta): def __init__(self, obs_dim, seq_len, obs_dims_used=None, obs_dims_used_except=None, ):", "with torch.no_grad(): return self.eval_forwardpass( obs_seq=obs_seq, skill=skill, ) @abc.abstractmethod def train_forwardpass( self, obs_seq, skill,", "get_obs_dims_used_df( obs_dim=obs_dim, obs_dims_used=obs_dims_used, obs_dims_used_except=obs_dims_used_except, ) self._seq_len = seq_len @property def seq_len(self): return self._seq_len", "seq_len, obs_dims_used=None, obs_dims_used_except=None, ): super(SplitSeqClassifierBase, self).__init__() self.used_dims = get_obs_dims_used_df( obs_dim=obs_dim, obs_dims_used=obs_dims_used, obs_dims_used_except=obs_dims_used_except, )", "skill.size(batch_dim) == obs_seq.size(batch_dim) assert skill.size(data_dim) == self.skill_dim assert len(skill.shape) == 2 assert len(obs_seq.shape)", "assert len(skill.shape) == 2 assert len(obs_seq.shape) == 3 def forward(self, obs_seq, skill=None ):", "forward(self, obs_seq, skill=None ): batch_dim = 0 seq_dim = 1 data_dim = -1", "code_slac.network.base import BaseNetwork from latent_with_splitseqs.config.fun.get_obs_dims_used_df import get_obs_dims_used_df class SplitSeqClassifierBase(BaseNetwork, metaclass=abc.ABCMeta): def __init__(self, obs_dim,", "_check_inputs(self, obs_seq, skill): batch_dim = 0 seq_dim = 1 data_dim = -1 if", "skill, ): raise NotImplementedError @abc.abstractmethod def eval_forwardpass( self, obs_seq, 
skill, **kwargs, ): raise", "obs_dim=obs_dim, obs_dims_used=obs_dims_used, obs_dims_used_except=obs_dims_used_except, ) self._seq_len = seq_len @property def seq_len(self): return self._seq_len def", "self.skill_dim assert len(skill.shape) == 2 assert len(obs_seq.shape) == 3 def forward(self, obs_seq, skill=None", "metaclass=abc.ABCMeta): def __init__(self, obs_dim, seq_len, obs_dims_used=None, obs_dims_used_except=None, ): super(SplitSeqClassifierBase, self).__init__() self.used_dims = get_obs_dims_used_df(", "obs_seq[..., self.used_dims] if self.training: return self.train_forwardpass( obs_seq=obs_seq, skill=skill, ) else: with torch.no_grad(): return", "self.train_forwardpass( obs_seq=obs_seq, skill=skill, ) else: with torch.no_grad(): return self.eval_forwardpass( obs_seq=obs_seq, skill=skill, ) @abc.abstractmethod", "= obs_seq[..., self.used_dims] if self.training: return self.train_forwardpass( obs_seq=obs_seq, skill=skill, ) else: with torch.no_grad():", "-1 self._check_inputs( obs_seq=obs_seq, skill=skill ) obs_seq = obs_seq[..., self.used_dims] if self.training: return self.train_forwardpass(", "= 0 seq_dim = 1 data_dim = -1 self._check_inputs( obs_seq=obs_seq, skill=skill ) obs_seq", "def seq_len(self): return self._seq_len def _check_inputs(self, obs_seq, skill): batch_dim = 0 seq_dim =", "== 3 def forward(self, obs_seq, skill=None ): batch_dim = 0 seq_dim = 1", "= -1 if skill is not None: assert skill.size(batch_dim) == obs_seq.size(batch_dim) assert skill.size(data_dim)", ") else: with torch.no_grad(): return self.eval_forwardpass( obs_seq=obs_seq, skill=skill, ) @abc.abstractmethod def train_forwardpass( self,", "self._seq_len = seq_len @property def seq_len(self): return self._seq_len def _check_inputs(self, obs_seq, skill): batch_dim", "skill=skill, ) else: with torch.no_grad(): return self.eval_forwardpass( obs_seq=obs_seq, skill=skill, ) @abc.abstractmethod def train_forwardpass(", "seq_dim = 1 data_dim = -1 if skill is not None: assert 
skill.size(batch_dim)", "batch_dim = 0 seq_dim = 1 data_dim = -1 self._check_inputs( obs_seq=obs_seq, skill=skill )", "skill): batch_dim = 0 seq_dim = 1 data_dim = -1 if skill is", "= -1 self._check_inputs( obs_seq=obs_seq, skill=skill ) obs_seq = obs_seq[..., self.used_dims] if self.training: return", "class SplitSeqClassifierBase(BaseNetwork, metaclass=abc.ABCMeta): def __init__(self, obs_dim, seq_len, obs_dims_used=None, obs_dims_used_except=None, ): super(SplitSeqClassifierBase, self).__init__() self.used_dims", "self._seq_len def _check_inputs(self, obs_seq, skill): batch_dim = 0 seq_dim = 1 data_dim =", "self.used_dims] if self.training: return self.train_forwardpass( obs_seq=obs_seq, skill=skill, ) else: with torch.no_grad(): return self.eval_forwardpass(", "obs_seq, skill): batch_dim = 0 seq_dim = 1 data_dim = -1 if skill", "1 data_dim = -1 self._check_inputs( obs_seq=obs_seq, skill=skill ) obs_seq = obs_seq[..., self.used_dims] if", "obs_seq, skill, ): raise NotImplementedError @abc.abstractmethod def eval_forwardpass( self, obs_seq, skill, **kwargs, ):", "__init__(self, obs_dim, seq_len, obs_dims_used=None, obs_dims_used_except=None, ): super(SplitSeqClassifierBase, self).__init__() self.used_dims = get_obs_dims_used_df( obs_dim=obs_dim, obs_dims_used=obs_dims_used,", "= seq_len @property def seq_len(self): return self._seq_len def _check_inputs(self, obs_seq, skill): batch_dim =", "torch from code_slac.network.base import BaseNetwork from latent_with_splitseqs.config.fun.get_obs_dims_used_df import get_obs_dims_used_df class SplitSeqClassifierBase(BaseNetwork, metaclass=abc.ABCMeta): def", "def _check_inputs(self, obs_seq, skill): batch_dim = 0 seq_dim = 1 data_dim = -1", "= 1 data_dim = -1 self._check_inputs( obs_seq=obs_seq, skill=skill ) obs_seq = obs_seq[..., self.used_dims]", "from code_slac.network.base import BaseNetwork from latent_with_splitseqs.config.fun.get_obs_dims_used_df import get_obs_dims_used_df class SplitSeqClassifierBase(BaseNetwork, 
metaclass=abc.ABCMeta): def __init__(self,", "None: assert skill.size(batch_dim) == obs_seq.size(batch_dim) assert skill.size(data_dim) == self.skill_dim assert len(skill.shape) == 2", "assert skill.size(data_dim) == self.skill_dim assert len(skill.shape) == 2 assert len(obs_seq.shape) == 3 def", "import get_obs_dims_used_df class SplitSeqClassifierBase(BaseNetwork, metaclass=abc.ABCMeta): def __init__(self, obs_dim, seq_len, obs_dims_used=None, obs_dims_used_except=None, ): super(SplitSeqClassifierBase,", "abc import torch from code_slac.network.base import BaseNetwork from latent_with_splitseqs.config.fun.get_obs_dims_used_df import get_obs_dims_used_df class SplitSeqClassifierBase(BaseNetwork,", "from latent_with_splitseqs.config.fun.get_obs_dims_used_df import get_obs_dims_used_df class SplitSeqClassifierBase(BaseNetwork, metaclass=abc.ABCMeta): def __init__(self, obs_dim, seq_len, obs_dims_used=None, obs_dims_used_except=None,", "): super(SplitSeqClassifierBase, self).__init__() self.used_dims = get_obs_dims_used_df( obs_dim=obs_dim, obs_dims_used=obs_dims_used, obs_dims_used_except=obs_dims_used_except, ) self._seq_len = seq_len", "3 def forward(self, obs_seq, skill=None ): batch_dim = 0 seq_dim = 1 data_dim", "obs_dims_used_except=obs_dims_used_except, ) self._seq_len = seq_len @property def seq_len(self): return self._seq_len def _check_inputs(self, obs_seq,", "2 assert len(obs_seq.shape) == 3 def forward(self, obs_seq, skill=None ): batch_dim = 0", "== obs_seq.size(batch_dim) assert skill.size(data_dim) == self.skill_dim assert len(skill.shape) == 2 assert len(obs_seq.shape) ==", "assert len(obs_seq.shape) == 3 def forward(self, obs_seq, skill=None ): batch_dim = 0 seq_dim", "skill=None ): batch_dim = 0 seq_dim = 1 data_dim = -1 self._check_inputs( obs_seq=obs_seq,", "self._check_inputs( obs_seq=obs_seq, skill=skill ) obs_seq = obs_seq[..., self.used_dims] if self.training: return self.train_forwardpass( obs_seq=obs_seq,", "assert skill.size(batch_dim) == 
obs_seq.size(batch_dim) assert skill.size(data_dim) == self.skill_dim assert len(skill.shape) == 2 assert", "data_dim = -1 if skill is not None: assert skill.size(batch_dim) == obs_seq.size(batch_dim) assert", "obs_seq.size(batch_dim) assert skill.size(data_dim) == self.skill_dim assert len(skill.shape) == 2 assert len(obs_seq.shape) == 3", "obs_seq=obs_seq, skill=skill, ) else: with torch.no_grad(): return self.eval_forwardpass( obs_seq=obs_seq, skill=skill, ) @abc.abstractmethod def", "seq_dim = 1 data_dim = -1 self._check_inputs( obs_seq=obs_seq, skill=skill ) obs_seq = obs_seq[...,", "def forward(self, obs_seq, skill=None ): batch_dim = 0 seq_dim = 1 data_dim =", "BaseNetwork from latent_with_splitseqs.config.fun.get_obs_dims_used_df import get_obs_dims_used_df class SplitSeqClassifierBase(BaseNetwork, metaclass=abc.ABCMeta): def __init__(self, obs_dim, seq_len, obs_dims_used=None,", "def __init__(self, obs_dim, seq_len, obs_dims_used=None, obs_dims_used_except=None, ): super(SplitSeqClassifierBase, self).__init__() self.used_dims = get_obs_dims_used_df( obs_dim=obs_dim,", "is not None: assert skill.size(batch_dim) == obs_seq.size(batch_dim) assert skill.size(data_dim) == self.skill_dim assert len(skill.shape)", "self.used_dims = get_obs_dims_used_df( obs_dim=obs_dim, obs_dims_used=obs_dims_used, obs_dims_used_except=obs_dims_used_except, ) self._seq_len = seq_len @property def seq_len(self):", "train_forwardpass( self, obs_seq, skill, ): raise NotImplementedError @abc.abstractmethod def eval_forwardpass( self, obs_seq, skill,", "obs_seq = obs_seq[..., self.used_dims] if self.training: return self.train_forwardpass( obs_seq=obs_seq, skill=skill, ) else: with", "seq_len(self): return self._seq_len def _check_inputs(self, obs_seq, skill): batch_dim = 0 seq_dim = 1", "return self._seq_len def _check_inputs(self, obs_seq, skill): batch_dim = 0 seq_dim = 1 data_dim", "import torch from code_slac.network.base import BaseNetwork from 
latent_with_splitseqs.config.fun.get_obs_dims_used_df import get_obs_dims_used_df class SplitSeqClassifierBase(BaseNetwork, metaclass=abc.ABCMeta):", "len(skill.shape) == 2 assert len(obs_seq.shape) == 3 def forward(self, obs_seq, skill=None ): batch_dim", "not None: assert skill.size(batch_dim) == obs_seq.size(batch_dim) assert skill.size(data_dim) == self.skill_dim assert len(skill.shape) ==", "obs_seq, skill=None ): batch_dim = 0 seq_dim = 1 data_dim = -1 self._check_inputs(", ") obs_seq = obs_seq[..., self.used_dims] if self.training: return self.train_forwardpass( obs_seq=obs_seq, skill=skill, ) else:" ]
[ "from __future__ import absolute_import from .commit import * name = \"simplecommit\" if __name__==\"__main__\":", "__future__ import absolute_import from .commit import * name = \"simplecommit\" if __name__==\"__main__\": run()", "<filename>simplecommit/__init__.py from __future__ import absolute_import from .commit import * name = \"simplecommit\" if" ]
[ "the parameter declaration is DT_BOOL. Returns ------- bool \"\"\" return self.data_type == pd.DT_BOOL", "Data Analysis # Platform (ROB). # # Copyright (C) 2019 NYU. # #", "AS_INPUT = '$input' class ParameterBase(object): \"\"\"Base class for template parameter and parameter argument", "files. \"\"\" AS_INPUT = '$input' class ParameterBase(object): \"\"\"Base class for template parameter and", "bool \"\"\" return self.data_type == pd.DT_LIST def is_record(self): \"\"\"Test if data type for", "obj else None self.as_constant = obj[pd.LABEL_AS] if pd.LABEL_AS in obj else None self.children", "# # ROB is free software; you can redistribute it and/or modify it", "class TemplateParameter(ParameterBase): \"\"\"The template parameter is a simple wrapper around a dictionary that", "records \"\"\" super(TemplateParameter, self).__init__( identifier=obj[pd.LABEL_ID], data_type = obj[pd.LABEL_DATATYPE] ) self.obj = obj self.name", "(C) 2019 NYU. # # ROB is free software; you can redistribute it", "val += ' (bool)' elif self.is_file(): val += ' (file)' elif self.is_float(): val", "def is_string(self): \"\"\"Test if data type for the parameter declaration is DT_STRING. 
Returns", "val = str(self.name) # Add text that indicates the parameter type if self.is_bool():", "val += ' [default \\'' + str(self.default_value) + '\\']' return val + ':", "data_type def is_bool(self): \"\"\"Test if data type for the parameter declaration is DT_BOOL.", "']' else: val += ' [default \\'' + str(self.default_value) + '\\']' return val", "an indication of the data type, the parameter name and the default value", "data type \\'{}\\''.format(data_type)) self.identifier = identifier self.data_type = data_type def is_bool(self): \"\"\"Test if", "obj[pd.LABEL_DATATYPE] ) self.obj = obj self.name = obj[pd.LABEL_NAME] self.description = obj[pd.LABEL_DESCRIPTION] self.index =", "' [default ' + str(self.default_value) + ']' else: val += ' [default \\''", "obj[pd.LABEL_DEFAULT] if pd.LABEL_DEFAULT in obj else None self.is_required = obj[pd.LABEL_REQUIRED] self.values = obj[pd.LABEL_VALUES]", "is_bool(self): \"\"\"Test if data type for the parameter declaration is DT_BOOL. Returns -------", "a parameter has children. Only returns True if the list of children is", "def __init__(self, identifier, data_type): \"\"\"Initialize the unique identifier and data type. Raises value", "for parameter lists or records \"\"\" super(TemplateParameter, self).__init__( identifier=obj[pd.LABEL_ID], data_type = obj[pd.LABEL_DATATYPE] )", "return self.data_type == pd.DT_RECORD def is_string(self): \"\"\"Test if data type for the parameter", "different components of the parameter declaration. \"\"\" def __init__(self, obj, children=None): \"\"\"Initialize the", "has_constant(self): \"\"\"True if the as_constant property is not None. Returns ------- bool \"\"\"", "simple wrapper around a dictionary that contains a parameter declaration. 
# Special value for the 'as' property that indicates user input for the
# target path of uploaded files. (Previously written as a bare string
# statement, which is a no-op at runtime; a comment is the idiomatic form.)
AS_INPUT = '$input'
class ParameterBase(object):
    """Base class for template parameter and parameter argument values. The
    base class maintains the unique parameter identifier and the information
    about the data type.
    """
    def __init__(self, identifier, data_type):
        """Initialize the unique identifier and data type. Raises an
        InvalidParameterError if the given data type identifier is not valid.

        Parameters
        ----------
        identifier: string
            Unique parameter identifier
        data_type: string
            Identifier for parameter data type

        Raises
        ------
        benchtmpl.error.InvalidParameterError
        """
        # Validate the declared type against the set of known type
        # identifiers before accepting the declaration.
        if data_type not in pd.DATA_TYPES:
            raise InvalidParameterError('invalid data type \'{}\''.format(data_type))
        self.identifier = identifier
        self.data_type = data_type

    def is_bool(self):
        """Test if data type for the parameter declaration is DT_BOOL.

        Returns
        -------
        bool
        """
        return self.data_type == pd.DT_BOOL

    def is_file(self):
        """Test if data type for the parameter declaration is DT_FILE.

        Returns
        -------
        bool
        """
        return self.data_type == pd.DT_FILE

    def is_float(self):
        """Test if data type for the parameter declaration is DT_DECIMAL.

        Returns
        -------
        bool
        """
        return self.data_type == pd.DT_DECIMAL

    def is_int(self):
        """Test if data type for the parameter declaration is DT_INTEGER.

        Returns
        -------
        bool
        """
        return self.data_type == pd.DT_INTEGER

    def is_list(self):
        """Test if data type for the parameter declaration is DT_LIST.

        Returns
        -------
        bool
        """
        return self.data_type == pd.DT_LIST

    def is_record(self):
        """Test if data type for the parameter declaration is DT_RECORD.

        Returns
        -------
        bool
        """
        return self.data_type == pd.DT_RECORD

    def is_string(self):
        """Test if data type for the parameter declaration is DT_STRING.

        Returns
        -------
        bool
        """
        return self.data_type == pd.DT_STRING
class TemplateParameter(ParameterBase):
    """The template parameter is a simple wrapper around a dictionary that
    contains a parameter declaration. The wrapper provides easy access to the
    different components of the parameter declaration.
    """
    def __init__(self, obj, children=None):
        """Initialize the different attributes of a template parameter
        declaration from a given dictionary.

        Parameters
        ----------
        obj: dict
            Dictionary containing the template parameter declaration properties
        children: list(benchtmpl.workflow.parameter.base.TemplateParameter), optional
            Optional list of parameter children for parameter lists or records
        """
        super(TemplateParameter, self).__init__(
            identifier=obj[pd.LABEL_ID],
            data_type=obj[pd.LABEL_DATATYPE]
        )
        # Keep the raw declaration dictionary for serialization (to_dict).
        self.obj = obj
        self.name = obj[pd.LABEL_NAME]
        self.description = obj[pd.LABEL_DESCRIPTION]
        self.index = obj[pd.LABEL_INDEX]
        # Optional properties default to None when absent from the declaration.
        self.default_value = obj[pd.LABEL_DEFAULT] if pd.LABEL_DEFAULT in obj else None
        self.is_required = obj[pd.LABEL_REQUIRED]
        self.values = obj[pd.LABEL_VALUES] if pd.LABEL_VALUES in obj else None
        self.parent = obj[pd.LABEL_PARENT] if pd.LABEL_PARENT in obj else None
        self.as_constant = obj[pd.LABEL_AS] if pd.LABEL_AS in obj else None
        self.children = children

    def add_child(self, para):
        """Short-cut to add an element to the list of children of the
        parameter. The children list is kept sorted by (index, identifier).

        Parameters
        ----------
        para: benchtmpl.workflow.parameter.base.TemplateParameter
            Template parameter instance for child parameter
        """
        self.children.append(para)
        self.children.sort(key=lambda p: (p.index, p.identifier))

    def as_input(self):
        """Flag indicating whether the value for the as constant property is
        the special value that indicates that the property value is provided
        by the user.

        Returns
        -------
        bool
        """
        return self.as_constant == AS_INPUT

    def get_constant(self):
        """Get the value of the as_constant property.

        Returns
        -------
        string
        """
        return self.as_constant

    def has_children(self):
        """Test if a parameter has children. Only returns True if the list of
        children is not None and not empty.

        Returns
        -------
        bool
        """
        # bool() covers both the None case and the empty-list case.
        return bool(self.children)

    def has_constant(self):
        """True if the as_constant property is not None.

        Returns
        -------
        bool
        """
        return self.as_constant is not None

    def prompt(self):
        """Get default input prompt for the parameter declaration. The prompt
        contains an indication of the data type, the parameter name and the
        default value (if defined).

        Returns
        -------
        string
        """
        val = str(self.name)
        # Add text that indicates the parameter type.
        if self.is_bool():
            val += ' (bool)'
        elif self.is_file():
            val += ' (file)'
        elif self.is_float():
            val += ' (decimal)'
        elif self.is_int():
            val += ' (integer)'
        elif self.is_string():
            val += ' (string)'
        if self.default_value is not None:
            # Numeric and boolean defaults are shown unquoted; all other
            # defaults are wrapped in single quotes.
            if self.is_bool() or self.is_float() or self.is_int():
                val += ' [default ' + str(self.default_value) + ']'
            else:
                val += ' [default \'' + str(self.default_value) + '\']'
        return val + ': '

    def to_dict(self):
        """Get the dictionary serialization for the parameter declaration.

        Returns
        -------
        dict
        """
        return self.obj
\"\"\" def __init__(self, identifier, data_type): \"\"\"Initialize the unique", "indicating whether the value for the as constant property is the special value", "the as_constant property. Returns ------- string \"\"\" return self.as_constant def has_children(self): \"\"\"Test if", "\"\"\" return self.data_type == pd.DT_FILE def is_float(self): \"\"\"Test if data type for the", "the parameter type if self.is_bool(): val += ' (bool)' elif self.is_file(): val +=", "(i) identify the parameter, (ii) define a nested parameter structure, and (iii) render", "None: if self.is_bool() or self.is_float() or self.is_int(): val += ' [default ' +", "if self.is_bool(): val += ' (bool)' elif self.is_file(): val += ' (file)' elif", "def is_file(self): \"\"\"Test if data type for the parameter declaration is DT_FILE. Returns", "data type for the parameter declaration is DT_INTEGER. Returns ------- bool \"\"\" return", "\"\"\"Special value for as-property that indicates user input for target path of uploaded", "data_type): \"\"\"Initialize the unique identifier and data type. Raises value error if the", "' (file)' elif self.is_float(): val += ' (decimal)' elif self.is_int(): val += '", "if not self.default_value is None: if self.is_bool() or self.is_float() or self.is_int(): val +=", "if the list of children is not None and not empty. Returns -------", "pd.DT_FILE def is_float(self): \"\"\"Test if data type for the parameter declaration is DT_DECIMAL.", "InvalidParameterError('invalid data type \\'{}\\''.format(data_type)) self.identifier = identifier self.data_type = data_type def is_bool(self): \"\"\"Test", "== pd.DT_DECIMAL def is_int(self): \"\"\"Test if data type for the parameter declaration is", "of the parameter declaration. 
\"\"\" def __init__(self, obj, children=None): \"\"\"Initialize the different attributes", "\"\"\" return self.data_type == pd.DT_DECIMAL def is_int(self): \"\"\"Test if data type for the", "return False def has_constant(self): \"\"\"True if the as_constant property is not None. Returns", "parameter declaration is DT_LIST. Returns ------- bool \"\"\" return self.data_type == pd.DT_LIST def", "is free software; you can redistribute it and/or modify it under the #", "(integer)' elif self.is_string(): val += ' (string)' if not self.default_value is None: if", "obj[pd.LABEL_AS] if pd.LABEL_AS in obj else None self.children = children def add_child(self, para):", "str(self.default_value) + '\\']' return val + ': ' def to_dict(self): \"\"\"Get the dictionary", "def is_float(self): \"\"\"Test if data type for the parameter declaration is DT_DECIMAL. Returns", "self.is_float() or self.is_int(): val += ' [default ' + str(self.default_value) + ']' else:", "children of the parameter. Parameters ---------- para: benchtmpl.workflow.parameter.base.TemplateParameter Template parameter instance for child", "return len(self.children) > 0 return False def has_constant(self): \"\"\"True if the as_constant property", "not None and not empty. Returns ------- bool \"\"\" if not self.children is", "Analysis # Platform (ROB). # # Copyright (C) 2019 NYU. # # ROB", "or records \"\"\" super(TemplateParameter, self).__init__( identifier=obj[pd.LABEL_ID], data_type = obj[pd.LABEL_DATATYPE] ) self.obj = obj", "InvalidParameterError import benchtmpl.workflow.parameter.declaration as pd \"\"\"Special value for as-property that indicates user input", "bool \"\"\" return self.data_type == pd.DT_FILE def is_float(self): \"\"\"Test if data type for", "default input prompt for the parameter declaration. 
The prompt contains an indication of", "------ benchtmpl.error.InvalidParameterError \"\"\" if not data_type in pd.DATA_TYPES: raise InvalidParameterError('invalid data type \\'{}\\''.format(data_type))", "Returns ------- string \"\"\" return self.as_constant def has_children(self): \"\"\"Test if a parameter has", "children: list(benchtmpl.workflow.parameter.base.TemplateParameter), optional Optional list of parameter children for parameter lists or records", "# Add text that indicates the parameter type if self.is_bool(): val += '", "else None self.as_constant = obj[pd.LABEL_AS] if pd.LABEL_AS in obj else None self.children =", "for the parameter declaration is DT_STRING. Returns ------- bool \"\"\" return self.data_type ==", "str(self.default_value) + ']' else: val += ' [default \\'' + str(self.default_value) + '\\']'", "+= ' (integer)' elif self.is_string(): val += ' (string)' if not self.default_value is", "The prompt contains an indication of the data type, the parameter name and", "self.is_bool() or self.is_float() or self.is_int(): val += ' [default ' + str(self.default_value) +", "Returns ------- bool \"\"\" return self.data_type == pd.DT_FILE def is_float(self): \"\"\"Test if data", "the parameter declaration. \"\"\" def __init__(self, obj, children=None): \"\"\"Initialize the different attributes of", "the dictionary serialization for the parameter declaration. Returns ------- dict \"\"\" return self.obj", "parameter and parameter argument values. The base class maintains the unique parameter identifier", "\"\"\"The template parameter is a simple wrapper around a dictionary that contains a", "self.children is None: return len(self.children) > 0 return False def has_constant(self): \"\"\"True if", "and not empty. Returns ------- bool \"\"\" if not self.children is None: return", "self.is_required = obj[pd.LABEL_REQUIRED] self.values = obj[pd.LABEL_VALUES] if pd.LABEL_VALUES in obj else None self.parent", "contains a parameter declaration. 
The wrapper provides easy access to the different components", "MIT License; see LICENSE file for more details. \"\"\"Base class for workflow template", "property. Returns ------- string \"\"\" return self.as_constant def has_children(self): \"\"\"Test if a parameter", "of children of the parameter. Parameters ---------- para: benchtmpl.workflow.parameter.base.TemplateParameter Template parameter instance for", "super(TemplateParameter, self).__init__( identifier=obj[pd.LABEL_ID], data_type = obj[pd.LABEL_DATATYPE] ) self.obj = obj self.name = obj[pd.LABEL_NAME]", "of children is not None and not empty. Returns ------- bool \"\"\" if", "parameters. Each parameter has a set of properties that are used to (i)", "bool \"\"\" if not self.children is None: return len(self.children) > 0 return False", "target path of uploaded files. \"\"\" AS_INPUT = '$input' class ParameterBase(object): \"\"\"Base class", "obj else None self.children = children def add_child(self, para): \"\"\"Short-cut to add an", "(ROB). # # Copyright (C) 2019 NYU. # # ROB is free software;", "p: (p.index, p.identifier)) def as_input(self): \"\"\"Flag indicating whether the value for the as", "data type for the parameter declaration is DT_RECORD. Returns ------- bool \"\"\" return", "the different attributes of a template parameter declaration from a given dictionary. Parameters", "' (bool)' elif self.is_file(): val += ' (file)' elif self.is_float(): val += '", "benchtmpl.error.InvalidParameterError \"\"\" if not data_type in pd.DATA_TYPES: raise InvalidParameterError('invalid data type \\'{}\\''.format(data_type)) self.identifier", "if data type for the parameter declaration is DT_DECIMAL. Returns ------- bool \"\"\"", "declaration properties children: list(benchtmpl.workflow.parameter.base.TemplateParameter), optional Optional list of parameter children for parameter lists", "given dictionary. 
Parameters ---------- obj: dict Dictionary containing the template parameter declaration properties", "indication of the data type, the parameter name and the default value (if", "val += ' (string)' if not self.default_value is None: if self.is_bool() or self.is_float()", "easy access to the different components of the parameter declaration. \"\"\" def __init__(self,", "+ str(self.default_value) + ']' else: val += ' [default \\'' + str(self.default_value) +", "from benchtmpl.error import InvalidParameterError import benchtmpl.workflow.parameter.declaration as pd \"\"\"Special value for as-property that", "the parameter declaration is DT_STRING. Returns ------- bool \"\"\" return self.data_type == pd.DT_STRING", "ParameterBase(object): \"\"\"Base class for template parameter and parameter argument values. The base class", "DT_INTEGER. Returns ------- bool \"\"\" return self.data_type == pd.DT_INTEGER def is_list(self): \"\"\"Test if", "dictionary that contains a parameter declaration. The wrapper provides easy access to the", "\"\"\" self.children.append(para) self.children.sort(key=lambda p: (p.index, p.identifier)) def as_input(self): \"\"\"Flag indicating whether the value", "+ ']' else: val += ' [default \\'' + str(self.default_value) + '\\']' return", "pd.DT_BOOL def is_file(self): \"\"\"Test if data type for the parameter declaration is DT_FILE.", "for parameter data type Raises ------ benchtmpl.error.InvalidParameterError \"\"\" if not data_type in pd.DATA_TYPES:", "template parameter declaration properties children: list(benchtmpl.workflow.parameter.base.TemplateParameter), optional Optional list of parameter children for", "to add an element to the list of children of the parameter. 
Parameters", "self).__init__( identifier=obj[pd.LABEL_ID], data_type = obj[pd.LABEL_DATATYPE] ) self.obj = obj self.name = obj[pd.LABEL_NAME] self.description", "base class maintains the unique parameter identifier and the information about the data", "obj[pd.LABEL_DESCRIPTION] self.index = obj[pd.LABEL_INDEX] self.default_value = obj[pd.LABEL_DEFAULT] if pd.LABEL_DEFAULT in obj else None", "pd.LABEL_DEFAULT in obj else None self.is_required = obj[pd.LABEL_REQUIRED] self.values = obj[pd.LABEL_VALUES] if pd.LABEL_VALUES", "and/or modify it under the # terms of the MIT License; see LICENSE", "__init__(self, identifier, data_type): \"\"\"Initialize the unique identifier and data type. Raises value error", "under the # terms of the MIT License; see LICENSE file for more", "structure, and (iii) render UI forms to collect parameter values. \"\"\" from benchtmpl.error", "= children def add_child(self, para): \"\"\"Short-cut to add an element to the list", "forms to collect parameter values. \"\"\" from benchtmpl.error import InvalidParameterError import benchtmpl.workflow.parameter.declaration as", "------- bool \"\"\" return self.data_type == pd.DT_DECIMAL def is_int(self): \"\"\"Test if data type", "if pd.LABEL_AS in obj else None self.children = children def add_child(self, para): \"\"\"Short-cut", "identifier and the information about the data type. \"\"\" def __init__(self, identifier, data_type):", "the unique identifier and data type. Raises value error if the given data", "declaration. The prompt contains an indication of the data type, the parameter name", "of the Reproducible Open Benchmarks for Data Analysis # Platform (ROB). # #", "the Reproducible Open Benchmarks for Data Analysis # Platform (ROB). # # Copyright", "ROB is free software; you can redistribute it and/or modify it under the", "details. \"\"\"Base class for workflow template parameters. Each parameter has a set of", "Benchmarks for Data Analysis # Platform (ROB). 
# # Copyright (C) 2019 NYU.", "the parameter, (ii) define a nested parameter structure, and (iii) render UI forms", "is DT_RECORD. Returns ------- bool \"\"\" return self.data_type == pd.DT_RECORD def is_string(self): \"\"\"Test", "if self.is_bool() or self.is_float() or self.is_int(): val += ' [default ' + str(self.default_value)", "or self.is_float() or self.is_int(): val += ' [default ' + str(self.default_value) + ']'", "the special value that indicates that the property value is provided by the", "the parameter declaration is DT_RECORD. Returns ------- bool \"\"\" return self.data_type == pd.DT_RECORD", "UI forms to collect parameter values. \"\"\" from benchtmpl.error import InvalidParameterError import benchtmpl.workflow.parameter.declaration", "pd \"\"\"Special value for as-property that indicates user input for target path of", "prompt for the parameter declaration. The prompt contains an indication of the data", "(file)' elif self.is_float(): val += ' (decimal)' elif self.is_int(): val += ' (integer)'", "children for parameter lists or records \"\"\" super(TemplateParameter, self).__init__( identifier=obj[pd.LABEL_ID], data_type = obj[pd.LABEL_DATATYPE]", "Identifier for parameter data type Raises ------ benchtmpl.error.InvalidParameterError \"\"\" if not data_type in", "\\'{}\\''.format(data_type)) self.identifier = identifier self.data_type = data_type def is_bool(self): \"\"\"Test if data type", "bool \"\"\" return self.data_type == pd.DT_INTEGER def is_list(self): \"\"\"Test if data type for", "set of properties that are used to (i) identify the parameter, (ii) define", "attributes of a template parameter declaration from a given dictionary. Parameters ---------- obj:", "def prompt(self): \"\"\"Get default input prompt for the parameter declaration. 
The prompt contains", "TemplateParameter(ParameterBase): \"\"\"The template parameter is a simple wrapper around a dictionary that contains", "in obj else None self.as_constant = obj[pd.LABEL_AS] if pd.LABEL_AS in obj else None", "Parameters ---------- para: benchtmpl.workflow.parameter.base.TemplateParameter Template parameter instance for child parameter \"\"\" self.children.append(para) self.children.sort(key=lambda", "for Data Analysis # Platform (ROB). # # Copyright (C) 2019 NYU. #", "pd.DT_STRING class TemplateParameter(ParameterBase): \"\"\"The template parameter is a simple wrapper around a dictionary", "= obj[pd.LABEL_PARENT] if pd.LABEL_PARENT in obj else None self.as_constant = obj[pd.LABEL_AS] if pd.LABEL_AS", "type for the parameter declaration is DT_INTEGER. Returns ------- bool \"\"\" return self.data_type", "'$input' class ParameterBase(object): \"\"\"Base class for template parameter and parameter argument values. The", "pd.DATA_TYPES: raise InvalidParameterError('invalid data type \\'{}\\''.format(data_type)) self.identifier = identifier self.data_type = data_type def", "declaration from a given dictionary. Parameters ---------- obj: dict Dictionary containing the template", "parameter declaration from a given dictionary. Parameters ---------- obj: dict Dictionary containing the", "properties that are used to (i) identify the parameter, (ii) define a nested", "constant property is the special value that indicates that the property value is", "children is not None and not empty. Returns ------- bool \"\"\" if not", "bool \"\"\" return self.data_type == pd.DT_RECORD def is_string(self): \"\"\"Test if data type for", "name and the default value (if defined). Returns ------- string \"\"\" val =", "parameter has a set of properties that are used to (i) identify the", "has_children(self): \"\"\"Test if a parameter has children. 
Only returns True if the list", "obj else None self.parent = obj[pd.LABEL_PARENT] if pd.LABEL_PARENT in obj else None self.as_constant", "as-property that indicates user input for target path of uploaded files. \"\"\" AS_INPUT", "not empty. Returns ------- bool \"\"\" if not self.children is None: return len(self.children)", "else None self.parent = obj[pd.LABEL_PARENT] if pd.LABEL_PARENT in obj else None self.as_constant =", "= obj self.name = obj[pd.LABEL_NAME] self.description = obj[pd.LABEL_DESCRIPTION] self.index = obj[pd.LABEL_INDEX] self.default_value =", "the parameter declaration is DT_LIST. Returns ------- bool \"\"\" return self.data_type == pd.DT_LIST", "\"\"\" return self.as_constant def has_children(self): \"\"\"Test if a parameter has children. Only returns", "self.is_int(): val += ' (integer)' elif self.is_string(): val += ' (string)' if not", "bool \"\"\" return self.data_type == pd.DT_STRING class TemplateParameter(ParameterBase): \"\"\"The template parameter is a", "== pd.DT_RECORD def is_string(self): \"\"\"Test if data type for the parameter declaration is", "# This file is part of the Reproducible Open Benchmarks for Data Analysis", "\"\"\" from benchtmpl.error import InvalidParameterError import benchtmpl.workflow.parameter.declaration as pd \"\"\"Special value for as-property", "data_type = obj[pd.LABEL_DATATYPE] ) self.obj = obj self.name = obj[pd.LABEL_NAME] self.description = obj[pd.LABEL_DESCRIPTION]", "Returns ------- string \"\"\" val = str(self.name) # Add text that indicates the", "different attributes of a template parameter declaration from a given dictionary. Parameters ----------", "is DT_DECIMAL. 
Returns ------- bool \"\"\" return self.data_type == pd.DT_DECIMAL def is_int(self): \"\"\"Test", "for child parameter \"\"\" self.children.append(para) self.children.sort(key=lambda p: (p.index, p.identifier)) def as_input(self): \"\"\"Flag indicating", "len(self.children) > 0 return False def has_constant(self): \"\"\"True if the as_constant property is", "\"\"\" if not self.children is None: return len(self.children) > 0 return False def", "pd.LABEL_VALUES in obj else None self.parent = obj[pd.LABEL_PARENT] if pd.LABEL_PARENT in obj else", "+= ' [default ' + str(self.default_value) + ']' else: val += ' [default", "def add_child(self, para): \"\"\"Short-cut to add an element to the list of children", "' (integer)' elif self.is_string(): val += ' (string)' if not self.default_value is None:", "\"\"\" val = str(self.name) # Add text that indicates the parameter type if", "error if the given data type identifier is not valid. Parameters ---------- identifier:", "collect parameter values. \"\"\" from benchtmpl.error import InvalidParameterError import benchtmpl.workflow.parameter.declaration as pd \"\"\"Special", "is DT_STRING. Returns ------- bool \"\"\" return self.data_type == pd.DT_STRING class TemplateParameter(ParameterBase): \"\"\"The", "in obj else None self.parent = obj[pd.LABEL_PARENT] if pd.LABEL_PARENT in obj else None", "is DT_LIST. Returns ------- bool \"\"\" return self.data_type == pd.DT_LIST def is_record(self): \"\"\"Test", "provides easy access to the different components of the parameter declaration. \"\"\" def", "\"\"\"Base class for template parameter and parameter argument values. The base class maintains", "Add text that indicates the parameter type if self.is_bool(): val += ' (bool)'", "list of children of the parameter. 
Parameters ---------- para: benchtmpl.workflow.parameter.base.TemplateParameter Template parameter instance", "\"\"\" return self.data_type == pd.DT_INTEGER def is_list(self): \"\"\"Test if data type for the", "The base class maintains the unique parameter identifier and the information about the", "type. Raises value error if the given data type identifier is not valid.", "DT_RECORD. Returns ------- bool \"\"\" return self.data_type == pd.DT_RECORD def is_string(self): \"\"\"Test if", "list of children is not None and not empty. Returns ------- bool \"\"\"", "Raises value error if the given data type identifier is not valid. Parameters", "# ROB is free software; you can redistribute it and/or modify it under", "None. Returns ------- bool \"\"\" return not self.as_constant is None def prompt(self): \"\"\"Get", "def is_record(self): \"\"\"Test if data type for the parameter declaration is DT_RECORD. Returns", "an element to the list of children of the parameter. Parameters ---------- para:", "---------- obj: dict Dictionary containing the template parameter declaration properties children: list(benchtmpl.workflow.parameter.base.TemplateParameter), optional", "it under the # terms of the MIT License; see LICENSE file for", "more details. \"\"\"Base class for workflow template parameters. Each parameter has a set", "' def to_dict(self): \"\"\"Get the dictionary serialization for the parameter declaration. Returns -------", "for the parameter declaration is DT_LIST. Returns ------- bool \"\"\" return self.data_type ==", "bool \"\"\" return self.data_type == pd.DT_BOOL def is_file(self): \"\"\"Test if data type for", "[default \\'' + str(self.default_value) + '\\']' return val + ': ' def to_dict(self):", "instance for child parameter \"\"\" self.children.append(para) self.children.sort(key=lambda p: (p.index, p.identifier)) def as_input(self): \"\"\"Flag", "LICENSE file for more details. \"\"\"Base class for workflow template parameters. 
Each parameter", "parameter identifier data_type: string Identifier for parameter data type Raises ------ benchtmpl.error.InvalidParameterError \"\"\"", "------- bool \"\"\" return self.data_type == pd.DT_LIST def is_record(self): \"\"\"Test if data type", "import benchtmpl.workflow.parameter.declaration as pd \"\"\"Special value for as-property that indicates user input for", "as pd \"\"\"Special value for as-property that indicates user input for target path", "Optional list of parameter children for parameter lists or records \"\"\" super(TemplateParameter, self).__init__(", "val += ' (integer)' elif self.is_string(): val += ' (string)' if not self.default_value", "\"\"\" super(TemplateParameter, self).__init__( identifier=obj[pd.LABEL_ID], data_type = obj[pd.LABEL_DATATYPE] ) self.obj = obj self.name =", "text that indicates the parameter type if self.is_bool(): val += ' (bool)' elif", "wrapper provides easy access to the different components of the parameter declaration. \"\"\"", "for the parameter declaration is DT_RECORD. Returns ------- bool \"\"\" return self.data_type ==", "+ str(self.default_value) + '\\']' return val + ': ' def to_dict(self): \"\"\"Get the", "Raises ------ benchtmpl.error.InvalidParameterError \"\"\" if not data_type in pd.DATA_TYPES: raise InvalidParameterError('invalid data type", "DT_BOOL. Returns ------- bool \"\"\" return self.data_type == pd.DT_BOOL def is_file(self): \"\"\"Test if", "is None: if self.is_bool() or self.is_float() or self.is_int(): val += ' [default '", "type for the parameter declaration is DT_FILE. Returns ------- bool \"\"\" return self.data_type", "identifier data_type: string Identifier for parameter data type Raises ------ benchtmpl.error.InvalidParameterError \"\"\" if", "': ' def to_dict(self): \"\"\"Get the dictionary serialization for the parameter declaration. Returns", "(ii) define a nested parameter structure, and (iii) render UI forms to collect", "is DT_INTEGER. 
Returns ------- bool \"\"\" return self.data_type == pd.DT_INTEGER def is_list(self): \"\"\"Test", "elif self.is_string(): val += ' (string)' if not self.default_value is None: if self.is_bool()", "# Platform (ROB). # # Copyright (C) 2019 NYU. # # ROB is", "if pd.LABEL_DEFAULT in obj else None self.is_required = obj[pd.LABEL_REQUIRED] self.values = obj[pd.LABEL_VALUES] if", "\"\"\"Test if data type for the parameter declaration is DT_STRING. Returns ------- bool", "self.default_value = obj[pd.LABEL_DEFAULT] if pd.LABEL_DEFAULT in obj else None self.is_required = obj[pd.LABEL_REQUIRED] self.values", "= identifier self.data_type = data_type def is_bool(self): \"\"\"Test if data type for the", "empty. Returns ------- bool \"\"\" if not self.children is None: return len(self.children) >", "template parameter is a simple wrapper around a dictionary that contains a parameter", "type for the parameter declaration is DT_DECIMAL. Returns ------- bool \"\"\" return self.data_type", "self.identifier = identifier self.data_type = data_type def is_bool(self): \"\"\"Test if data type for", "self.parent = obj[pd.LABEL_PARENT] if pd.LABEL_PARENT in obj else None self.as_constant = obj[pd.LABEL_AS] if", "Open Benchmarks for Data Analysis # Platform (ROB). # # Copyright (C) 2019", "\"\"\"Test if data type for the parameter declaration is DT_INTEGER. Returns ------- bool", "for target path of uploaded files. \"\"\" AS_INPUT = '$input' class ParameterBase(object): \"\"\"Base", "components of the parameter declaration. \"\"\" def __init__(self, obj, children=None): \"\"\"Initialize the different", "not self.default_value is None: if self.is_bool() or self.is_float() or self.is_int(): val += '", "the # terms of the MIT License; see LICENSE file for more details.", "------- bool \"\"\" return self.data_type == pd.DT_RECORD def is_string(self): \"\"\"Test if data type", "prompt(self): \"\"\"Get default input prompt for the parameter declaration. 
The prompt contains an", "not data_type in pd.DATA_TYPES: raise InvalidParameterError('invalid data type \\'{}\\''.format(data_type)) self.identifier = identifier self.data_type", "(p.index, p.identifier)) def as_input(self): \"\"\"Flag indicating whether the value for the as constant", "add_child(self, para): \"\"\"Short-cut to add an element to the list of children of", "self.as_constant == AS_INPUT def get_constant(self): \"\"\"Get the value of the as_constant property. Returns", "data_type in pd.DATA_TYPES: raise InvalidParameterError('invalid data type \\'{}\\''.format(data_type)) self.identifier = identifier self.data_type =", "if the given data type identifier is not valid. Parameters ---------- identifier: string", "that contains a parameter declaration. The wrapper provides easy access to the different", "property is the special value that indicates that the property value is provided", "value (if defined). Returns ------- string \"\"\" val = str(self.name) # Add text", "uploaded files. \"\"\" AS_INPUT = '$input' class ParameterBase(object): \"\"\"Base class for template parameter", "self.is_float(): val += ' (decimal)' elif self.is_int(): val += ' (integer)' elif self.is_string():", "def get_constant(self): \"\"\"Get the value of the as_constant property. Returns ------- string \"\"\"", "DT_FILE. Returns ------- bool \"\"\" return self.data_type == pd.DT_FILE def is_float(self): \"\"\"Test if", "parameter declaration. The wrapper provides easy access to the different components of the", "self.data_type == pd.DT_RECORD def is_string(self): \"\"\"Test if data type for the parameter declaration", "a parameter declaration. 
The wrapper provides easy access to the different components of", "+ '\\']' return val + ': ' def to_dict(self): \"\"\"Get the dictionary serialization", "[default ' + str(self.default_value) + ']' else: val += ' [default \\'' +", "= obj[pd.LABEL_VALUES] if pd.LABEL_VALUES in obj else None self.parent = obj[pd.LABEL_PARENT] if pd.LABEL_PARENT", "in pd.DATA_TYPES: raise InvalidParameterError('invalid data type \\'{}\\''.format(data_type)) self.identifier = identifier self.data_type = data_type", "\"\"\" return self.data_type == pd.DT_STRING class TemplateParameter(ParameterBase): \"\"\"The template parameter is a simple", "\\'' + str(self.default_value) + '\\']' return val + ': ' def to_dict(self): \"\"\"Get", "is provided by the user. \"\"\" return self.as_constant == AS_INPUT def get_constant(self): \"\"\"Get", "a dictionary that contains a parameter declaration. The wrapper provides easy access to", "== pd.DT_LIST def is_record(self): \"\"\"Test if data type for the parameter declaration is", "return self.data_type == pd.DT_FILE def is_float(self): \"\"\"Test if data type for the parameter", "parameter declaration. The prompt contains an indication of the data type, the parameter", "parameter declaration is DT_FILE. Returns ------- bool \"\"\" return self.data_type == pd.DT_FILE def", "------- bool \"\"\" return self.data_type == pd.DT_INTEGER def is_list(self): \"\"\"Test if data type", "self.children = children def add_child(self, para): \"\"\"Short-cut to add an element to the", "identifier self.data_type = data_type def is_bool(self): \"\"\"Test if data type for the parameter", "of the as_constant property. 
Returns ------- string \"\"\" return self.as_constant def has_children(self): \"\"\"Test", "can redistribute it and/or modify it under the # terms of the MIT", "lists or records \"\"\" super(TemplateParameter, self).__init__( identifier=obj[pd.LABEL_ID], data_type = obj[pd.LABEL_DATATYPE] ) self.obj =", "import InvalidParameterError import benchtmpl.workflow.parameter.declaration as pd \"\"\"Special value for as-property that indicates user", "AS_INPUT def get_constant(self): \"\"\"Get the value of the as_constant property. Returns ------- string", "children. Only returns True if the list of children is not None and", "+= ' (decimal)' elif self.is_int(): val += ' (integer)' elif self.is_string(): val +=", "data type identifier is not valid. Parameters ---------- identifier: string Unique parameter identifier", "the MIT License; see LICENSE file for more details. \"\"\"Base class for workflow", "obj: dict Dictionary containing the template parameter declaration properties children: list(benchtmpl.workflow.parameter.base.TemplateParameter), optional Optional", "about the data type. \"\"\" def __init__(self, identifier, data_type): \"\"\"Initialize the unique identifier", "maintains the unique parameter identifier and the information about the data type. \"\"\"", "NYU. # # ROB is free software; you can redistribute it and/or modify", "a simple wrapper around a dictionary that contains a parameter declaration. The wrapper", "None self.parent = obj[pd.LABEL_PARENT] if pd.LABEL_PARENT in obj else None self.as_constant = obj[pd.LABEL_AS]", "value of the as_constant property. Returns ------- string \"\"\" return self.as_constant def has_children(self):", "that are used to (i) identify the parameter, (ii) define a nested parameter", "returns True if the list of children is not None and not empty.", "type if self.is_bool(): val += ' (bool)' elif self.is_file(): val += ' (file)'", "class for workflow template parameters. 
Each parameter has a set of properties that", "obj self.name = obj[pd.LABEL_NAME] self.description = obj[pd.LABEL_DESCRIPTION] self.index = obj[pd.LABEL_INDEX] self.default_value = obj[pd.LABEL_DEFAULT]", "(string)' if not self.default_value is None: if self.is_bool() or self.is_float() or self.is_int(): val", "DT_LIST. Returns ------- bool \"\"\" return self.data_type == pd.DT_LIST def is_record(self): \"\"\"Test if", "else None self.is_required = obj[pd.LABEL_REQUIRED] self.values = obj[pd.LABEL_VALUES] if pd.LABEL_VALUES in obj else", "not valid. Parameters ---------- identifier: string Unique parameter identifier data_type: string Identifier for", "the unique parameter identifier and the information about the data type. \"\"\" def", "you can redistribute it and/or modify it under the # terms of the", "in obj else None self.is_required = obj[pd.LABEL_REQUIRED] self.values = obj[pd.LABEL_VALUES] if pd.LABEL_VALUES in", "see LICENSE file for more details. \"\"\"Base class for workflow template parameters. Each", "self.is_int(): val += ' [default ' + str(self.default_value) + ']' else: val +=", "identifier and data type. Raises value error if the given data type identifier", "return self.as_constant == AS_INPUT def get_constant(self): \"\"\"Get the value of the as_constant property.", "+= ' (bool)' elif self.is_file(): val += ' (file)' elif self.is_float(): val +=", "False def has_constant(self): \"\"\"True if the as_constant property is not None. Returns -------", "the parameter declaration. The prompt contains an indication of the data type, the", "defined). Returns ------- string \"\"\" val = str(self.name) # Add text that indicates", "parameter declaration is DT_RECORD. 
Returns ------- bool \"\"\" return self.data_type == pd.DT_RECORD def", "def __init__(self, obj, children=None): \"\"\"Initialize the different attributes of a template parameter declaration", "pd.DT_LIST def is_record(self): \"\"\"Test if data type for the parameter declaration is DT_RECORD.", "indicates user input for target path of uploaded files. \"\"\" AS_INPUT = '$input'" ]
[ "guidance. they can be removed. #ES: a list of the transcript's timestamps t_list", "sure your timestamps are in ascending order and that there are no mistakes", "the snippet object here. # Setting \"ref = ref[key]\" means that in the", "= raw_input(\"\\n6.3.2 Please input your interviewee's name as it appears in the transcript:", "it appears in the transcript: \") interviewee = raw_input(\"\\n6.3.2 Please input your interviewee's", "0 c = 0 #ES: several print commands were added for guidance. they", "in that object. prop_array = p.split('.') ref = resource for pa in range(0,", "already uploaded to Youtube. Now trying to resume uploading the remaining snippets...)\" time.sleep(1)", "snippets for syncing with your video snippets? (y) \") answer = verify_y_n_none(answer) if", "+ \" video snippets will created. Continue?\" print \"\\nIf all input was correct,", "inserted resource. if properties[p]: if is_array: ref[key] = properties[p].split(',') else: ref[key] = properties[p]", "snipVideos = True elif answer == 'n': snipVideos = False elif answer ==", "key like \"snippet.title\", split into \"snippet\" and \"title\", where # \"snippet\" will be", "\" video snippets will created. Continue?\" print \"\\nIf all input was correct, the", "trying to resume uploading the remaining snippets...)\" time.sleep(1) for s in splits: c", "entire pipeline. Creating dummy file 'delete me.txt' to finish pipeline.\" foo = open(folderName", "answer == 'n': uploadVideos = False elif answer == '': uploadVideos = True", "to Youtube for syncing? (y) \") answer = verify_y_n_none(answer) if answer == 'y':", "of your timestamps isn't formatted correctly. 
Consult README.md for guidelines on proper timestamp", "foo.close() with open(folderName + \"/\" + \"delete me.txt\", 'r') as myfile: text =", "= str(raw_input(question+' (y/n): ')).lower().strip() if reply[0] == 'y': return True if reply[0] ==", "False #ES: IF you enabled 'resampleSubtitles' (above), you have the option to remove", "be set to True or False (make a variable for this) compiledSubs =", "create a playlist in youtube online and copy url id to script #playlistID", "time.sleep(2) exit() print \"\\n\" elif combineSubtitles == True: #in this case, the user", "remove_empty_kwargs(**kwargs): good_kwargs = {} if kwargs is not None: for key, value in", "e: print e print \"\\n One of your timestamps isn't formatted correctly. Consult", "= 'Frederic' #originalVideo = \"Frederic.mov\" #interviewer = \"M.M.\" #interviewee = \"B.K.\" #fileName =", "answer == 'n': placeBasedTimestamping = False elif answer == '': placeBasedTimestamping = False", "+ \"delete me.txt\", 'r') as myfile: text = myfile.read().replace('\\n', '') with open(folderName +", "correct, the program will begin snipping and uploading content to Youtube for processing.", "== 'y': return True if reply[0] == '': return True if reply[0] ==", "= insert_result[\"snippet\"][\"status\"] #print \"Uploaded caption track '%s(%s) in '%s' language, '%s' status.\" %", "once subtitles have been successfully generated? (n) \") answer = verify_y_n_none(answer) if answer", "and then retrying...\" % sleep_seconds time.sleep(sleep_seconds) return response['id'] if uploadTranscripts == True or", "based on a list of properties given as key-value pairs. # Leave properties", "complete deleteVideos = False #ES: upload the full video and compiled transcript to", "Consult README.md for guidelines on proper timestamp formatting.\" print \"\\nVerifying if timestamps are", "as to how many videos will be uploaded. question = \"\\nThere were \"", "f) if wait == True: print \"\\nWaiting for videos to be processed. 
It", "Call the API's captions.insert method to upload a caption track in draft status.", "\".srt\", 'w') #thefile.write(compiledSubs) if uploadFull == True: print \"\\nUploading full video...\" vid =", "properties[p].split(',') else: ref[key] = properties[p] elif key not in ref: # For example,", "will use this code for processing your files. Continue? (y/n) \") if verifyLanguage.lower()", "sys import httplib import random from apiclient.discovery import build from apiclient.errors import HttpError", "str(c) with open(folderName + \"/\" + fileName + \"_\" + str(c) + \".txt\",", "beginning of the line is not a digit and is not a next-line", "splits: print c,s,captionsids[c-1] sub_txt = \"\" # while waitLonger == True: # try:", "download snippet subtitle files (.vtt) downloadCaptions = True #ES: delete uploaded video snippets", "your timestamp list (excluding the \\\".txt\\\" extention): \") else: print \"You have not", "+ '/' + fileName + \"_\" + str(c) + \".txt\" #print s,media_file,caption_file,videoids[c-1] a", "youtube.com and then restart the program.\" uploadVideos = True wait = False def", "answers blank. For more advanced users or users who have already used this", "float(t[2]) splits.append([t0,t0+t1]) #print int(t[0])*3600 + int(t[1])*60 + int(t[2]) t_list.append(t1) t0 = t0 +", "print \"\\n\" elif combineSubtitles == True: #in this case, the user has chosen", "snippet object here. # Setting \"ref = ref[key]\" means that in the next", "its timestamps,\\n- snips the associated video accordingly into video snippets,\\n- uploads these video", "downloads the text snippets as subtitle files (.vtt),\\n- stitches these subtitle files together", "<= sp1[1]: print \"\\nThere is a problem with one of your timestamps:\" print", "which your subtitle files are associated. These values will be used as offsets", "print \"\\nSnipping completed. No further options were selected. 
Exiting...\" exit() #ES: UPLOADS THE", "is smaller.\" print \"Please make sure your timestamps are in ascending order and", "answer == 'y': placeBasedTimestamping = True elif answer == 'n': placeBasedTimestamping = False", "processing. YouTube allows a maximum of 100 video uploads per 24h using the", "file (excluding .txt) #fileName = 'venant' #originalVideo refers to the name of the", "\" + str(len(splits)) + \" timestamps formatted like such '[HH:MM:SS.00]'.\" else: print \"Please", "the same directory with the code. return build(API_SERVICE_NAME, API_VERSION, http=credentials.authorize(httplib2.Http())) # Explicitly tell", "#CLIENT_SECRETS_FILE = \"client_secret.json\" #api key is <KEY> #client id is in client_id.json CLIENT_SECRETS_FILE", "and restart the program.\" exit() sp1 = sp num+=1 print \"\\nThe document named", "+ \":\" + str(int(m)) + \":\" + str(int(s)) return str(int(h)) + \":\" +", "at any point by pressing Ctrl+C (Cmd+C on Mac).\" time.sleep(1) print \"\\n\" print", "object. ref[key] = {} ref = ref[key] else: # For example, the property", "httplib.ImproperConnectionState, httplib.CannotSendRequest, httplib.CannotSendHeader, httplib.ResponseNotReady, httplib.BadStatusLine) # Always retry when an apiclient.errors.HttpError with one", "= False elif answer == '': resampleSubtitles = False if resampleSubtitles == True:", "allows Youtube to sync the video and text snippets\\n- downloads the text snippets", "the # \"for pa in range ...\" loop, we will be setting a", "should be a larger number than the timestamp that comes before it (\",str(sp1[1]),\"", "comes before it (\",str(sp1[1]),\" seconds), but it is smaller.\" print \"Please make sure", "= False elif answer == '': combineSubtitles = True if combineSubtitles == True:", "sleeping time as needed - ES #adjust switches as needed sleepingTime = 400", "a resource based on a list of properties given as key-value pairs. 
#", "API_SERVICE_NAME = \"youtube\" API_VERSION = \"v3\" # This variable defines a message to", "and \"title\" will be a property in that object. prop_array = p.split('.') ref", "status. def upload_caption(youtube, video_id, language, name, file): insert_result = youtube.captions().insert( part=\"snippet\", body=dict( snippet=dict(", "a valid file location.') vid = videos_insert( {'snippet.categoryId': '22', 'snippet.defaultLanguage': language, 'snippet.defaultAudioLanguage': language,", "#ES: cut video into snippets based on the transcript's timestamps (must be set", "all input was correct, the program will begin snipping\" yes_or_no(question) print \"\\n1. Slicing", "folder containing your transcript and/or video and/or subtitle files\\n(this folder must be located", "application...\" time.sleep(2) exit() print \"\\n\" elif combineSubtitles == True: #in this case, the", "other processes to run) snipVideos = True #ES: upload video snippets uploadVideos =", "\"V.S.\" #where the video and txt files are stored #folderName = 'venant' #fileName", "2 ** retry sleep_seconds = random.random() * max_sleep print \"Sleeping %f seconds and", "times so that the rest of the pipeline can run if combine_only ==", "**kwargs ).execute() print_results(results) #'snippet.playlistId': playlistID, playlist_items_insert( {'snippet.resourceId.kind': 'youtube#video', 'snippet.resourceId.videoId': vid, 'snippet.position': ''}, part='snippet',", "MediaFileUpload from oauth2client.client import flow_from_clientsecrets from oauth2client.file import Storage from oauth2client.tools import argparser,", "if answer == \"n\": print \"Please make sure you have the available space", "divmod(m, 60) #print str(int(h)) + \":\" + str(int(m)) + \":\" + str(int(s)) return", "user has chosen to only combine subtitles. 
the switch combine_only allows some different", "videos.insert def videos_insert(properties, media_file, **kwargs): resource = build_resource(properties) # See full sample for", "float(t[2]) splits.append([t0,t1]) #print int(t[0])*3600 + int(t[1])*60 + int(t[2]) t_list.append(t1) t0 = t1 except", "your hard drive to run this program. Continue? (y/n) \") answer = verify_y_n(answer)", "a = raw_input(\"Please answer 'y' or 'n', or leave the answer blank by", "and ENABLE it click \"create credentials\" create and \"OAUT client id\" \"\"\" #CLIENT_SECRETS_FILE", "function kwargs = remove_empty_kwargs(**kwargs) # See full sample for function request = service.videos().insert(", "response is None: try: print \"Uploading file...\" status, response = request.next_chunk() if response", "= Storage(\"youtube-api-snippets-oauth2.json\") credentials = storage.get() if credentials is None or credentials.invalid: credentials =", "u'SU_Rbp9V_Zo', u'VLhSxDh9gI0', u'80rY1RlbVQw', u'1yumt5fRBF4', u'u5qAHXhhJoo', u'G3gO6DW-wrM', u'qAU_8DNEqP8', u'fbGaOVHXkvY', u'_Knl1rP8Z9w', u'O6f8ZWjSgiw', u'uXY-00DuLjY', u'WpreZ_gbEyw'] #with", "with the code. return build(API_SERVICE_NAME, API_VERSION, http=credentials.authorize(httplib2.Http())) # Explicitly tell the underlying HTTP", "deleted from Youtube once subtitles have been successfully generated? (n) \") answer =", "this will aggregate phrases (t) into one list item (a text) until a", "of your transcript (excluding the \\\".txt\\\" extention): \") try: verifyExistence = os.stat(folderName +", "t_list.append(t1) t0 = t1 elif len(t) == 3: #if we are only combining", "1 waitLonger = True for s in splits: print c,s,captionsids[c-1] sub_txt = \"\"", "+ captionsids[c-1] + \" to be processed into captions. It is\",strftime(\"%H:%M:%S\", localtime()),\". 
Script", "#originalVideo = \"Oscar.mp4\" ### START BOILERPLATE CODE # Sample Python code for user", "response['id'] videoid = response['id'] elif method != 'insert' or 'id' not in response:", "if t is a timestamp #ES: removing punctuation from '[00:00:01.09]' since it is", "removing punctuation from '[00:00:01.09]' since it is never qualified as a digit (False)", "# This method implements an exponential backoff strategy to resume a # failed", "or leave the answer blank by hitting 'Enter': \") continue print \"\\n\\n\" print", "subtitle files\\n(this folder must be located inside the 'files' folder): \") try: verifyExistence", "of video lengths, then we need to make this into a list of", "is None: try: print \"Uploading file...\" status, response = request.next_chunk() if response is", "# failed upload. def resumable_upload(request, resource, method): response = None error = None", "#ES: PREPARE INPUT TEXT FOR PROCESSING if snipTranscript == True: for t in", "resampleSubtitles = True elif answer == 'n': resampleSubtitles = False elif answer ==", "\"C.V.\" #interviewee = \"V.S.\" #where the video and txt files are stored #folderName", "is None or credentials.invalid: credentials = run_flow(flow, storage, args) # Trusted testers can", "column in http://www.loc.gov/standards/iso639-2/php/code_list.php for the appropriate two-letter 'ISO 639-1' language code.)\\n\") if language", "print \"(However, it looks like \",len(videoids),\" video snippets were already uploaded to Youtube.", "str(2) + \" minutes...\" # time.sleep(120) sub_txt += subtitle cc = \"\" if", "language, 'snippet.description': 'Description of uploaded video.', 'snippet.tags[]': '', 'snippet.title': media_file, 'status.embeddable': '', 'status.license':", "str(c) else: cc = str(c) #print subtitle print cc with open(folderName + \"/\"", "videoId=id, language=language, name=originalVideo, isDraft=True, sync=False ) ), media_body=caption_file ).execute() print \"\\nFull video is", "them in an adjacent 
subtitle (verify)) removeLoneWords = False #____________# #ES: USER INTERVIEW", "'n': snipVideos = False elif answer == '': snipVideos = True answer =", "'n': combineSubtitles = False elif answer == '': combineSubtitles = True if combineSubtitles", "t0 = 0 c = 0 #ES: several print commands were added for", "by RG that has yet to be explored... placeBasedTimestamping = False #ES: resample", "like to reorganize subtitles to prioritize keeping full sentences intact? (Experimental; this feature", "--> create a new project on the resulting dashboard, \"enable apis and get", "we are only combining subtitle files, and we are using a .txt file", "that there are no mistakes (see README.md) and restart the program.\" exit() sp1", "folder named '\" + folderName + \"' does not exist in the current", "else: t1 = int(t[0])*60 + int(t[1]) splits.append([t0,t1]) t_list.append(t1) t0 = t1 elif len(t)", "exploration so as to make sure that place names are never split between", "compileSubs(folderName,fileName,[['_high-frequency-timestamps',0,placeBasedTimestamping]],t_list,interviewer,interviewee,False,language,resampleSubtitles,fullSentenceSubtitles,removeLoneWords) time.sleep(10) #thefile = open(folderName + \"/\" + fileName + \".srt\", 'w') #thefile.write(compiledSubs)", "= verify_y_n_none(answer) if answer == 'y': placeBasedTimestamping = True elif answer == 'n':", "answer == 'n': deleteVideos = False elif answer == '': deleteVideos = False", "else: if downloadCaptions == True: with open(folderName + \"/\" + 'captionsids.pkl', 'rb') as", "False elif answer == '': removeLoneWords = False answer = raw_input(\"\\n7.2 Would you", "answer == '': downloadCaptions = True answer = raw_input(\"\\n6/7 Would you like your", "> 3 or len(t) < 3: print \"\\nOne of your timestamps (\",':'.join(t) ,\")", "if answer == 'y': snipVideos = True elif answer == 'n': snipVideos =", "print \"\\nUploading transcripts...\" for s in splits: print c,s media_file = folderName +", "oscar #change certain 
character variables import imageio imageio.plugins.ffmpeg.download() from moviepy.video.io.ffmpeg_tools import ffmpeg_extract_subclip import", "does not apply to your transcript, simply leave the following two answers blank", "from postprocess_and_fuse_subs import compileSubs import pickle import os #adjust sleeping time as needed", "id=videoids[c-1] ).execute() c += 1 time.sleep(10) if combineSubtitles == True: #compiles them all", "value: good_kwargs[key] = value return good_kwargs ### END BOILERPLATE CODE # Sample python", "folderName + \"/\" + originalVideo, part='snippet,status') # place video in custom playlist def", "False #ES: combine vtt snippets that were downloaded from Youtube into a total", "without values out of inserted resource. if properties[p]: if is_array: ref[key] = properties[p].split(',')", "options for running this application. Exiting...\" exit() while True: language = raw_input(\"Enter the", "+= 1 #ES: printing deets #print t.replace('[','').replace(']','').replace(':','').replace('.','').replace('\\n','').isdigit() #print \"c: \" + str(c) with", "to ask you a few questions...\\\"), please input them. If this does not", "str(len(splits)) + \" timestamps formatted like such '[HH:MM:SS.00]'.\" else: print \"Please set the", "vid = resumable_upload(request, 'video', 'insert') # See full sample for function return vid", "% sleep_seconds time.sleep(sleep_seconds) return response['id'] if uploadTranscripts == True or resumeUploads == True", "raw_input(\"\\n7.1 Would you like to reorganize subtitles according to punctuation? (Experimental; can lead", "not exist in the folder '\" + folderName + \"'. Please see README.md", "the 'files' folder): \") try: verifyExistence = os.stat(folderName).st_size except Exception as e: print", "as e: print e print \"\\nThe program is unable to resume uploads because", "code for processing your files. Continue? 
(y/n) \") if verifyLanguage.lower() == '' or", "to run: \\n\\n\" time.sleep(2) answer = raw_input(\"\\n1/7 Will you be cutting your video", "position of c in texts (a chunk of text prior to timestamp) to", "\") continue print \"\\n\\n\" print \"This application creates subtitles for a video for", "the program.\" print \"exiting application...\" time.sleep(2) exit() print \"\\n\" elif combineSubtitles == True:", "found because you are not running the entire pipeline. Creating dummy file 'delete", "total subtitle file. combineSubtitles = True #ES: the following switches control how subtitles", "#print s,media_file,caption_file,videoids[c-1] a = service.captions().insert( part=\"snippet\", body=dict( snippet=dict( videoId=videoids[c-1], language=language, name=media_file, isDraft=True, sync=True", "next time through the # \"for pa in range ...\" loop, we will", "and improve the subtitle structure overall (can lead to short, choppy, fast subtitles", "want to continue where you left off (uploadVideos must still be set to", "only visible to your account,\\n- uploads the text snippets to Youtube as transcript", "[] # #for search_result in search_response.get(\"items\", []): # videos.append(\"%s\" % (search_result[\"id\"][\"videoId\"])) # #print", "= True elif answer == 'n': resampleSubtitles = False elif answer == '':", "key = prop_array[pa] # Convert a name like \"snippet.tags[]\" to snippet.tags, but handle", "Create the snippet object here. # Setting \"ref = ref[key]\" means that in", "valid file location.') vid = videos_insert( {'snippet.categoryId': '22', 'snippet.defaultLanguage': language, 'snippet.defaultAudioLanguage': language, 'snippet.description':", "number #\",str(num+2),\" (equivalent to \",str(sp[1]),\" seconds) should be a larger number than the", "True: combine_only = False fileName = raw_input(\"Enter the file name of your transcript", "failed upload. 
def resumable_upload(request, resource, method): response = None error = None retry", "(\" + str(videoSize) + \" Mb).\" yes_or_no(question) print \"\\n1. Slicing into \" +", "START BOILERPLATE CODE # Sample Python code for user authorization import httplib2 import", "# Convert a name like \"snippet.tags[]\" to snippet.tags, but handle # the value", "\"OAUT client id\" \"\"\" #CLIENT_SECRETS_FILE = \"client_secret.json\" #api key is <KEY> #client id", "A feature created by RG that has yet to be explored... placeBasedTimestamping =", "sleep_seconds = random.random() * max_sleep print \"Sleeping %f seconds and then retrying...\" %", "== 'n': resampleSubtitles = False elif answer == '': resampleSubtitles = False if", "#ES: add t to position c of texts texts[c] += t#.encode('utf8') #print t.replace('[','').replace(']','').replace(':','').replace('.','').replace('\\n','').isdigit()", "list of video lengths, then we need to make this into a list", "'wb') as f: pickle.dump(videoids, f) if wait == True: print \"\\nWaiting for videos", "snippets based on its timestamps,\\n- snips the associated video accordingly into video snippets,\\n-", "to prioritize keeping full sentences intact? (Experimental; this feature is not recommended since", "to run) snipTranscript = True #ES: cut video into snippets based on the", "end of each line (why?) 
t += \"\\n\" #ES: if the beginning of", "temporarily require \" + str(videoSize) + \" Mb available space on your hard", "all print \"\\nCombining subtitle snippets ...\" #ES: this is a feature that needs", "these variables according to what story you want to process - ES #interviewer", "units can end up being excessively large) fullSentenceSubtitles = False #ES: IF you", "= ref[key] else: # For example, the property is \"snippet.description\", and the resource", "video.', 'snippet.tags[]': '', 'snippet.title': fileName, 'status.embeddable': '', 'status.license': '', 'status.privacyStatus': 'unlisted', 'status.publicStatsViewable': ''},", "\".txt\", 'w') as thefile: #thefile = open(folderName + \"/\" + fileName + \"_\"", "= unicode(t, \"UTF-8\") #split the timestamps at : (into 3) t = t.split(\":\")", "print \"\\nUploading compiled subtitles...\" caption_file = folderName + '/' + fileName + \".srt\"", "= True answer = raw_input(\"\\n3/7 Will you be resuming video uploads from a", "+= \"\\n\" #ES: if the beginning of the line is not a digit", "http://www.loc.gov/standards/iso639-2/php/code_list.php for the appropriate two-letter 'ISO 639-1' language code.)\\n\") if language != '':", "#increase pos on texts by 1 c += 1 #ES: printing deets #print", "\"A retriable error occurred: %s\" % e if error is not None: print", "#thefile.write(compiledSubs) if uploadFull == True: print \"\\nUploading full video...\" vid = videos_insert( {'snippet.categoryId':", "+ \" to be processed into captions. It is\",strftime(\"%H:%M:%S\", localtime()),\". Script will resume", "apiclient.discovery import build from apiclient.errors import HttpError from apiclient.http import MediaFileUpload from oauth2client.client", "chosen to only combine subtitles. the switch combine_only allows some different functionality down", "it click \"create credentials\" create and \"OAUT client id\" \"\"\" #CLIENT_SECRETS_FILE = \"client_secret.json\"", "for a video for which you have an associated transcript. 
Make sure you", "README.md for instructions.\" print \"exiting application...\" time.sleep(2) exit() print \"\\n\" videoSize = os.stat(folderName", "# Leave properties without values out of inserted resource. if properties[p]: if is_array:", "httplib.BadStatusLine) # Always retry when an apiclient.errors.HttpError with one of these status #", "cut-up texts texts = [\"\"] t0 = 0 c = 0 #ES: several", "str(c) + \".mp4\" if not os.path.exists(media_file): exit('Please specify a valid file location.') print", "= True elif answer == 'n': combineSubtitles = False elif answer == '':", "if not os.path.exists(media_file): exit('Please specify a valid file location.') vid = videos_insert( {'snippet.categoryId':", "!= '': verifyLanguage = raw_input(\"\\nYou have entered '\" + language + \"' as", "loop, we will be setting a property in the # resource's \"snippet\" object.", "based on the transcript's timestamps (must be set to True for other processes", "retriable HTTP error %d occurred:\\n%s\" % (e.resp.status,e.content) else: raise except RETRIABLE_EXCEPTIONS, e: error", "invalid\": reply = str(raw_input(question+' (y/n): ')).lower().strip() if reply[0] == 'y': return True if", "custom playlist def playlist_items_insert(properties, **kwargs): resource = build_resource(properties) # See full sample for", "as myfile: text = myfile.read().replace('\\n', '') #print \"ES: replace \\\\n with ''\" with", "fileName + \"_\" + str(c) +\".mp4\") media_file = folderName + '/' + fileName", "the name of the video file including its ext #originalVideo = \"venant.mp4\" #interviewer", "over README.md before proceeding.\" time.sleep(1) print \"You may terminate the application at any", "scope=YOUTUBE_READ_WRITE_SSL_SCOPE, message=MISSING_CLIENT_SECRETS_MESSAGE) storage = Storage(\"youtube-api-snippets-oauth2.json\") credentials = storage.get() if credentials is None or", "exist in the folder '\" + folderName + \"'. Please see README.md for", "values out of the inserted resource. 
def build_resource(properties): resource = {} for p", "% response) except HttpError, e: if e.resp.status in RETRIABLE_STATUS_CODES: error = \"A retriable", "[]): # videos.append(\"%s\" % (search_result[\"id\"][\"videoId\"])) # #print \"Videos:\\n\", \"\\n\".join(videos), \"\\n\" #ES: I don't", "restart the program.\" exit() sp1 = sp num+=1 print \"\\nThe document named '\"", "is a timestamp #ES: removing punctuation from '[00:00:01.09]' since it is never qualified", "BOILERPLATE CODE # Sample Python code for user authorization import httplib2 import os", "subtitles...\" caption_file = folderName + '/' + fileName + \".srt\" service.captions().insert( part=\"snippet\", body=dict(", "in a folder name called oscar #change certain character variables import imageio imageio.plugins.ffmpeg.download()", "combine_only = False fileName = raw_input(\"Enter the file name of your transcript (excluding", "to your Youtube account once complete uploadFull = False #ES: combine vtt snippets", "httplib.IncompleteRead, httplib.ImproperConnectionState, httplib.CannotSendRequest, httplib.CannotSendHeader, httplib.ResponseNotReady, httplib.BadStatusLine) # Always retry when an apiclient.errors.HttpError with", "'status.publicStatsViewable': ''}, media_file, part='snippet,status') videoids.append(vid) print videoids #c += 1 wait = True", "'snippet.description': 'Description of uploaded video.', 'snippet.tags[]': '', 'snippet.title': fileName, 'status.embeddable': '', 'status.license': '',", "called oscar #change certain character variables import imageio imageio.plugins.ffmpeg.download() from moviepy.video.io.ffmpeg_tools import ffmpeg_extract_subclip", "(excluding the \\\".txt\\\" extention): \") try: verifyExistence = os.stat(folderName + '/' + fileName", "these status # codes is raised. 
RETRIABLE_STATUS_CODES = [500, 502, 503, 504] #", "if answer == 'y': downloadCaptions = True elif answer == 'n': downloadCaptions =", "Youtube as transcript files for these video snippets,\\n- allows Youtube to sync the", "large) fullSentenceSubtitles = False #ES: IF you enabled 'resampleSubtitles' (above), you have the", "lead to short, choppy, fast subtitles that are hard to read) (n) \")", "localtime()),\". Script will resume in \" + str(2) + \" minutes...\" # time.sleep(120)", "'n': uploadVideos = False elif answer == '': uploadVideos = True answer =", "raw_input(\"\\n6/7 Would you like your uploaded video snippets to be deleted from Youtube", "uploading the remaining snippets...)\" time.sleep(1) for s in splits: c += 1 if", "or downloadCaptions == True or deleteVideos == True: combine_only = False fileName =", "\"ref = ref[key]\" means that in the next time through the # \"for", "'rb') as f:videoids = pickle.load(f) except Exception as e: print e print \"\\nThe", "print \"exiting application...\" time.sleep(2) exit() print \"\\n\" if snipVideos == True or uploadTranscripts", "media_file, **kwargs): resource = build_resource(properties) # See full sample for function kwargs =", "Exception as e: print e print \"The file named '\" + originalVideo +", "= raw_input(\"Enter the language code of your video and transcript or the intended", "False elif answer == '': deleteVideos = False answer = raw_input(\"\\n7/7 Will you", "uploadTranscripts = True elif answer == 'n': uploadTranscripts = False elif answer ==", "raw_input(\"\\n2/7 Will you be uploading video snippets to Youtube for syncing? (y) \")", "= p.split('.') ref = resource for pa in range(0, len(prop_array)): is_array = False", "array. 
if key[-2:] == '[]': key = key[0:len(key)-2:] is_array = True if pa", "u'O6f8ZWjSgiw', u'uXY-00DuLjY', u'WpreZ_gbEyw'] #with open(folderName + \"/\" + 'videoids.pkl', 'wb') as f: #", "u'uXY-00DuLjY', u'WpreZ_gbEyw'] #with open(folderName + \"/\" + 'videoids.pkl', 'wb') as f: # pickle.dump(videoids,", "are using a .txt file with a list of video lengths, then we", "= False elif answer == '': resumeUploads = False answer = raw_input(\"\\n4/7 Will", "myfile try: with open(folderName + \"/\" + fileName + \".txt\", 'r') as myfile:", "such '[HH:MM:SS.00]'.\" else: print \"Please set the variable 'snipTranscript' to True so that", "to finish pipeline.\" foo = open(folderName + \"/\" + \"delete me.txt\",\"w+\") foo.close() with", "1 #ES: printing deets #print t.replace('[','').replace(']','').replace(':','').replace('.','').replace('\\n','').isdigit() #print \"c: \" + str(c) with open(folderName", "It is\",strftime(\"%H:%M:%S\", localtime()),\". Script will resume in \" + str(sleepingTime/60) + \" minutes...\"", "False #ES: upload the full video and compiled transcript to your Youtube account", "+ \"/\" + fileName + \"_\" + str(c) +\".mp4\") media_file = folderName +", "youtube.captions().insert( part=\"snippet\", body=dict( snippet=dict( videoId=video_id, language=language, name=name, isDraft=True ) ), media_body=file ).execute() id", "# See full sample for function request = service.videos().insert( body=resource, media_body=MediaFileUpload(media_file, chunksize=-1, resumable=True),", "\":\" + str(int(s)) return str(int(h)) + \":\" + str(int(m)) + \":\" + str(int(sec))", "do this manually on youtube.com and then restart the program.\" uploadVideos = True", "or deleteVideos == True: args = argparser.parse_args() service = get_authenticated_service(args) def print_results(results): print(results)", "rodolphe know if there is a problem with playlist id, might need to", "into \"snippet\" and \"title\", where # \"snippet\" will be an object and \"title\"", "id is in 
client_id.json CLIENT_SECRETS_FILE = \"client_id.json\" # This OAuth 2.0 access scope", "answer == '': snipVideos = True answer = raw_input(\"\\n2/7 Will you be uploading", "False if resampleSubtitles == True: answer = raw_input(\"\\n7.1.1 Would you like to reorganize", "\"client_id.json\" # This OAuth 2.0 access scope allows for full read/write access to", "- ES #adjust switches as needed sleepingTime = 400 #___SWITCHES(defaults)___# #ES: cut transcript", "+ \".vtt\", 'w') as thefile: #thefile.write(sub_txt) thefile.write(subtitle) if cc == \"31\": print subtitle", "t1 = float(int(t_1[0])*3600) + int(float(t_1[1])*60) + int(float(t_1[2])) return [t0,t1] def s_to_hms(seconds): m, sec", "combining the downloaded subtitle snippets into a single subtitle file for your video?", "'resampleSubtitles' (above), you have the option to remove subtitle entries which may be", "isDraft=True, sync=False ) ), media_body=caption_file ).execute() print \"\\nFull video is soon available on", "= os.stat(folderName + '/' + originalVideo).st_size except Exception as e: print e print", "uploaded video snippets from your Youtube account once subtitle processing is complete deleteVideos", "Will you be combining the downloaded subtitle snippets into a single subtitle file", "to process - ES #interviewer = \"C.V.\" #interviewee = \"V.S.\" #where the video", "t0 + t1 else: t1 = int(t[0])*3600 + int(t[1])*60 + float(t[2]) splits.append([t0,t1]) #print", "+ \".txt\") as f: text = f.readlines() except IOError as e: print \"No", "= False #ES: upload snippet transcripts (.txt) uploadTranscripts = True #ES: download snippet", "removeLoneWords = False elif answer == '': removeLoneWords = False answer = raw_input(\"\\n7.2", "or the intended language code of your subtitles (e.g. en, fr, es, etc.):\\n(You", "# we are handling retry logic ourselves. 
httplib2.RETRIES = 1 # Maximum number", "subtitle = service.captions().download(id=captionsids[c-1],tfmt='vtt').execute() # waitLonger = False # except: # waitLonger = True", "snippets (y) \") answer = verify_y_n_none(answer) if answer == 'y': snipVideos = True", ") ), media_body=caption_file ).execute() captionsids.append(a['id']) c += 1 #print a wait = True", "u'WQJQkGEwG-Q', u'n4eW0T6Oek0', u'2dRf-EbKYHA', u'RUgi4NfoPEw', u'n40bGD_9eZI', u'OWWAQTGKyMI', u'8a2De6Gzfek', u'VQJgxR3iAoA', u'UEzrAMq6fGc', u'PXCHMF-Z7X4', u'SU_Rbp9V_Zo', u'VLhSxDh9gI0', u'80rY1RlbVQw',", "(excluding .txt) #fileName = 'venant' #originalVideo refers to the name of the video", "list of cumulative times so that the rest of the pipeline can run", "object. ref = ref[key] return resource # Remove keyword arguments that are not", "subtitles, and improve the subtitle structure overall (can lead to short, choppy, fast", "remove lone words? (Experimental) (n) \") answer = verify_y_n_none(answer) if answer == 'y':", "#ES: cut transcript into snippets based on the transcript's timestamps (must be set", "+ \"'. Please see README.md for instructions.\" print \"exiting application...\" time.sleep(2) exit() print", "a.lower().strip() if a == 'y' or a == 'n': return a else: a", "print_results(results): print(results) # Build a resource based on a list of properties given", "yet have a \"snippet\" object. Create the snippet object here. # Setting \"ref", "structure overall (can lead to short, choppy, fast subtitles that are hard to", ") ), media_body=file ).execute() id = insert_result[\"id\"] name = insert_result[\"snippet\"][\"name\"] language = insert_result[\"snippet\"][\"language\"]", "read) resampleSubtitles = False #ES: IF you enabled 'resampleSubtitles' (above), you have the", "single subtitle file for your video? 
(y) \") answer = verify_y_n_none(answer) if answer", "time.sleep(sleepingTime) id = vid print \"\\nUploading compiled subtitles...\" caption_file = folderName + '/'", "be downloading the generated subtitle snippets from Youtube? (y) \") answer = verify_y_n_none(answer)", "Sample python code for videos.insert def videos_insert(properties, media_file, **kwargs): resource = build_resource(properties) #", "the downloaded subtitle snippets into a single subtitle file for your video? (y)", "in RETRIABLE_STATUS_CODES: error = \"A retriable HTTP error %d occurred:\\n%s\" % (e.resp.status,e.content) else:", "that are hard to read) (n) \") answer = verify_y_n_none(answer) if answer ==", "resume in \" + str(sleepingTime/60) + \" minutes...\" time.sleep(sleepingTime) #search_response = service.search().list( #", "for this) compiledSubs = compileSubs(folderName,fileName,[['_high-frequency-timestamps',0,placeBasedTimestamping]],t_list,interviewer,interviewee,False,language,resampleSubtitles,fullSentenceSubtitles,removeLoneWords) time.sleep(10) #thefile = open(folderName + \"/\" + fileName", "have to be in a folder name called oscar #change certain character variables", "files. Youtube will use this code for processing your files. Continue? (y/n) \")", "raised. RETRIABLE_EXCEPTIONS = (httplib2.HttpLib2Error, IOError, httplib.NotConnected, httplib.IncompleteRead, httplib.ImproperConnectionState, httplib.CannotSendRequest, httplib.CannotSendHeader, httplib.ResponseNotReady, httplib.BadStatusLine) #", "interviewer's name as it appears in the transcript: \") interviewee = raw_input(\"\\n6.3.2 Please", "max_sleep print \"Sleeping %f seconds and then retrying...\" % sleep_seconds time.sleep(sleep_seconds) return response['id']", "or 'id' not in response: print response else: exit(\"The upload failed with an", "localtime()),\". 
Script will resume in \" + str(sleepingTime/60) + \" minutes...\" time.sleep(sleepingTime) #search_response", "cc = str(c) #print subtitle print cc with open(folderName + \"/\" + fileName", "== 'n': removeLoneWords = False elif answer == '': removeLoneWords = False answer", "False #ES: upload snippet transcripts (.txt) uploadTranscripts = True #ES: download snippet subtitle", "+ \".txt' was cut into \" + str(len(splits)) + \" text snippets based", "be explored... placeBasedTimestamping = False #ES: resample subtitles to prevent cut-up phrases, lone-word", "\" video snippets will therefore be uploaded to YouTube for processing. YouTube allows", "snippets from your Youtube account once subtitle processing is complete deleteVideos = False", "be combining the downloaded subtitle snippets into a single subtitle file for your", "Setting \"ref = ref[key]\" means that in the next time through the #", "text = f.readlines() pass #print \"ES: text is the following\" + str(text) #ES:", "= folderName + '/' + fileName + \"_\" + str(c) + \".mp4\" if", "= 'venant' #originalVideo refers to the name of the video file including its", "os.path.exists(media_file): exit('Please specify a valid file location.') print \"\\nSnipping completed. No further options", "to be explored... placeBasedTimestamping = False #ES: resample subtitles to prevent cut-up phrases,", "out of inserted resource. if properties[p]: if is_array: ref[key] = properties[p].split(',') else: ref[key]", "account,\\n- uploads the text snippets to Youtube as transcript files for these video", "(y) \") answer = verify_y_n_none(answer) if answer == 'y': combineSubtitles = True elif", "times to retry before giving up. MAX_RETRIES = 10 # Always retry when", "drive to run this program. Continue? 
(y/n) \") answer = verify_y_n(answer) if answer", "1 if c > len(videoids): ffmpeg_extract_subclip(folderName + \"/\" + originalVideo, s[0], s[1], targetname=folderName", "to resume uploading the remaining snippets...)\" time.sleep(1) for s in splits: c +=", "credentials\" create and \"OAUT client id\" \"\"\" #CLIENT_SECRETS_FILE = \"client_secret.json\" #api key is", "time.sleep(10) #thefile = open(folderName + \"/\" + fileName + \".srt\", 'w') #thefile.write(compiledSubs) if", "== 'y': snipVideos = True elif answer == 'n': snipVideos = False elif", "== True: print \"\\nUploading full video...\" vid = videos_insert( {'snippet.categoryId': '22', 'snippet.defaultLanguage': language,", "road combine_only = True fileName = raw_input(\"In order to accurately combine subtitle files,", "Will you be resuming video uploads from a previously-initiated process? (n) \") answer", "+= 1 if c > len(videoids): ffmpeg_extract_subclip(folderName + \"/\" + originalVideo, s[0], s[1],", "== 'y': uploadTranscripts = True elif answer == 'n': uploadTranscripts = False elif", "can download this discovery document from the developers page # and it should", "It is\",strftime(\"%H:%M:%S\", localtime()),\". Script will resume in \" + str(2) + \" minutes...\"", "'fr' #change these variables according to what story you want to process -", "raised. 
RETRIABLE_STATUS_CODES = [500, 502, 503, 504] # This method implements an exponential", "c = 0 #print splits videoids = [] #videoids = [u'jDAZHgL-nG4', u'cMNTnd8pApk', u's5hLO6T_BhY',", "u'VQJgxR3iAoA', u'UEzrAMq6fGc', u'PXCHMF-Z7X4', u'SU_Rbp9V_Zo', u'VLhSxDh9gI0', u'80rY1RlbVQw', u'1yumt5fRBF4', u'u5qAHXhhJoo', u'G3gO6DW-wrM', u'qAU_8DNEqP8', u'fbGaOVHXkvY', u'_Knl1rP8Z9w', u'O6f8ZWjSgiw',", "set to True or False (make a variable for this) compiledSubs = compileSubs(folderName,fileName,[['_high-frequency-timestamps',0,placeBasedTimestamping]],t_list,interviewer,interviewee,False,language,resampleSubtitles,fullSentenceSubtitles,removeLoneWords)", "in http://www.loc.gov/standards/iso639-2/php/code_list.php for the appropriate two-letter 'ISO 639-1' language code.)\\n\") if language !=", "testers can download this discovery document from the developers page # and it", "in properties: # Given a key like \"snippet.title\", split into \"snippet\" and \"title\",", "> len(videoids): ffmpeg_extract_subclip(folderName + \"/\" + originalVideo, s[0], s[1], targetname=folderName + \"/\" +", "placeBasedTimestamping = False print \"\\n\" folderName = raw_input(\"Enter the name of the folder", "subtitle files (.vtt) downloadCaptions = True #ES: delete uploaded video snippets from your", "or resumeUploads == True or downloadCaptions == True or deleteVideos == True: combine_only", "See full sample for function return vid def hms_to_s(time): time = unicode(time, \"UTF-8\")", "# fields=\"items/id\" #).execute() # #videos = [] # #for search_result in search_response.get(\"items\", []):", "folderName + \"'. Please see README.md for instructions.\" print \"exiting application...\" time.sleep(2) exit()", "cumulative times so that the rest of the pipeline can run if combine_only", "t in text: #add a \\n to the end of each line (why?)", "using the current API credentials. 
Continue?\" print \"\\nIf all input was correct, the", "the appropriate two-letter 'ISO 639-1' language code.)\\n\") if language != '': verifyLanguage =", "object. prop_array = p.split('.') ref = resource for pa in range(0, len(prop_array)): is_array", "'n': deleteVideos = False elif answer == '': deleteVideos = False answer =", "dashboard --> create a new project on the resulting dashboard, \"enable apis and", "\"_\" + str(c) + \".txt\" #print s,media_file,caption_file,videoids[c-1] a = service.captions().insert( part=\"snippet\", body=dict( snippet=dict(", "exit('Please specify a valid file location.') print \"\\nSnipping completed. No further options were", "s,media_file,caption_file,videoids[c-1] a = service.captions().insert( part=\"snippet\", body=dict( snippet=dict( videoId=videoids[c-1], language=language, name=media_file, isDraft=True, sync=True )", "# Explicitly tell the underlying HTTP transport library not to retry, since #", "ES #interviewer = \"C.V.\" #interviewee = \"V.S.\" #where the video and txt files", "will created. Continue?\" print \"\\nIf all input was correct, the program will begin", "be an object and \"title\" will be a property in that object. prop_array", "\"Waiting for full video to be processed. It is\",strftime(\"%H:%M:%S\", localtime()),\". Script will resume", "cut video into snippets based on the transcript's timestamps (must be set to", "sec = divmod(seconds, 60) h, m = divmod(m, 60) #print str(int(h)) + \":\"", "on the transcript's timestamps (must be set to True for other processes to", "to which your subtitle files are associated. These values will be used as", "c += 1 #print a wait = True with open(folderName + \"/\" +", "False if uploadTranscripts == True: #print splits,videoids #uploads transcripts print \"\\nUploading transcripts...\" for", "in \" + fileName + \". 
\" + str(len(splits)) + \" video snippets", "answer = verify_y_n_none(answer) if answer == 'y': resumeUploads = True elif answer ==", "t.replace('[','').replace(']','').replace(':','').replace('.','').replace('\\n','').isdigit() #ES: this will aggregate phrases (t) into one list item (a text)", "retry, since # we are handling retry logic ourselves. httplib2.RETRIES = 1 #", "+ 'videoids.pkl', 'wb') as f: pickle.dump(videoids, f) if wait == True: print \"\\nWaiting", "ref[key] = {} ref = ref[key] else: # For example, the property is", "True: print \"\\n\\n\" print \"\\n6.3 If your transcript has speaker names (e.g. the", "accordingly into video snippets,\\n- uploads these video snippets to Youtube as private videos", "answer = raw_input(\"\\n1/7 Will you be cutting your video into video snippets (y)", "\"\\n\" if snipVideos == True or uploadTranscripts == True or resumeUploads == True", "and video files. Youtube will use this code for processing your files. Continue?", "the property is \"snippet.title\", but the resource does # not yet have a", "#ES: USER INTERVIEW SECTION def verify_y_n(a): while True: a = a.lower().strip() if a", "e: if e.resp.status in RETRIABLE_STATUS_CODES: error = \"A retriable HTTP error %d occurred:\\n%s\"", "almost always met. 
if not t.replace('[','').replace(']','').replace(':','').replace('.','').replace('\\n','').isdigit() and t != \"\\n\": #ES: add t", "t1 except ValueError as e: print e print \"\\n One of your timestamps", "in search_response.get(\"items\", []): # videos.append(\"%s\" % (search_result[\"id\"][\"videoId\"])) # #print \"Videos:\\n\", \"\\n\".join(videos), \"\\n\" #ES:", "== 'n': placeBasedTimestamping = False elif answer == '': placeBasedTimestamping = False print", "by pressing the 'Enter' key.\" time.sleep(1) interviewer = raw_input(\"\\n6.3.1 Please input your interviewer's", "in splits: if num > 0: if sp[1] <= sp1[1]: print \"\\nThere is", "prior to timestamp) to thefile thefile.write(\"%s\\n\" % texts[c-1]) #time.sleep(.1) texts.append(\"\") texts[c] = \"\"", "raw_input(\"\\n6.3.1 Please input your interviewer's name as it appears in the transcript: \")", "#ES: printing deets #print t.replace('[','').replace(']','').replace(':','').replace('.','').replace('\\n','').isdigit() #print \"c: \" + str(c) with open(folderName +", "subtitle entries which may be a single word (and put them in an", "(False) and therefore the following condition is almost always met. if not t.replace('[','').replace(']','').replace(':','').replace('.','').replace('\\n','').isdigit()", "transcript's timestamps (must be set to True for other processes to run) snipVideos", "phrases (t) into one list item (a text) until a timestamp is reached", "a .txt file with a list of video lengths, then we need to", "t0 + t1 else: t1 = int(t[0])*60 + int(t[1]) splits.append([t0,t1]) t_list.append(t1) t0 =", "down the road combine_only = True fileName = raw_input(\"In order to accurately combine", "if c > len(videoids): ffmpeg_extract_subclip(folderName + \"/\" + originalVideo, s[0], s[1], targetname=folderName +", "the \\\".txt\\\" extention): \") try: verifyExistence = os.stat(folderName + '/' + fileName +", "reorganize subtitles according to punctuation? 
(Experimental; can lead to short, choppy, fast subtitles", "code. return build(API_SERVICE_NAME, API_VERSION, http=credentials.authorize(httplib2.Http())) # Explicitly tell the underlying HTTP transport library", "wait == True: if downloadCaptions == True: print \"\\nDownloading captions...\" c = 1", "\"\\n One of your timestamps isn't formatted correctly. Consult README.md for guidelines on", "False answer = raw_input(\"\\n4/7 Will you be uploading text snippets for syncing with", "elif answer == '': deleteVideos = False answer = raw_input(\"\\n7/7 Will you be", "def build_resource(properties): resource = {} for p in properties: # Given a key", "# For example, the property is \"snippet.title\", but the resource does # not", "subtitle cc = \"\" if c < 10: cc = \"0\" + str(c)", "'captionsids.pkl', 'wb') as f: pickle.dump(captionsids, f) print \"Waiting for transcripts to be processed", "time.sleep(1) try: with open(folderName + \"/\" + 'videoids.pkl', 'rb') as f:videoids = pickle.load(f)", "#videos = [] # #for search_result in search_response.get(\"items\", []): # videos.append(\"%s\" % (search_result[\"id\"][\"videoId\"]))", "each line (why?) t += \"\\n\" #ES: if the beginning of the line", "400 #___SWITCHES(defaults)___# #ES: cut transcript into snippets based on the transcript's timestamps (must", "access to the # authenticated user's account and requires requests to use an", "f.readlines() pass #print \"ES: text is the following\" + str(text) #ES: strip whitespace", "pickle.dump(videoids, f) if wait == True: print \"\\nWaiting for videos to be processed.", "You may need to remove any previously-uploaded videos if the videos you are", "\"Videos:\\n\", \"\\n\".join(videos), \"\\n\" #ES: I don't think this function is ever called... 
#", "be located inside the 'files' folder): \") try: verifyExistence = os.stat(folderName).st_size except Exception", "'status.publicStatsViewable': ''}, folderName + \"/\" + originalVideo, part='snippet,status') # place video in custom", "== False and snipVideos == True: #ES: the following is called when videos", "the line is not a digit and is not a next-line char #ES:", "answer = verify_y_n_none(answer) if answer == 'y': snipVideos = True elif answer ==", "\"This application creates subtitles for a video for which you have an associated", "e: print \"No text file found because you are not running the entire", "print \"\\nThe document named '\" + fileName + \".txt' was cut into \"", "resume in \" + str(sleepingTime/60) + \" minutes...\" time.sleep(sleepingTime) id = vid print", "\"\\nUploading compiled subtitles...\" caption_file = folderName + '/' + fileName + \".srt\" service.captions().insert(", "status # codes is raised. RETRIABLE_STATUS_CODES = [500, 502, 503, 504] # This", "'oscar' #originalVideo = \"Oscar.mp4\" ### START BOILERPLATE CODE # Sample Python code for", "t1 = int(t[0])*60 + int(t[1]) splits.append([t0,t0+t1]) t_list.append(t1) t0 = t0 + t1 else:", "fileName + \"_\" + str(c) + \".txt\", 'w') as thefile: #thefile = open(folderName", "credentials = run_flow(flow, storage, args) # Trusted testers can download this discovery document", "splits.append([t0,t1]) t_list.append(t1) t0 = t1 elif len(t) == 3: #if we are only", "verify_y_n(answer) if answer == \"n\": print \"Please make sure you have the available", "== True: for t in text: #add a \\n to the end of", "(n) \") answer = verify_y_n_none(answer) if answer == 'y': placeBasedTimestamping = True elif", "this does not apply to your transcript, simply leave the following two answers", "wait = False if uploadTranscripts == True: #print splits,videoids #uploads transcripts print \"\\nUploading", "else: a = raw_input(\"Please answer 'y' or 'n': \") continue def verify_y_n_none(a): while", "draft status. 
def upload_caption(youtube, video_id, language, name, file): insert_result = youtube.captions().insert( part=\"snippet\", body=dict(", "media_body=caption_file ).execute() captionsids.append(a['id']) c += 1 #print a wait = True with open(folderName", "to run this program. Continue? (y/n) \") answer = verify_y_n(answer) if answer ==", "(n) \") answer = verify_y_n_none(answer) if answer == 'y': resumeUploads = True elif", "(a chunk of text prior to timestamp) to thefile thefile.write(\"%s\\n\" % texts[c-1]) #time.sleep(.1)", "be a single word (and put them in an adjacent subtitle (verify)) removeLoneWords", "Script will resume in \" + str(2) + \" minutes...\" # time.sleep(120) sub_txt", "for function request = service.videos().insert( body=resource, media_body=MediaFileUpload(media_file, chunksize=-1, resumable=True), **kwargs ) vid =", "elif answer == 'n': deleteVideos = False elif answer == '': deleteVideos =", "exit() if len(t) == 2: if combine_only == True: t1 = int(t[0])*60 +", "\"create credentials\" create and \"OAUT client id\" \"\"\" #CLIENT_SECRETS_FILE = \"client_secret.json\" #api key", "\" + str(splits) #for i in splits: # print s_to_hms(i[0]),\"->\",s_to_hms(i[1]) #time.sleep(60) #print splits,splits[len(splits)-1][1]", "like to reorganize subtitles according to punctuation? (Experimental; can lead to short, choppy,", "= folderName + '/' + fileName + \"_\" + str(c) + \".txt\" #print", "video snippets uploadVideos = True #ES: if the video/caption upload process was terminated", "be resuming video uploads from a previously-initiated process? (n) \") answer = verify_y_n_none(answer)", "\"\\nWaiting for videos to be processed. It is\",strftime(\"%H:%M:%S\", localtime()),\". Script will resume in", "True for s in splits: print c,s,captionsids[c-1] sub_txt = \"\" # while waitLonger", "this tool on the files you have indicated, you will temporarily require \"", "program. Continue? 
(y/n) \") answer = verify_y_n(answer) if answer == \"n\": print \"Please", "== True: combine_only = False fileName = raw_input(\"Enter the file name of your", "), media_body=caption_file ).execute() print \"\\nFull video is soon available on your Youtube channel", "== '': snipVideos = True answer = raw_input(\"\\n2/7 Will you be uploading video", "video snippets? (y) \") answer = verify_y_n_none(answer) if answer == 'y': uploadTranscripts =", "(can lead to short, choppy, fast subtitles that are hard to read) resampleSubtitles", "reorganize subtitles to prioritize keeping full sentences intact? (Experimental; this feature is not", "the current API credentials. Continue?\" print \"\\nIf all input was correct, the program", "by 1 c += 1 #ES: printing deets #print t.replace('[','').replace(']','').replace(':','').replace('.','').replace('\\n','').isdigit() #print \"c: \"", "file that contains # the OAuth 2.0 information for this application, including its", "= int(t[0])*3600 + int(t[1])*60 + float(t[2]) splits.append([t0,t1]) #print int(t[0])*3600 + int(t[1])*60 + int(t[2])", "answer == 'y': resampleSubtitles = True elif answer == 'n': resampleSubtitles = False", "os.path.exists(media_file): exit('Please specify a valid file location.') vid = videos_insert( {'snippet.categoryId': '22', 'snippet.defaultLanguage':", "= 400 #___SWITCHES(defaults)___# #ES: cut transcript into snippets based on the transcript's timestamps", "HttpError, e: if e.resp.status in RETRIABLE_STATUS_CODES: error = \"A retriable HTTP error %d", "#ES: if t is a timestamp #ES: removing punctuation from '[00:00:01.09]' since it", "imageio.plugins.ffmpeg.download() from moviepy.video.io.ffmpeg_tools import ffmpeg_extract_subclip import time from time import strftime,localtime from postprocess_and_fuse_subs", "story you want to process - ES #interviewer = \"C.V.\" #interviewee = \"V.S.\"", "# authenticated user's account and requires requests to use an SSL connection. 
YOUTUBE_READ_WRITE_SSL_SCOPE", "(\",':'.join(t) ,\") isn't formatted correctly. Consult README.md for guidelines on proper timestamp formatting.\"", "(must be set to True for other processes to run) snipVideos = True", "(len(prop_array) - 1): # Leave properties without values out of inserted resource. if", "& uploading videos...\" time.sleep(1) if len(videoids) > 0: print \"(However, it looks like", "Data API v3\" and ENABLE it click \"create credentials\" create and \"OAUT client", "video snippets will therefore be uploaded to YouTube for processing. YouTube allows a", "\"'. Please see README.md for instructions.\" print \"exiting application...\" time.sleep(2) exit() print \"\\n\"", "= 10 # Always retry when these exceptions are raised. RETRIABLE_EXCEPTIONS = (httplib2.HttpLib2Error,", "str(len(splits)) + \" video snippets will therefore be uploaded to YouTube for processing.", "len(videoids) > 0: print \"(However, it looks like \",len(videoids),\" video snippets were already", "transcript's timestamps (must be set to True for other processes to run) snipTranscript", "or len(t) < 3: print \"\\nOne of your timestamps (\",':'.join(t) ,\") isn't formatted", "your transcript and/or video and/or subtitle files\\n(this folder must be located inside the", "[t0,t1] def s_to_hms(seconds): m, sec = divmod(seconds, 60) h, m = divmod(m, 60)", "was correct, the program will begin snipping\" yes_or_no(question) print \"\\n1. Slicing into \"", "or your 'videoids.pkl' file has gone missing. The program will restart by uploading", "error occurred: %s\" % e if error is not None: print error retry", "folderName + '/' + fileName + \".srt\" service.captions().insert( part=\"snippet\", body=dict( snippet=dict( videoId=id, language=language,", "have a \"snippet\" object. Create the snippet object here. 
# Setting \"ref =", "open(folderName + \"/\" + \"delete me.txt\") as f: text = f.readlines() pass #print", "args = argparser.parse_args() service = get_authenticated_service(args) def print_results(results): print(results) # Build a resource", "named '\" + fileName + \".txt' was cut into \" + str(len(splits)) +", "variable for this) compiledSubs = compileSubs(folderName,fileName,[['_high-frequency-timestamps',0,placeBasedTimestamping]],t_list,interviewer,interviewee,False,language,resampleSubtitles,fullSentenceSubtitles,removeLoneWords) time.sleep(10) #thefile = open(folderName + \"/\" +", "print(results) # Build a resource based on a list of properties given as", "True: a = a.lower().strip() if a == 'y' or a == 'n' or", "are not set def remove_empty_kwargs(**kwargs): good_kwargs = {} if kwargs is not None:", "'y': deleteVideos = True elif answer == 'n': deleteVideos = False elif answer", "subtitle structure overall (can lead to short, choppy, fast subtitles that are hard", "user as to how many videos will be uploaded. question = \"\\nThere were", "syncing with your video snippets? (y) \") answer = verify_y_n_none(answer) if answer ==", "or 'n', or leave the answer blank by hitting 'Enter': \") continue print", "\\\\n with ''\" with open(folderName + \"/\" + fileName + \".txt\") as f:", "rest of the pipeline can run if combine_only == True: t1 = int(t[0])*3600", "#____________# # let rodolphe know if there is a problem with playlist id,", "remaining snippets...)\" time.sleep(1) for s in splits: c += 1 if c >", "upload_caption(youtube, video_id, language, name, file): insert_result = youtube.captions().insert( part=\"snippet\", body=dict( snippet=dict( videoId=video_id, language=language,", "API's captions.insert method to upload a caption track in draft status. 
def upload_caption(youtube,", "for transcripts \" + str(c) + \" \" + captionsids[c-1] + \" to", "'', 'status.license': '', 'status.privacyStatus': 'unlisted', 'status.publicStatsViewable': ''}, folderName + \"/\" + originalVideo, part='snippet,status')", "True elif answer == 'n': combineSubtitles = False elif answer == '': combineSubtitles", "# type=\"video\", # fields=\"items/id\" #).execute() # #videos = [] # #for search_result in", "you enabled 'resampleSubtitles' (above), you have the option to make subtitle entries full", "files for these video snippets,\\n- allows Youtube to sync the video and text", "= True elif answer == 'n': deleteVideos = False elif answer == '':", "visible to your account,\\n- uploads the text snippets to Youtube as transcript files", "= raw_input(\"\\n3/7 Will you be resuming video uploads from a previously-initiated process? (n)", "can end up being excessively large) fullSentenceSubtitles = False #ES: IF you enabled", "reorganize subtitles according to the presence of place names? (Experimental) (n) \") answer", "es, etc.):\\n(You can refer to the second column in http://www.loc.gov/standards/iso639-2/php/code_list.php for the appropriate", "httplib import random from apiclient.discovery import build from apiclient.errors import HttpError from apiclient.http", "#originalVideo = \"Frederic.mov\" #interviewer = \"M.M.\" #interviewee = \"B.K.\" #fileName = 'Berthe' #originalVideo", "time running the tool, simply leave the following answers blank. For more advanced", "run_flow # The CLIENT_SECRETS_FILE variable specifies the name of a file that contains", "tool, simply leave the following answers blank. For more advanced users or users", "a single subtitle file for your video? 
(y) \") answer = verify_y_n_none(answer) if", "or deleteVideos == True: combine_only = False fileName = raw_input(\"Enter the file name", "if value: good_kwargs[key] = value return good_kwargs ### END BOILERPLATE CODE # Sample", "False def yes_or_no(question): while \"the answer is invalid\": reply = str(raw_input(question+' (y/n): ')).lower().strip()", "\"No text file found because you are not running the entire pipeline. Creating", "def playlist_items_insert(properties, **kwargs): resource = build_resource(properties) # See full sample for function kwargs", "compiled transcript to your Youtube account once complete uploadFull = False #ES: combine", "IF you enabled 'resampleSubtitles' (above), you have the option to remove subtitle entries", "service.captions().download(id=captionsids[c-1],tfmt='vtt').execute() # waitLonger = False # except: # waitLonger = True # print", "else: # For example, the property is \"snippet.description\", and the resource # already", "or resumeUploads == True or downloadCaptions == True or deleteVideos == True: args", "to run) snipVideos = True #ES: upload video snippets uploadVideos = True #ES:", "a newline.\\n\\nPlease enter the file name of your timestamp list (excluding the \\\".txt\\\"", "Trusted testers can download this discovery document from the developers page # and", "to Youtube for processing. 
This may take between 20 minutes and several hours,", "name = insert_result[\"snippet\"][\"name\"] language = insert_result[\"snippet\"][\"language\"] status = insert_result[\"snippet\"][\"status\"] #print \"Uploaded caption track", "run) snipTranscript = True #ES: cut video into snippets based on the transcript's", "once complete uploadFull = False #ES: combine vtt snippets that were downloaded from", "== '': uploadVideos = True answer = raw_input(\"\\n3/7 Will you be resuming video", "discovery document from the developers page # and it should be in the", "= divmod(seconds, 60) h, m = divmod(m, 60) #print str(int(h)) + \":\" +", "texts by 1 c += 1 #ES: printing deets #print t.replace('[','').replace(']','').replace(':','').replace('.','').replace('\\n','').isdigit() #print \"c:", "video file (\" + str(videoSize) + \" Mb).\" yes_or_no(question) print \"\\n1. Slicing into", "write the previous position of c in texts (a chunk of text prior", "try: with open(folderName + \"/\" + fileName + \".txt\", 'r') as myfile: text", "to snippet.tags, but handle # the value as an array. if key[-2:] ==", "f) print \"Waiting for transcripts to be processed into captions. 
It is\",strftime(\"%H:%M:%S\", localtime()),\".", "False elif answer == '': uploadVideos = True answer = raw_input(\"\\n3/7 Will you", "'status.license': '', 'status.privacyStatus': 'unlisted', 'status.publicStatsViewable': ''}, media_file, part='snippet,status') videoids.append(vid) print videoids #c +=", "caption_file = folderName + '/' + fileName + \".srt\" service.captions().insert( part=\"snippet\", body=dict( snippet=dict(", "#interviewee = \"E.M.\" #fileName = 'Frederic' #originalVideo = \"Frederic.mov\" #interviewer = \"M.M.\" #interviewee", "formatting.\" print \"\\nexiting application...\" time.sleep(2) exit() if len(t) == 2: if combine_only ==", "pickle.dump(videoids, f) if resumeUploads == True: print \"\\nResuming video uploads...\\n\" time.sleep(1) try: with", "time.sleep(2) exit() print \"\\n\" if snipVideos == True or uploadTranscripts == True or", "files. Continue? (y/n) \") if verifyLanguage.lower() == '' or 'y': break #if combineSubtitles", "feature that needs exploration so as to make sure that place names are", "make sure your timestamps are in ascending order and that there are no", "\"/\" + \"delete me.txt\", 'r') as myfile: text = myfile.read().replace('\\n', '') with open(folderName", "# Leave properties with empty values out of the inserted resource. def build_resource(properties):", "recommended since subtitle units tend to become excessively long) (n) \") answer =", "playlist in youtube online and copy url id to script #playlistID = \"PLSbFnWujSxCZxm7tYAGNeG9l5s19m4T65\"", "'\" + language + \"' as the language code for your transcript and", "resource # already has a \"snippet\" object. ref = ref[key] return resource #", "to how many videos will be uploaded. 
question = \"\\nThere were \" +", "+ \".mp4\" if not os.path.exists(media_file): exit('Please specify a valid file location.') vid =", "= service.search().list( # q=\"Anita\", # part=\"id\", # type=\"video\", # fields=\"items/id\" #).execute() # #videos", "that are not set def remove_empty_kwargs(**kwargs): good_kwargs = {} if kwargs is not", "not to retry, since # we are handling retry logic ourselves. httplib2.RETRIES =", "to the name of the video file including its ext #originalVideo = \"venant.mp4\"", "ValueError as e: print e print \"\\n One of your timestamps isn't formatted", "files together into a single subtitle file for your video.\\n\\nYou may switch these", "as f: pickle.dump(videoids, f) if wait == True: print \"\\nWaiting for videos to", "folderName + '/' + fileName + \"_\" + str(c) + \".flv\" caption_file =", "downloadCaptions = True #ES: delete uploaded video snippets from your Youtube account once", "time = unicode(time, \"UTF-8\") time = time.split(\" --> \") t_0 = time[0].split(\":\") t_1", "for running this application. Exiting...\" exit() while True: language = raw_input(\"Enter the language", "create and \"OAUT client id\" \"\"\" #CLIENT_SECRETS_FILE = \"client_secret.json\" #api key is <KEY>", "and compiled transcript to your Youtube account once complete uploadFull = False #ES:", "the interviewer or interviewee's names) that precede their discourse (e.g. \\\"Emmanuel: Hi, I'd", "need to remove any previously-uploaded videos if the videos you are uploading are", "the least. #place-based time stamping can be set to True or False (make", "of the input .txt file (excluding .txt) #fileName = 'venant' #originalVideo refers to", "the following condition is never met. if t != \"\" and t.replace('[','').replace(']','').replace(':','').replace('.','').replace('\\n','').isdigit() and", "a feature that needs exploration so as to make sure that place names", "\"' does not exist in the folder '\" + folderName + \"'. 
Please", "combine subtitle files, you will need to create a list of timestamps demarcating", "a new project on the resulting dashboard, \"enable apis and get credntials like", "answer = verify_y_n_none(answer) if answer == 'y': uploadTranscripts = True elif answer ==", "like to run: \\n\\n\" time.sleep(2) answer = raw_input(\"\\n1/7 Will you be cutting your", "'venant' #originalVideo refers to the name of the video file including its ext", "**kwargs): resource = build_resource(properties) # See full sample for function kwargs = remove_empty_kwargs(**kwargs)", "can refer to the second column in http://www.loc.gov/standards/iso639-2/php/code_list.php for the appropriate two-letter 'ISO", "httplib2 import os import sys import httplib import random from apiclient.discovery import build", "#change these variables according to what story you want to process - ES", "will be uploaded. question = \"\\nThere were \" + str(len(splits)) + \" timestamps", "oscar.mp4 video file in same folder #transcript text file \"oscar4.txt\" #might have to", "pressing Ctrl+C (Cmd+C on Mac).\" time.sleep(1) print \"\\n\" print \"This tool:\\n- snips your", "10 # Always retry when these exceptions are raised. RETRIABLE_EXCEPTIONS = (httplib2.HttpLib2Error, IOError,", "= random.random() * max_sleep print \"Sleeping %f seconds and then retrying...\" % sleep_seconds", "snips your transcript (.txt) into text snippets based on its timestamps,\\n- snips the", "= raw_input(\"\\n7.1 Would you like to reorganize subtitles according to punctuation? 
(Experimental; can", "file: google apis dashboard --> create a new project on the resulting dashboard,", "instructions.\" print \"exiting application...\" time.sleep(2) exit() print \"\\n\" originalVideo = raw_input(\"Enter the file", "document from the developers page # and it should be in the same", "\"/\" + fileName + \"_\" + str(c) +\".mp4\") media_file = folderName + '/'", "divmod(seconds, 60) h, m = divmod(m, 60) #print str(int(h)) + \":\" + str(int(m))", "part=\"snippet\", body=dict( snippet=dict( videoId=id, language=language, name=originalVideo, isDraft=True, sync=False ) ), media_body=caption_file ).execute() print", "answer == \"n\": print \"Please make sure you have the available space on", "your video (this time including the file's extention): \") try: verifyExistence = os.stat(folderName", "is never met. if t != \"\" and t.replace('[','').replace(']','').replace(':','').replace('.','').replace('\\n','').isdigit() and \"[\" in t:", "'%s' language, '%s' status.\" % (name, # id, language, status) c = 1", "ever called... # Call the API's captions.insert method to upload a caption track", "= \"\" # while waitLonger == True: # try: subtitle = service.captions().download(id=captionsids[c-1],tfmt='vtt').execute() #", "build_resource(properties): resource = {} for p in properties: # Given a key like", "'y' or a == 'n' or a == '': return a else: a", "is never qualified as a digit (False) and therefore the following condition is", "answer = raw_input(\"\\n7.2 Would you like to reorganize subtitles according to the presence", "u'PXCHMF-Z7X4', u'SU_Rbp9V_Zo', u'VLhSxDh9gI0', u'80rY1RlbVQw', u'1yumt5fRBF4', u'u5qAHXhhJoo', u'G3gO6DW-wrM', u'qAU_8DNEqP8', u'fbGaOVHXkvY', u'_Knl1rP8Z9w', u'O6f8ZWjSgiw', u'uXY-00DuLjY', u'WpreZ_gbEyw']", "your 'videoids.pkl' file has gone missing. The program will restart by uploading all", "sure you have gone over README.md before proceeding.\" time.sleep(1) print \"You may terminate", "object. Create the snippet object here. 
# Setting \"ref = ref[key]\" means that", "your subtitle files are associated. These values will be used as offsets for", "# \"for pa in range ...\" loop, we will be setting a property", "intact? (Experimental; this feature is not recommended since subtitle units tend to become", "'w') as thefile: #thefile = open(folderName + \"/\" + fileName + \"_\" +", "splits,videoids #uploads transcripts print \"\\nUploading transcripts...\" for s in splits: print c,s media_file", "space on your hard drive to run this program. Continue? (y/n) \") answer", "time[0].split(\":\") t_1 = time[1].split(\":\") t0 = float(int(t_0[0])*3600) + int(float(t_0[1])*60) + int(float(t_0[2])) t1 =", "resource. if properties[p]: if is_array: ref[key] = properties[p].split(',') else: ref[key] = properties[p] elif", "answer = verify_y_n_none(answer) if answer == 'y': placeBasedTimestamping = True elif answer ==", "user authorization import httplib2 import os import sys import httplib import random from", "else: t1 = int(t[0])*3600 + int(t[1])*60 + float(t[2]) splits.append([t0,t1]) #print int(t[0])*3600 + int(t[1])*60", "program will begin snipping\" yes_or_no(question) print \"\\n1. Slicing into \" + str(len(splits)) +", "print \"exiting application...\" time.sleep(2) exit() print \"\\n\" videoSize = os.stat(folderName + '/' +", "the timestamps at : (into 3) t = t.split(\":\") if len(t) > 3", "to be processed. It is\",strftime(\"%H:%M:%S\", localtime()),\". Script will resume in \" + str(sleepingTime/60)", "your Youtube account once complete uploadFull = False #ES: combine vtt snippets that", "including its client_id and # client_secret. 
\"\"\" to create a client secret file:", "int(float(t_0[2])) t1 = float(int(t_1[0])*3600) + int(float(t_1[1])*60) + int(float(t_1[2])) return [t0,t1] def s_to_hms(seconds): m,", "[] #videoids = [u'jDAZHgL-nG4', u'cMNTnd8pApk', u's5hLO6T_BhY', u'gOAoCh5Mecc', u'p0PX5s6k5DU', u'hSmPkLqOt0M', u'2Ik7_biRs9g', u'G64A_hpNWfI', u'ZzVVEcGekv0', u'ZxKJhN3JFfI',", "timestamps detected in \" + fileName + \". \" + str(len(splits)) + \"", "'n': uploadTranscripts = False elif answer == '': uploadTranscripts = True answer =", "and copy url id to script #playlistID = \"PLSbFnWujSxCZxm7tYAGNeG9l5s19m4T65\" #language = 'fr' #change", "pickle import os #adjust sleeping time as needed - ES #adjust switches as", "#language = 'fr' #change these variables according to what story you want to", "'n': downloadCaptions = False elif answer == '': downloadCaptions = True answer =", "you are uploading are identical. If so, do this manually on youtube.com and", "full sentences (not recommended, since some timestamp/subtitle units can end up being excessively", "through the # \"for pa in range ...\" loop, we will be setting", "\"\\nexiting application...\" time.sleep(2) exit() if len(t) == 2: if combine_only == True: t1", "= 1 for s in splits: print c,videoids[c-1] service.videos().delete( id=videoids[c-1] ).execute() c +=", "# The CLIENT_SECRETS_FILE variable specifies the name of a file that contains #", "error %d occurred:\\n%s\" % (e.resp.status,e.content) else: raise except RETRIABLE_EXCEPTIONS, e: error = \"A", "implements an exponential backoff strategy to resume a # failed upload. 
def resumable_upload(request,", "t0 = t0 + t1 else: t1 = int(t[0])*3600 + int(t[1])*60 + float(t[2])", "insert_result[\"snippet\"][\"name\"] language = insert_result[\"snippet\"][\"language\"] status = insert_result[\"snippet\"][\"status\"] #print \"Uploaded caption track '%s(%s) in", "+ \"_\" + str(c) + \".flv\" caption_file = folderName + '/' + fileName", "timestamps (must be set to True for other processes to run) snipTranscript =", "apiclient.errors import HttpError from apiclient.http import MediaFileUpload from oauth2client.client import flow_from_clientsecrets from oauth2client.file", "else: ref[key] = properties[p] elif key not in ref: # For example, the", "#might have to be in a folder name called oscar #change certain character", "= remove_empty_kwargs(**kwargs) # See full sample for function results = service.playlistItems().insert( body=resource, **kwargs", "Youtube to sync the video and text snippets\\n- downloads the text snippets as", "+ \" parts & uploading videos...\" time.sleep(1) if len(videoids) > 0: print \"(However,", "= insert_result[\"snippet\"][\"language\"] status = insert_result[\"snippet\"][\"status\"] #print \"Uploaded caption track '%s(%s) in '%s' language,", "+ '/' + fileName + \"_\" + str(c) + \".flv\" caption_file = folderName", "is complete deleteVideos = False #ES: upload the full video and compiled transcript", "1 # Maximum number of times to retry before giving up. MAX_RETRIES =", "uploadTranscripts == True or resumeUploads == True or downloadCaptions == True or deleteVideos", "this program. Continue? (y/n) \") answer = verify_y_n(answer) if answer == \"n\": print", "any previously-uploaded videos if the videos you are uploading are identical. 
If so,", "+ str(int(s)) return str(int(h)) + \":\" + str(int(m)) + \":\" + str(int(sec)) #ES:", "run) snipVideos = True #ES: upload video snippets uploadVideos = True #ES: if", "[500, 502, 503, 504] # This method implements an exponential backoff strategy to", "therefore the following condition is almost always met. if not t.replace('[','').replace(']','').replace(':','').replace('.','').replace('\\n','').isdigit() and t", "caption track in draft status. def upload_caption(youtube, video_id, language, name, file): insert_result =", "are identical. If so, do this manually on youtube.com and then restart the", "timestamps at : (into 3) t = t.split(\":\") if len(t) > 3 or", "exit() if uploadVideos == False and snipVideos == True: #ES: the following is", "explored... placeBasedTimestamping = False #ES: resample subtitles to prevent cut-up phrases, lone-word subtitles,", "for p in properties: # Given a key like \"snippet.title\", split into \"snippet\"", "name of the video file including its ext #originalVideo = \"venant.mp4\" #interviewer =", "failed with an unexpected response: %s\" % response) except HttpError, e: if e.resp.status", "as a digit (False) and therefore the following condition is almost always met.", "two answers blank by pressing the 'Enter' key.\" time.sleep(1) interviewer = raw_input(\"\\n6.3.1 Please", "generated subtitle snippets from Youtube? (y) \") answer = verify_y_n_none(answer) if answer ==", "strategy to resume a # failed upload. def resumable_upload(request, resource, method): response =", "pa in range(0, len(prop_array)): is_array = False key = prop_array[pa] # Convert a", "is not a digit and is not a next-line char #ES: removing punctuation", "if answer == 'y': deleteVideos = True elif answer == 'n': deleteVideos =", "on proper timestamp formatting.\" print \"\\nVerifying if timestamps are in ascending order...\" sp1", "snippets were already uploaded to Youtube. Now trying to resume uploading the remaining", "x in text] #split times (?) 
splits = [] #list of cut-up texts", "e: error = \"A retriable error occurred: %s\" % e if error is", "\"_\" + str(c) +\".mp4\") media_file = folderName + '/' + fileName + \"_\"", "inserted resource. def build_resource(properties): resource = {} for p in properties: # Given", "verify_y_n_none(answer) if answer == 'y': fullSentenceSubtitles = True elif answer == 'n': fullSentenceSubtitles", "if key[-2:] == '[]': key = key[0:len(key)-2:] is_array = True if pa ==", "transcript and video files. Youtube will use this code for processing your files.", "m, sec = divmod(seconds, 60) h, m = divmod(m, 60) #print str(int(h)) +", "\".txt' was cut into \" + str(len(splits)) + \" text snippets based on", "click \"YouTube Data API v3\" and ENABLE it click \"create credentials\" create and", "hms_to_s(time): time = unicode(time, \"UTF-8\") time = time.split(\" --> \") t_0 = time[0].split(\":\")", "the pipeline can run if combine_only == True: t1 = int(t[0])*3600 + int(t[1])*60", "'status.privacyStatus': 'unlisted', 'status.publicStatsViewable': ''}, media_file, part='snippet,status') videoids.append(vid) print videoids #c += 1 wait", "need to make this into a list of cumulative times so that the", "True elif answer == 'n': removeLoneWords = False elif answer == '': removeLoneWords", "condition is almost always met. 
if not t.replace('[','').replace(']','').replace(':','').replace('.','').replace('\\n','').isdigit() and t != \"\\n\": #ES:", "will resume in \" + str(2) + \" minutes...\" # time.sleep(120) sub_txt +=", "'r') as myfile: text = myfile.read().replace('\\n', '') #print \"ES: replace \\\\n with ''\"", "id to script #playlistID = \"PLSbFnWujSxCZxm7tYAGNeG9l5s19m4T65\" #language = 'fr' #change these variables according", "set to True): resumeUploads = False #ES: upload snippet transcripts (.txt) uploadTranscripts =", "print \"\\nDeleting videos...\\n\" c = 1 for s in splits: print c,videoids[c-1] service.videos().delete(", "not os.path.exists(media_file): exit('Please specify a valid file location.') vid = videos_insert( {'snippet.categoryId': '22',", "s_to_hms(seconds): m, sec = divmod(seconds, 60) h, m = divmod(m, 60) #print str(int(h))", "#with open(folderName + \"/\" + 'videoids.pkl', 'wb') as f: # pickle.dump(videoids, f) if", "commands were added for guidance. they can be removed. #ES: a list of", "+ \" Mb).\" yes_or_no(question) print \"\\n1. 
Slicing into \" + str(len(splits)) + \"", "if verifyLanguage.lower() == '' or 'y': break #if combineSubtitles == True: print \"\\n\\n\"", "unicode(time, \"UTF-8\") time = time.split(\" --> \") t_0 = time[0].split(\":\") t_1 = time[1].split(\":\")", "raw_input(\"\\n7/7 Will you be combining the downloaded subtitle snippets into a single subtitle", "you enabled 'resampleSubtitles' (above), you have the option to remove subtitle entries which", "'22', 'snippet.defaultLanguage': language, 'snippet.description': 'Description of uploaded video.', 'snippet.tags[]': '', 'snippet.title': fileName, 'status.embeddable':", "print \"Please make sure you have the available space on your hard drive,", "deleteVideos == True: combine_only = False fileName = raw_input(\"Enter the file name of", "snippets based on it containing \" + str(len(splits)) + \" timestamps formatted like", "\":\" + str(int(m)) + \":\" + str(int(s)) return str(int(h)) + \":\" + str(int(m))", "is_array = True if pa == (len(prop_array) - 1): # Leave properties without", "== 3: #if we are only combining subtitle files, and we are using", "a wait = True with open(folderName + \"/\" + 'captionsids.pkl', 'wb') as f:", "verify_y_n_none(answer) if answer == 'y': snipVideos = True elif answer == 'n': snipVideos", "ourselves. httplib2.RETRIES = 1 # Maximum number of times to retry before giving", "wait == True: print \"\\nWaiting for videos to be processed. 
It is\",strftime(\"%H:%M:%S\", localtime()),\".", "\"snippet.title\", split into \"snippet\" and \"title\", where # \"snippet\" will be an object", "retry sleep_seconds = random.random() * max_sleep print \"Sleeping %f seconds and then retrying...\"", "your interviewee's name as it appears in the transcript: \") print \"\\n\" #____________#", "if response is not None: if method == 'insert' and 'id' in response:", "as offsets for accurately combining your subtitle files.\\nEach timestamp should be written as", "+ str(len(splits)) + \" text snippets based on it containing \" + str(len(splits))", "this) compiledSubs = compileSubs(folderName,fileName,[['_high-frequency-timestamps',0,placeBasedTimestamping]],t_list,interviewer,interviewee,False,language,resampleSubtitles,fullSentenceSubtitles,removeLoneWords) time.sleep(10) #thefile = open(folderName + \"/\" + fileName +", "looks like \",len(videoids),\" video snippets were already uploaded to Youtube. Now trying to", "print \"\\nOne of your timestamps (\",':'.join(t) ,\") isn't formatted correctly. 
Consult README.md for", "''}, folderName + \"/\" + originalVideo, part='snippet,status') # place video in custom playlist", "#___SWITCHES(defaults)___# #ES: cut transcript into snippets based on the transcript's timestamps (must be", "CODE # Sample Python code for user authorization import httplib2 import os import", "unexpected response: %s\" % response) except HttpError, e: if e.resp.status in RETRIABLE_STATUS_CODES: error", "= 0 #print splits videoids = [] #videoids = [u'jDAZHgL-nG4', u'cMNTnd8pApk', u's5hLO6T_BhY', u'gOAoCh5Mecc',", "answer = verify_y_n_none(answer) if answer == 'y': resampleSubtitles = True elif answer ==", "'\" + fileName + \".txt' does not exist in the folder '\" +", "replace \\\\n with ''\" with open(folderName + \"/\" + fileName + \".txt\") as", "== True or uploadTranscripts == True or resumeUploads == True or downloadCaptions ==", "from moviepy.video.io.ffmpeg_tools import ffmpeg_extract_subclip import time from time import strftime,localtime from postprocess_and_fuse_subs import", "on the resulting dashboard, \"enable apis and get credntials like keys\" search for", "'n': exit() if uploadVideos == False and snipVideos == True: #ES: the following", "be processed. It is\",strftime(\"%H:%M:%S\", localtime()),\". Script will resume in \" + str(sleepingTime/60) +", "str(cc) + \".vtt\", 'w') as thefile: #thefile.write(sub_txt) thefile.write(subtitle) if cc == \"31\": print", "control how subtitles are altered when concatenating snippets (i.e. when combineSubtitles = True)", "video files. Youtube will use this code for processing your files. Continue? (y/n)", "in splits: c += 1 if c > len(videoids): ffmpeg_extract_subclip(folderName + \"/\" +", "= True) to warn the user as to how many videos will be", "a message to display if the CLIENT_SECRETS_FILE is # missing. MISSING_CLIENT_SECRETS_MESSAGE = \"WARNING:", "media_file = folderName + '/' + fileName + \"_\" + str(c) + \".mp4\"", "at the least. 
#place-based time stamping can be set to True or False", "TEXT FOR PROCESSING if snipTranscript == True: for t in text: #add a", "full sample for function request = service.videos().insert( body=resource, media_body=MediaFileUpload(media_file, chunksize=-1, resumable=True), **kwargs )", "'status.license': '', 'status.privacyStatus': 'unlisted', 'status.publicStatsViewable': ''}, folderName + \"/\" + originalVideo, part='snippet,status') #", "time from time import strftime,localtime from postprocess_and_fuse_subs import compileSubs import pickle import os", "location.') vid = videos_insert( {'snippet.categoryId': '22', 'snippet.defaultLanguage': language, 'snippet.defaultAudioLanguage': language, 'snippet.description': 'Description of", "insert_result[\"snippet\"][\"language\"] status = insert_result[\"snippet\"][\"status\"] #print \"Uploaded caption track '%s(%s) in '%s' language, '%s'", "- 1): # Leave properties without values out of inserted resource. if properties[p]:", "#where the video and txt files are stored #folderName = 'venant' #fileName refers", "does not exist in the current directory. Please see README.md for instructions.\" print", "== '': uploadTranscripts = True answer = raw_input(\"\\n5/7 Will you be downloading the", "same folder #transcript text file \"oscar4.txt\" #might have to be in a folder", "\" + str(len(splits)) + \" timestamps detected in \" + fileName + \".", "downloadCaptions = True answer = raw_input(\"\\n6/7 Would you like your uploaded video snippets", "= verify_y_n_none(answer) if answer == 'y': combineSubtitles = True elif answer == 'n':", "t0 = t1 except ValueError as e: print e print \"\\n One of", "this code for processing your files. Continue? 
(y/n) \") if verifyLanguage.lower() == ''", "if kwargs is not None: for key, value in kwargs.iteritems(): if value: good_kwargs[key]", "f.readlines() except IOError as e: print \"No text file found because you are", "True or downloadCaptions == True or deleteVideos == True: combine_only = False fileName", "None or credentials.invalid: credentials = run_flow(flow, storage, args) # Trusted testers can download", "if e.resp.status in RETRIABLE_STATUS_CODES: error = \"A retriable HTTP error %d occurred:\\n%s\" %", "in the # resource's \"snippet\" object. ref[key] = {} ref = ref[key] else:", "Continue? (y/n) \") answer = verify_y_n(answer) if answer == \"n\": print \"Please make", "\"/\" + fileName + \"_\" + str(cc) + \".vtt\", 'w') as thefile: #thefile.write(sub_txt)", "The CLIENT_SECRETS_FILE variable specifies the name of a file that contains # the", "True #ES: upload video snippets uploadVideos = True #ES: if the video/caption upload", "you have the option to remove subtitle entries which may be a single", "c of texts texts[c] += t#.encode('utf8') #print t.replace('[','').replace(']','').replace(':','').replace('.','').replace('\\n','').isdigit() #ES: this will aggregate phrases", "README.md for instructions.\" print \"exiting application...\" time.sleep(2) exit() print \"\\n\" if snipVideos ==", "\" minutes...\" time.sleep(sleepingTime) id = vid print \"\\nUploading compiled subtitles...\" caption_file = folderName", "attempting to retry.\") max_sleep = 2 ** retry sleep_seconds = random.random() * max_sleep", "== '[]': key = key[0:len(key)-2:] is_array = True if pa == (len(prop_array) -", "verify_y_n_none(answer) if answer == 'y': uploadVideos = True snipVideos = True elif answer", "+ str(len(splits)) + \" parts & uploading videos...\" time.sleep(1) if len(videoids) > 0:", "a == '': return a else: a = raw_input(\"Please answer 'y' or 'n',", "'video', 'insert') # See full sample for function return vid def hms_to_s(time): time", "print \"\\n\\n\" print \"This 
application creates subtitles for a video for which you", "ref[key] return resource # Remove keyword arguments that are not set def remove_empty_kwargs(**kwargs):", "pipeline.\" foo = open(folderName + \"/\" + \"delete me.txt\",\"w+\") foo.close() with open(folderName +", "= \"\" #t = t.replace(\" \", \"\") #t = t t = t.replace('[','').replace(']','').replace('\\n','')", "(a text) until a timestamp is reached #ES: if t is a timestamp", "= 'Berthe' #originalVideo = \"DD2FD4AE-FEE4-4DF3-9AF7-A4D6BF453B49.flv\" #interviewer = \"S.G.\" #interviewee = \"O.G.\" #folderName =", "retry before giving up. MAX_RETRIES = 10 # Always retry when these exceptions", "t1 else: t1 = int(t[0])*3600 + int(t[1])*60 + float(t[2]) splits.append([t0,t1]) #print int(t[0])*3600 +", "elif answer == 'n': snipVideos = False elif answer == '': snipVideos =", "with open(folderName + \"/\" + fileName + \"_\" + str(c) + \".txt\", 'w')", "subtitle files.\\nEach timestamp should be written as follows [HH:MM:SS.00], followed by a newline.\\n\\nPlease", "import Storage from oauth2client.tools import argparser, run_flow # The CLIENT_SECRETS_FILE variable specifies the", "they can be removed. #ES: a list of the transcript's timestamps t_list =", "#splits.append([splits[len(splits)-1][1],7200]) #print splits #print \"Wait\" #time.sleep(30) c = 0 #print splits videoids =", "raw_input(\"\\n7.1.2 Would you like to reorganize subtitles to remove lone words? 
(Experimental) (n)", "str(splits) #print str(t_list) for sp in splits: if num > 0: if sp[1]", "(httplib2.HttpLib2Error, IOError, httplib.NotConnected, httplib.IncompleteRead, httplib.ImproperConnectionState, httplib.CannotSendRequest, httplib.CannotSendHeader, httplib.ResponseNotReady, httplib.BadStatusLine) # Always retry when", "= key[0:len(key)-2:] is_array = True if pa == (len(prop_array) - 1): # Leave", "+ str(c) +\".mp4\") media_file = folderName + '/' + fileName + \"_\" +", "minutes...\" # time.sleep(120) sub_txt += subtitle cc = \"\" if c < 10:", "property in that object. prop_array = p.split('.') ref = resource for pa in", "does # not yet have a \"snippet\" object. Create the snippet object here.", "resume a # failed upload. def resumable_upload(request, resource, method): response = None error", "response else: exit(\"The upload failed with an unexpected response: %s\" % response) except", "\" parts\" time.sleep(1) for s in splits: c += 1 if c >", "subtitles to remove lone words? (Experimental) (n) \") answer = verify_y_n_none(answer) if answer", "run if combine_only == True: t1 = int(t[0])*3600 + int(t[1])*60 + float(t[2]) splits.append([t0,t0+t1])", "in text: #add a \\n to the end of each line (why?) t", "as e: print e print \"The file named '\" + fileName + \".txt'", "proper timestamp formatting.\" print \"\\nexiting application...\" time.sleep(2) exit() if len(t) == 2: if", "else: raise except RETRIABLE_EXCEPTIONS, e: error = \"A retriable error occurred: %s\" %", "True: print \"\\nResuming video uploads...\\n\" time.sleep(1) try: with open(folderName + \"/\" + 'videoids.pkl',", "+ str(int(m)) + \":\" + str(int(sec)) #ES: open anita/Anita.txt as myfile try: with", "are no uploads to resume or your 'videoids.pkl' file has gone missing. The", "answer == '': combineSubtitles = True if combineSubtitles == True: answer = raw_input(\"\\n7.1", "answer = verify_y_n_none(answer) if answer == 'y': fullSentenceSubtitles = True elif answer ==", "discourse (e.g. 
\\\"Emmanuel: Hi, I'd like to ask you a few questions...\\\"), please", "second column in http://www.loc.gov/standards/iso639-2/php/code_list.php for the appropriate two-letter 'ISO 639-1' language code.)\\n\") if", "elif answer == 'n': resumeUploads = False elif answer == '': resumeUploads =", "ext #originalVideo = \"venant.mp4\" #interviewer = \"E.H.\" #interviewee = \"E.M.\" #fileName = 'Frederic'", "subtitles that are hard to read) (n) \") answer = verify_y_n_none(answer) if answer", "elif answer == 'n': uploadVideos = False elif answer == '': uploadVideos =", "run. If this is your first time running the tool, simply leave the", "HttpError from apiclient.http import MediaFileUpload from oauth2client.client import flow_from_clientsecrets from oauth2client.file import Storage", "open(folderName + \"/\" + fileName + \".srt\", 'w') #thefile.write(compiledSubs) if uploadFull == True:", "'off' depending on which steps you would like to run. If this is", "subtitles to prioritize keeping full sentences intact? (Experimental; this feature is not recommended", "+ fileName + \". 
\" + str(len(splits)) + \" video snippets will created.", "'\" + fileName + \".txt' was cut into \" + str(len(splits)) + \"", "your hard drive, and then restart the program.\" print \"exiting application...\" time.sleep(2) exit()", "+ \" minutes...\" time.sleep(sleepingTime) id = vid print \"\\nUploading compiled subtitles...\" caption_file =", "get credntials like keys\" search for youtube api click \"YouTube Data API v3\"", "to sync the video and text snippets\\n- downloads the text snippets as subtitle", "raw_input(\"\\nYou have entered '\" + language + \"' as the language code for", "the underlying HTTP transport library not to retry, since # we are handling", "pa == (len(prop_array) - 1): # Leave properties without values out of inserted", "time.sleep(1) interviewer = raw_input(\"\\n6.3.1 Please input your interviewer's name as it appears in", "+ \".mp4\" if not os.path.exists(media_file): exit('Please specify a valid file location.') print \"\\nSnipping", "service.playlistItems().insert( body=resource, **kwargs ).execute() print_results(results) #'snippet.playlistId': playlistID, playlist_items_insert( {'snippet.resourceId.kind': 'youtube#video', 'snippet.resourceId.videoId': vid, 'snippet.position':", "+= t#.encode('utf8') #print t.replace('[','').replace(']','').replace(':','').replace('.','').replace('\\n','').isdigit() #ES: this will aggregate phrases (t) into one list", "that place names are never split between 2 timestamps, at the least. #place-based", "SECTION def verify_y_n(a): while True: a = a.lower().strip() if a == 'y' or", "+ \":\" + str(int(m)) + \":\" + str(int(sec)) #ES: open anita/Anita.txt as myfile", "as f:videoids = pickle.load(f) except Exception as e: print e print \"\\nThe program", "# #print \"Videos:\\n\", \"\\n\".join(videos), \"\\n\" #ES: I don't think this function is ever", "+ fileName + \". 
\" + str(len(splits)) + \" video snippets will therefore", "insert_result[\"id\"] name = insert_result[\"snippet\"][\"name\"] language = insert_result[\"snippet\"][\"language\"] status = insert_result[\"snippet\"][\"status\"] #print \"Uploaded caption", "str(videoids) #print videoids if resumeUploads == True or deleteVideos == True or uploadTranscripts", "= raw_input(\"\\n2/7 Will you be uploading video snippets to Youtube for syncing? (y)", "in the same directory with the code. return build(API_SERVICE_NAME, API_VERSION, http=credentials.authorize(httplib2.Http())) # Explicitly", "= raw_input(\"\\n5/7 Will you be downloading the generated subtitle snippets from Youtube? (y)", "if the CLIENT_SECRETS_FILE is # missing. MISSING_CLIENT_SECRETS_MESSAGE = \"WARNING: Please configure OAuth 2.0\"", "= insert_result[\"id\"] name = insert_result[\"snippet\"][\"name\"] language = insert_result[\"snippet\"][\"language\"] status = insert_result[\"snippet\"][\"status\"] #print \"Uploaded", "'': uploadVideos = True answer = raw_input(\"\\n3/7 Will you be resuming video uploads", "answer == 'n': fullSentenceSubtitles = False elif answer == '': fullSentenceSubtitles = False", "THE VIDEOS if uploadVideos == True: #ES: the following is called when videos", "True: answer = raw_input(\"\\n7.1.1 Would you like to reorganize subtitles to prioritize keeping", "have the available space on your hard drive, and then restart the program.\"", "= raw_input(\"Enter the name of the folder containing your transcript and/or video and/or", "the # resource's \"snippet\" object. ref[key] = {} ref = ref[key] else: #", "+ \" minutes...\" time.sleep(2 * sleepingTime) else: if downloadCaptions == True: with open(folderName", "method implements an exponential backoff strategy to resume a # failed upload. def", "to remove lone words? 
(Experimental) (n) \") answer = verify_y_n_none(answer) if answer ==", "response = None error = None retry = 0 while response is None:", "time.sleep(3) #deletes videos from youtube -ES if deleteVideos == True: print \"\\nDeleting videos...\\n\"", "\"\\nThe program is unable to resume uploads because there are no uploads to", "accurately combining your subtitle files.\\nEach timestamp should be written as follows [HH:MM:SS.00], followed", "'[]': key = key[0:len(key)-2:] is_array = True if pa == (len(prop_array) - 1):", "= vid print \"\\nUploading compiled subtitles...\" caption_file = folderName + '/' + fileName", "video snippets (y) \") answer = verify_y_n_none(answer) if answer == 'y': snipVideos =", "= remove_empty_kwargs(**kwargs) # See full sample for function request = service.videos().insert( body=resource, media_body=MediaFileUpload(media_file,", "texts (a chunk of text prior to timestamp) to thefile thefile.write(\"%s\\n\" % texts[c-1])", "videoids = [] #videoids = [u'jDAZHgL-nG4', u'cMNTnd8pApk', u's5hLO6T_BhY', u'gOAoCh5Mecc', u'p0PX5s6k5DU', u'hSmPkLqOt0M', u'2Ik7_biRs9g', u'G64A_hpNWfI',", "if combine_only == True: t1 = int(t[0])*3600 + int(t[1])*60 + float(t[2]) splits.append([t0,t0+t1]) #print", "elif key not in ref: # For example, the property is \"snippet.title\", but", "previously-uploaded videos if the videos you are uploading are identical. If so, do", "object and \"title\" will be a property in that object. 
prop_array = p.split('.')", "'insert' or 'id' not in response: print response else: exit(\"The upload failed with", "the application at any point by pressing Ctrl+C (Cmd+C on Mac).\" time.sleep(1) print", "a == 'n' or a == '': return a else: a = raw_input(\"Please", "keys\" search for youtube api click \"YouTube Data API v3\" and ENABLE it", "print texts[c] #print \"splits: \" + str(splits) #for i in splits: # print", "\") answer = verify_y_n_none(answer) if answer == 'y': resampleSubtitles = True elif answer", "sync=True ) ), media_body=caption_file ).execute() captionsids.append(a['id']) c += 1 #print a wait =", "leave the following answers blank. For more advanced users or users who have", "snippets ...\" #ES: this is a feature that needs exploration so as to", "program.\" exit() sp1 = sp num+=1 print \"\\nThe document named '\" + fileName", "'Berthe' #originalVideo = \"DD2FD4AE-FEE4-4DF3-9AF7-A4D6BF453B49.flv\" #interviewer = \"S.G.\" #interviewee = \"O.G.\" #folderName = 'oscar'", "True #ES: cut video into snippets based on the transcript's timestamps (must be", "sp in splits: if num > 0: if sp[1] <= sp1[1]: print \"\\nThere", "Will you be uploading video snippets to Youtube for syncing? (y) \") answer", "is not None: if method == 'insert' and 'id' in response: print \"Video", "for instructions.\" print \"exiting application...\" time.sleep(2) exit() print \"\\n\" originalVideo = raw_input(\"Enter the", "t = t.split(\":\") if len(t) > 3 or len(t) < 3: print \"\\nOne", "into a list of cumulative times so that the rest of the pipeline", "(e.g. 
\\\"Emmanuel: Hi, I'd like to ask you a few questions...\\\"), please input", "placeBasedTimestamping = False #ES: resample subtitles to prevent cut-up phrases, lone-word subtitles, and", "c += 1 #ES: printing deets #print t.replace('[','').replace(']','').replace(':','').replace('.','').replace('\\n','').isdigit() #print \"c: \" + str(c)", "= True #ES: cut video into snippets based on the transcript's timestamps (must", "file for your video? (y) \") answer = verify_y_n_none(answer) if answer == 'y':", "pickle.load(f) except Exception as e: print e print \"\\nThe program is unable to", "1 for s in splits: print c,videoids[c-1] service.videos().delete( id=videoids[c-1] ).execute() c += 1", "request = service.videos().insert( body=resource, media_body=MediaFileUpload(media_file, chunksize=-1, resumable=True), **kwargs ) vid = resumable_upload(request, 'video',", "Mb).\" yes_or_no(question) print \"\\n1. Slicing into \" + str(len(splits)) + \" parts &", "begin snipping\" yes_or_no(question) print \"\\n1. Slicing into \" + str(len(splits)) + \" parts\"", "#ES: download snippet subtitle files (.vtt) downloadCaptions = True #ES: delete uploaded video", "raw_input(\"If this is your first time running this tool on the files you", "+ \"/\" + originalVideo, s[0], s[1], targetname=folderName + \"/\" + fileName + \"_\"", "= [] wait = False if uploadTranscripts == True: #print splits,videoids #uploads transcripts", "str(len(splits)) + \" parts & uploading videos...\" time.sleep(1) if len(videoids) > 0: print", "in the next time through the # \"for pa in range ...\" loop,", "these exceptions are raised. RETRIABLE_EXCEPTIONS = (httplib2.HttpLib2Error, IOError, httplib.NotConnected, httplib.IncompleteRead, httplib.ImproperConnectionState, httplib.CannotSendRequest, httplib.CannotSendHeader,", "will aggregate phrases (t) into one list item (a text) until a timestamp", "\"\\nSnipping completed. No further options were selected. 
Exiting...\" exit() #ES: UPLOADS THE VIDEOS", "= True with open(folderName + \"/\" + 'captionsids.pkl', 'wb') as f: pickle.dump(captionsids, f)", "#ES: if the beginning of the line is not a digit and is", "== '': placeBasedTimestamping = False print \"\\n\" folderName = raw_input(\"Enter the name of", "not recommended since subtitle units tend to become excessively long) (n) \") answer", "and several hours, depending on the size of your video file (\" +", "order and that there are no mistakes (see README.md) and restart the program.\"", "from Youtube? (y) \") answer = verify_y_n_none(answer) if answer == 'y': downloadCaptions =", "answer == 'n': resumeUploads = False elif answer == '': resumeUploads = False", "### START BOILERPLATE CODE # Sample Python code for user authorization import httplib2", "to True for other processes to run) snipVideos = True #ES: upload video", "file found because you are not running the entire pipeline. Creating dummy file", "#print splits,splits[len(splits)-1][1] #splits.append([splits[len(splits)-1][1],7200]) #print splits #print \"Wait\" #time.sleep(30) c = 0 #print splits", "True #ES: the following switches control how subtitles are altered when concatenating snippets", "Would you like your uploaded video snippets to be deleted from Youtube once", "\"c: \" + str(c) with open(folderName + \"/\" + fileName + \"_\" +", "cut into \" + str(len(splits)) + \" text snippets based on it containing", "cc = \"0\" + str(c) else: cc = str(c) #print subtitle print cc", "as needed - ES #adjust switches as needed sleepingTime = 400 #___SWITCHES(defaults)___# #ES:", "See full sample for function kwargs = remove_empty_kwargs(**kwargs) # See full sample for", "print \"\\n1. Slicing into \" + str(len(splits)) + \" parts & uploading videos...\"", "snippets will created. 
Continue?\" print \"\\nIf all input was correct, the program will", "and txt files are stored #folderName = 'venant' #fileName refers to the name", "\"_\" + str(c) + \".txt\", 'w') try: #ES: write the previous position of", "else: a = raw_input(\"Please answer 'y' or 'n', or leave the answer blank", "str(2 * sleepingTime / 60) + \" minutes...\" time.sleep(2 * sleepingTime) else: if", "else: cc = str(c) #print subtitle print cc with open(folderName + \"/\" +", "open(folderName + \"/\" + fileName + \"_\" + str(c) + \".txt\", 'w') as", "README.md) and restart the program.\" exit() sp1 = sp num+=1 print \"\\nThe document", "answer = raw_input(\"\\n7.1 Would you like to reorganize subtitles according to punctuation? (Experimental;", "Storage(\"youtube-api-snippets-oauth2.json\") credentials = storage.get() if credentials is None or credentials.invalid: credentials = run_flow(flow,", "= a.lower().strip() if a == 'y' or a == 'n': return a else:", "True: print \"\\nDownloading captions...\" c = 1 waitLonger = True for s in", "Script will resume in \" + str(sleepingTime/60) + \" minutes...\" time.sleep(sleepingTime) #search_response =", "os.stat(folderName + '/' + fileName + '.txt').st_size except Exception as e: print e", "of inserted resource. if properties[p]: if is_array: ref[key] = properties[p].split(',') else: ref[key] =", "recommended, since some timestamp/subtitle units can end up being excessively large) fullSentenceSubtitles =", "running the tool, simply leave the following answers blank. For more advanced users", "True) #ES A feature created by RG that has yet to be explored...", "that contains # the OAuth 2.0 information for this application, including its client_id", "t_1 = time[1].split(\":\") t0 = float(int(t_0[0])*3600) + int(float(t_0[1])*60) + int(float(t_0[2])) t1 = float(int(t_1[0])*3600)", "remove any previously-uploaded videos if the videos you are uploading are identical. 
If", "the following: \" + str(videoids) #print videoids if resumeUploads == True or deleteVideos", "= \"S.G.\" #interviewee = \"O.G.\" #folderName = 'oscar' #fileName = 'oscar' #originalVideo =", "to resume a # failed upload. def resumable_upload(request, resource, method): response = None", "called... # Call the API's captions.insert method to upload a caption track in", "raw_input(\"\\n7.2 Would you like to reorganize subtitles according to the presence of place", "It is\",strftime(\"%H:%M:%S\", localtime()),\". Script will resume in \" + str(2 * sleepingTime /", "#ES: open anita/Anita.txt as myfile try: with open(folderName + \"/\" + fileName +", "it appears in the transcript: \") print \"\\n\" #____________# # let rodolphe know", "is unable to resume uploads because there are no uploads to resume or", "= service.captions().insert( part=\"snippet\", body=dict( snippet=dict( videoId=videoids[c-1], language=language, name=media_file, isDraft=True, sync=True ) ), media_body=caption_file", "+ 'captionsids.pkl', 'wb') as f: pickle.dump(captionsids, f) print \"Waiting for transcripts to be", "= t0 + t1 else: t1 = int(t[0])*3600 + int(t[1])*60 + float(t[2]) splits.append([t0,t1])", "int(float(t_1[1])*60) + int(float(t_1[2])) return [t0,t1] def s_to_hms(seconds): m, sec = divmod(seconds, 60) h,", "of the video file including its ext #originalVideo = \"venant.mp4\" #interviewer = \"E.H.\"", "finish pipeline.\" foo = open(folderName + \"/\" + \"delete me.txt\",\"w+\") foo.close() with open(folderName", "stitches these subtitle files together into a single subtitle file for your video.\\n\\nYou", "different functionality down the road combine_only = True fileName = raw_input(\"In order to", "is your first time running this tool on the files you have indicated,", "try: print \"Uploading file...\" status, response = request.next_chunk() if response is not None:", "60) + \" minutes...\" time.sleep(2 * sleepingTime) else: if downloadCaptions == True: with", "int(t[1]) 
splits.append([t0,t0+t1]) t_list.append(t1) t0 = t0 + t1 else: t1 = int(t[0])*60 +", "u'RUgi4NfoPEw', u'n40bGD_9eZI', u'OWWAQTGKyMI', u'8a2De6Gzfek', u'VQJgxR3iAoA', u'UEzrAMq6fGc', u'PXCHMF-Z7X4', u'SU_Rbp9V_Zo', u'VLhSxDh9gI0', u'80rY1RlbVQw', u'1yumt5fRBF4', u'u5qAHXhhJoo', u'G3gO6DW-wrM',", "method): response = None error = None retry = 0 while response is", "uploaded video.', 'snippet.tags[]': '', 'snippet.title': fileName, 'status.embeddable': '', 'status.license': '', 'status.privacyStatus': 'unlisted', 'status.publicStatsViewable':", "in t: #increase pos on texts by 1 c += 1 #ES: printing", "like \"snippet.tags[]\" to snippet.tags, but handle # the value as an array. if", "backoff strategy to resume a # failed upload. def resumable_upload(request, resource, method): response", "a playlist in youtube online and copy url id to script #playlistID =", "snippet=dict( videoId=id, language=language, name=originalVideo, isDraft=True, sync=False ) ), media_body=caption_file ).execute() print \"\\nFull video", "'resampleSubtitles' (above), you have the option to make subtitle entries full sentences (not", "str(videoSize) + \" Mb available space on your hard drive to run this", "video_id, language, name, file): insert_result = youtube.captions().insert( part=\"snippet\", body=dict( snippet=dict( videoId=video_id, language=language, name=name,", "the answer blank by hitting 'Enter': \") continue print \"\\n\\n\" print \"This application", "myfile: text = myfile.read().replace('\\n', '') #print \"ES: replace \\\\n with ''\" with open(folderName", "video is soon available on your Youtube channel for you to check and", "= storage.get() if credentials is None or credentials.invalid: credentials = run_flow(flow, storage, args)", "be deleted from Youtube once subtitles have been successfully generated? 
(n) \") answer", "response: print \"Video id '%s' was successfully uploaded.\" % response['id'] videoid = response['id']", "video uploads...\\n\" time.sleep(1) try: with open(folderName + \"/\" + 'videoids.pkl', 'rb') as f:videoids", "answer == '': removeLoneWords = False answer = raw_input(\"\\n7.2 Would you like to", "time through the # \"for pa in range ...\" loop, we will be", "Remove keyword arguments that are not set def remove_empty_kwargs(**kwargs): good_kwargs = {} if", "downloadCaptions = True elif answer == 'n': downloadCaptions = False elif answer ==", "of each video to which your subtitle files are associated. These values will", "space on your hard drive, and then restart the program.\" print \"exiting application...\"", "print \"Sleeping %f seconds and then retrying...\" % sleep_seconds time.sleep(sleep_seconds) return response['id'] if", "\"\\nThere is a problem with one of your timestamps:\" print \"Timestamp number #\",str(num+2),\"", "open(folderName + \"/\" + fileName + \"_\" + str(cc) + \".vtt\", 'w') as", "for your video? (y) \") answer = verify_y_n_none(answer) if answer == 'y': combineSubtitles", "in splits: print c,s,captionsids[c-1] sub_txt = \"\" # while waitLonger == True: #", "name=media_file, isDraft=True, sync=True ) ), media_body=caption_file ).execute() captionsids.append(a['id']) c += 1 #print a", "snippets\\n- downloads the text snippets as subtitle files (.vtt),\\n- stitches these subtitle files", "2 timestamps, at the least. #place-based time stamping can be set to True", "...\" loop, we will be setting a property in the # resource's \"snippet\"", "you like to reorganize subtitles according to punctuation? (Experimental; can lead to short,", "to the presence of place names? 
(Experimental) (n) \") answer = verify_y_n_none(answer) if", "en, fr, es, etc.):\\n(You can refer to the second column in http://www.loc.gov/standards/iso639-2/php/code_list.php for", "# Remove keyword arguments that are not set def remove_empty_kwargs(**kwargs): good_kwargs = {}", "code.)\\n\") if language != '': verifyLanguage = raw_input(\"\\nYou have entered '\" + language", "Given a key like \"snippet.title\", split into \"snippet\" and \"title\", where # \"snippet\"", "'insert') # See full sample for function return vid def hms_to_s(time): time =", "current directory. Please see README.md for instructions.\" print \"exiting application...\" time.sleep(2) exit() print", "create a list of timestamps demarcating the length of each video to which", "== 'n': combineSubtitles = False elif answer == '': combineSubtitles = True if", "c in texts (a chunk of text prior to timestamp) to thefile thefile.write(\"%s\\n\"", "because there are no uploads to resume or your 'videoids.pkl' file has gone", "\"title\", where # \"snippet\" will be an object and \"title\" will be a", "uploadTranscripts = True answer = raw_input(\"\\n5/7 Will you be downloading the generated subtitle", "as f: pickle.dump(videoids, f) else: if resumeUploads == True or deleteVideos == True", "Please input your interviewer's name as it appears in the transcript: \") interviewee", "thefile thefile.write(\"%s\\n\" % texts[c-1]) #time.sleep(.1) texts.append(\"\") texts[c] = \"\" #t = t.replace(\" \",", "(.txt) into text snippets based on its timestamps,\\n- snips the associated video accordingly", "make sure you have the available space on your hard drive, and then", "Slicing into \" + str(len(splits)) + \" parts\" time.sleep(1) for s in splits:", "variable specifies the name of a file that contains # the OAuth 2.0", "first time running this tool on the files you have indicated, you will", "elif answer == '': removeLoneWords = False answer = raw_input(\"\\n7.2 Would you like", "split between 2 timestamps, 
at the least. #place-based time stamping can be set", "== True: print \"\\nWaiting for videos to be processed. It is\",strftime(\"%H:%M:%S\", localtime()),\". Script", "snipTranscript = True #ES: cut video into snippets based on the transcript's timestamps", "time.sleep(1) print \"You may terminate the application at any point by pressing Ctrl+C", "the option to make subtitle entries full sentences (not recommended, since some timestamp/subtitle", "blank by hitting 'Enter': \") continue print \"\\n\\n\" print \"This application creates subtitles", "snippets,\\n- uploads these video snippets to Youtube as private videos only visible to", "= False answer = raw_input(\"\\n4/7 Will you be uploading text snippets for syncing", "but handle # the value as an array. if key[-2:] == '[]': key", "answer == 'y': uploadTranscripts = True elif answer == 'n': uploadTranscripts = False", "502, 503, 504] # This method implements an exponential backoff strategy to resume", "any options for running this application. Exiting...\" exit() while True: language = raw_input(\"Enter", "your transcript, simply leave the following two answers blank by pressing the 'Enter'", "simply leave the following two answers blank by pressing the 'Enter' key.\" time.sleep(1)", "a == 'n': return a else: a = raw_input(\"Please answer 'y' or 'n':", "is # missing. MISSING_CLIENT_SECRETS_MESSAGE = \"WARNING: Please configure OAuth 2.0\" # Authorize the", "timestamps,\\n- snips the associated video accordingly into video snippets,\\n- uploads these video snippets", "sync the video and text snippets\\n- downloads the text snippets as subtitle files", "video/caption upload process was terminated unexpectedly before and you want to continue where", "= True if combineSubtitles == True: answer = raw_input(\"\\n7.1 Would you like to", "the program will begin snipping and uploading content to Youtube for processing. 
This", "in \" + str(2) + \" minutes...\" # time.sleep(120) sub_txt += subtitle cc", "would like to run: \\n\\n\" time.sleep(2) answer = raw_input(\"\\n1/7 Will you be cutting", "\"\\nVerifying if timestamps are in ascending order...\" sp1 = 0 num = 0", "u'WpreZ_gbEyw'] #with open(folderName + \"/\" + 'videoids.pkl', 'wb') as f: # pickle.dump(videoids, f)", "\"_\" + str(cc) + \".vtt\", 'w') as thefile: #thefile.write(sub_txt) thefile.write(subtitle) if cc ==", "body=resource, media_body=MediaFileUpload(media_file, chunksize=-1, resumable=True), **kwargs ) vid = resumable_upload(request, 'video', 'insert') # See", "video for which you have an associated transcript. Make sure you have gone", "= True #ES: upload video snippets uploadVideos = True #ES: if the video/caption", "#interviewer = \"M.M.\" #interviewee = \"B.K.\" #fileName = 'Berthe' #originalVideo = \"DD2FD4AE-FEE4-4DF3-9AF7-A4D6BF453B49.flv\" #interviewer", "== 'n': snipVideos = False elif answer == '': snipVideos = True answer", "print \"\\n\" videoSize = os.stat(folderName + '/' + originalVideo).st_size/1000000 answer = raw_input(\"If this", "= False answer = raw_input(\"\\n7.1.2 Would you like to reorganize subtitles to remove", "several print commands were added for guidance. they can be removed. #ES: a", "(into 3) t = t.split(\":\") if len(t) > 3 or len(t) < 3:", "sync=False ) ), media_body=caption_file ).execute() print \"\\nFull video is soon available on your", "= insert_result[\"snippet\"][\"name\"] language = insert_result[\"snippet\"][\"language\"] status = insert_result[\"snippet\"][\"status\"] #print \"Uploaded caption track '%s(%s)", "to be deleted from Youtube once subtitles have been successfully generated? 
(n) \")", "of your video (this time including the file's extention): \") try: verifyExistence =", "u'UEzrAMq6fGc', u'PXCHMF-Z7X4', u'SU_Rbp9V_Zo', u'VLhSxDh9gI0', u'80rY1RlbVQw', u'1yumt5fRBF4', u'u5qAHXhhJoo', u'G3gO6DW-wrM', u'qAU_8DNEqP8', u'fbGaOVHXkvY', u'_Knl1rP8Z9w', u'O6f8ZWjSgiw', u'uXY-00DuLjY',", "\"/\" + 'captionsids.pkl', 'rb') as f: captionsids = pickle.load(f) #if wait == True:", "is not None: print error retry += 1 if retry > MAX_RETRIES: exit(\"No", "subtitle c += 1 time.sleep(3) #deletes videos from youtube -ES if deleteVideos ==", "c += 1 time.sleep(10) if combineSubtitles == True: #compiles them all print \"\\nCombining", "speaker names (e.g. the interviewer or interviewee's names) that precede their discourse (e.g.", "function kwargs = remove_empty_kwargs(**kwargs) # See full sample for function results = service.playlistItems().insert(", "print \"\\nVerifying if timestamps are in ascending order...\" sp1 = 0 num =", "and therefore the following condition is never met. 
if t != \"\" and", "= True #ES: if the video/caption upload process was terminated unexpectedly before and", "parts\" time.sleep(1) for s in splits: c += 1 if c > len(videoids):", "subtitle snippets ...\" #ES: this is a feature that needs exploration so as", "error is not None: print error retry += 1 if retry > MAX_RETRIES:", "deleteVideos == True or uploadTranscripts == True: with open(folderName + \"/\" + 'videoids.pkl',", "= folderName + '/' + fileName + \".srt\" service.captions().insert( part=\"snippet\", body=dict( snippet=dict( videoId=id,", "video accordingly into video snippets,\\n- uploads these video snippets to Youtube as private", "let rodolphe know if there is a problem with playlist id, might need", "choppy, fast subtitles that are hard to read) (n) \") answer = verify_y_n_none(answer)", "you will temporarily require \" + str(videoSize) + \" Mb available space on", "\\\".txt\\\" extention): \") else: print \"You have not chosen any options for running", "printing deets #print t.replace('[','').replace(']','').replace(':','').replace('.','').replace('\\n','').isdigit() #print \"c: \" + str(c) with open(folderName + \"/\"", "codes is raised. RETRIABLE_STATUS_CODES = [500, 502, 503, 504] # This method implements", "\" + str(len(splits)) + \" video snippets will created. 
Continue?\" print \"\\nIf all", "len(prop_array)): is_array = False key = prop_array[pa] # Convert a name like \"snippet.tags[]\"", "int(t[0])*3600 + int(t[1])*60 + float(t[2]) splits.append([t0,t0+t1]) #print int(t[0])*3600 + int(t[1])*60 + int(t[2]) t_list.append(t1)", "f) else: if resumeUploads == True or deleteVideos == True or uploadTranscripts ==", "be set to True for other processes to run) snipTranscript = True #ES:", "if reply[0] == '': return True if reply[0] == 'n': exit() if uploadVideos", "verify_y_n_none(answer) if answer == 'y': combineSubtitles = True elif answer == 'n': combineSubtitles", "splits,splits[len(splits)-1][1] #splits.append([splits[len(splits)-1][1],7200]) #print splits #print \"Wait\" #time.sleep(30) c = 0 #print splits videoids", "answer == '': placeBasedTimestamping = False print \"\\n\" folderName = raw_input(\"Enter the name", "answer = verify_y_n_none(answer) if answer == 'y': downloadCaptions = True elif answer ==", "#fileName = 'venant' #originalVideo refers to the name of the video file including", "verifyExistence = os.stat(folderName + '/' + fileName + '.txt').st_size except Exception as e:", "create a client secret file: google apis dashboard --> create a new project", "names) that precede their discourse (e.g. \\\"Emmanuel: Hi, I'd like to ask you", "\"\\nResuming video uploads...\\n\" time.sleep(1) try: with open(folderName + \"/\" + 'videoids.pkl', 'rb') as", "#folderName = 'venant' #fileName refers to the name of the input .txt file", "switches control how subtitles are altered when concatenating snippets (i.e. 
when combineSubtitles =", "+ \"_\" + str(c) + \".mp4\" if not os.path.exists(media_file): exit('Please specify a valid", "\"\\n\" #____________# # let rodolphe know if there is a problem with playlist", "time.sleep(sleepingTime) #search_response = service.search().list( # q=\"Anita\", # part=\"id\", # type=\"video\", # fields=\"items/id\" #).execute()", "+= 1 wait = True with open(folderName + \"/\" + 'videoids.pkl', 'wb') as", "vid = videos_insert( {'snippet.categoryId': '22', 'snippet.defaultLanguage': language, 'snippet.description': 'Description of uploaded video.', 'snippet.tags[]':", "raw_input(\"\\n3/7 Will you be resuming video uploads from a previously-initiated process? (n) \")", "\"\\nUploading transcripts...\" for s in splits: print c,s media_file = folderName + '/'", "functionality down the road combine_only = True fileName = raw_input(\"In order to accurately", "a property in that object. prop_array = p.split('.') ref = resource for pa", "if answer == 'y': uploadTranscripts = True elif answer == 'n': uploadTranscripts =", "This method implements an exponential backoff strategy to resume a # failed upload.", "response) except HttpError, e: if e.resp.status in RETRIABLE_STATUS_CODES: error = \"A retriable HTTP", "= raw_input(\"\\n6.3.1 Please input your interviewer's name as it appears in the transcript:", "m = divmod(m, 60) #print str(int(h)) + \":\" + str(int(m)) + \":\" +", "t is a timestamp #ES: removing punctuation from '[00:00:01.09]' since it is never", "how many videos will be uploaded. question = \"\\nThere were \" + str(len(splits))", "= build_resource(properties) # See full sample for function kwargs = remove_empty_kwargs(**kwargs) # See", "an associated transcript. 
Make sure you have gone over README.md before proceeding.\" time.sleep(1)", "if snipTranscript == True: for t in text: #add a \\n to the", "sp[1] <= sp1[1]: print \"\\nThere is a problem with one of your timestamps:\"", "This may take between 20 minutes and several hours, depending on the size", "= ref[key]\" means that in the next time through the # \"for pa", "= True elif answer == 'n': uploadTranscripts = False elif answer == '':", "subtitle file for your video? (y) \") answer = verify_y_n_none(answer) if answer ==", "and requires requests to use an SSL connection. YOUTUBE_READ_WRITE_SSL_SCOPE = \"https://www.googleapis.com/auth/youtube.force-ssl\" API_SERVICE_NAME =", "503, 504] # This method implements an exponential backoff strategy to resume a", "property is \"snippet.title\", but the resource does # not yet have a \"snippet\"", "+ \" timestamps detected in \" + fileName + \". \" + str(len(splits))", "+ originalVideo, s[0], s[1], targetname=folderName + \"/\" + fileName + \"_\" + str(c)", "localtime()),\". Script will resume in \" + str(2 * sleepingTime / 60) +", "+ \"/\" + 'videoids.pkl', 'wb') as f: pickle.dump(videoids, f) else: if resumeUploads ==", "indicated, you will temporarily require \" + str(videoSize) + \" Mb available space", "vid = videos_insert( {'snippet.categoryId': '22', 'snippet.defaultLanguage': language, 'snippet.defaultAudioLanguage': language, 'snippet.description': 'Description of uploaded", "together into a single subtitle file for your video.\\n\\nYou may switch these processes", "video uploads from a previously-initiated process? (n) \") answer = verify_y_n_none(answer) if answer", "not set def remove_empty_kwargs(**kwargs): good_kwargs = {} if kwargs is not None: for", "the following answers blank. 
For more advanced users or users who have already", "your transcript (.txt) into text snippets based on its timestamps,\\n- snips the associated", "print \"exiting application...\" time.sleep(2) exit() print \"\\n\" originalVideo = raw_input(\"Enter the file name", "'y': break #if combineSubtitles == True: print \"\\n\\n\" print \"\\n6.3 If your transcript", "its ext #originalVideo = \"venant.mp4\" #interviewer = \"E.H.\" #interviewee = \"E.M.\" #fileName =", "+ \"/\" + fileName + \"_\" + str(c) + \".txt\", 'w') as thefile:", "u'u5qAHXhhJoo', u'G3gO6DW-wrM', u'qAU_8DNEqP8', u'fbGaOVHXkvY', u'_Knl1rP8Z9w', u'O6f8ZWjSgiw', u'uXY-00DuLjY', u'WpreZ_gbEyw'] #with open(folderName + \"/\" +", "''}, part='snippet', onBehalfOfContentOwner='') print \"Waiting for full video to be processed. It is\",strftime(\"%H:%M:%S\",", "file for your video.\\n\\nYou may switch these processes 'on' or 'off' depending on", "\"The file named '\" + fileName + \".txt' does not exist in the", "(this time including the file's extention): \") try: verifyExistence = os.stat(folderName + '/'", "END BOILERPLATE CODE # Sample python code for videos.insert def videos_insert(properties, media_file, **kwargs):", "fullSentenceSubtitles = False answer = raw_input(\"\\n7.1.2 Would you like to reorganize subtitles to", "language != '': verifyLanguage = raw_input(\"\\nYou have entered '\" + language + \"'", "for this application, including its client_id and # client_secret. 
\"\"\" to create a", "subtitles to prevent cut-up phrases, lone-word subtitles, and improve the subtitle structure overall", "text snippets to Youtube as transcript files for these video snippets,\\n- allows Youtube", "request.next_chunk() if response is not None: if method == 'insert' and 'id' in", "'n': resampleSubtitles = False elif answer == '': resampleSubtitles = False if resampleSubtitles", "try: with open(folderName + \"/\" + 'videoids.pkl', 'rb') as f:videoids = pickle.load(f) except", "= 'oscar' #originalVideo = \"Oscar.mp4\" ### START BOILERPLATE CODE # Sample Python code", "Slicing into \" + str(len(splits)) + \" parts & uploading videos...\" time.sleep(1) if", "are in ascending order...\" sp1 = 0 num = 0 #print str(splits) #print", "INPUT TEXT FOR PROCESSING if snipTranscript == True: for t in text: #add", "vid, 'snippet.position': ''}, part='snippet', onBehalfOfContentOwner='') print \"Waiting for full video to be processed.", "the \\\".txt\\\" extention): \") else: print \"You have not chosen any options for", "name of your transcript (excluding the \\\".txt\\\" extention): \") try: verifyExistence = os.stat(folderName", "named '\" + originalVideo + \"' does not exist in the folder '\"", "then restart the program.\" print \"exiting application...\" time.sleep(2) exit() print \"\\n\" elif combineSubtitles", "splits: if num > 0: if sp[1] <= sp1[1]: print \"\\nThere is a", "\"exiting application...\" time.sleep(2) exit() print \"\\n\" elif combineSubtitles == True: #in this case,", "text = myfile.read().replace('\\n', '') #print \"ES: replace \\\\n with ''\" with open(folderName +", "c += 1 time.sleep(3) #deletes videos from youtube -ES if deleteVideos == True:", "if combineSubtitles == True: #compiles them all print \"\\nCombining subtitle snippets ...\" #ES:", "import imageio imageio.plugins.ffmpeg.download() from moviepy.video.io.ffmpeg_tools import ffmpeg_extract_subclip import time from time import strftime,localtime", "+ fileName + \"_\" + str(c) 
+ \".mp4\" if not os.path.exists(media_file): exit('Please specify", "c = 1 waitLonger = True for s in splits: print c,s,captionsids[c-1] sub_txt", "prevent cut-up phrases, lone-word subtitles, and improve the subtitle structure overall (can lead", "character variables import imageio imageio.plugins.ffmpeg.download() from moviepy.video.io.ffmpeg_tools import ffmpeg_extract_subclip import time from time", "combineSubtitles = True) #ES A feature created by RG that has yet to", "= \"youtube\" API_VERSION = \"v3\" # This variable defines a message to display", "= False if uploadTranscripts == True: #print splits,videoids #uploads transcripts print \"\\nUploading transcripts...\"", "for processing your files. Continue? (y/n) \") if verifyLanguage.lower() == '' or 'y':", "elif answer == 'n': removeLoneWords = False elif answer == '': removeLoneWords =", "to position c of texts texts[c] += t#.encode('utf8') #print t.replace('[','').replace(']','').replace(':','').replace('.','').replace('\\n','').isdigit() #ES: this will", "SSL connection. 
YOUTUBE_READ_WRITE_SSL_SCOPE = \"https://www.googleapis.com/auth/youtube.force-ssl\" API_SERVICE_NAME = \"youtube\" API_VERSION = \"v3\" # This", "= True #ES: the following switches control how subtitles are altered when concatenating", "text) until a timestamp is reached #ES: if t is a timestamp #ES:", "+\".mp4\") media_file = folderName + '/' + fileName + \"_\" + str(c) +", "== 'n': deleteVideos = False elif answer == '': deleteVideos = False answer", "True if reply[0] == '': return True if reply[0] == 'n': exit() if", "60) h, m = divmod(m, 60) #print str(int(h)) + \":\" + str(int(m)) +", "= False key = prop_array[pa] # Convert a name like \"snippet.tags[]\" to snippet.tags,", "which processes you would like to run: \\n\\n\" time.sleep(2) answer = raw_input(\"\\n1/7 Will", "texts[c] = \"\" #t = t.replace(\" \", \"\") #t = t t =", "instructions.\" print \"exiting application...\" time.sleep(2) exit() print \"\\n\" videoSize = os.stat(folderName + '/'", "allows some different functionality down the road combine_only = True fileName = raw_input(\"In", "if error is not None: print error retry += 1 if retry >", "True elif answer == 'n': snipVideos = False elif answer == '': snipVideos", "'n': \") continue def verify_y_n_none(a): while True: a = a.lower().strip() if a ==", "arguments that are not set def remove_empty_kwargs(**kwargs): good_kwargs = {} if kwargs is", "run_flow(flow, storage, args) # Trusted testers can download this discovery document from the", "myfile.read().replace('\\n', '') #print \"ES: replace \\\\n with ''\" with open(folderName + \"/\" +", "fileName + \". \" + str(len(splits)) + \" video snippets will therefore be", "#ES: combine vtt snippets that were downloaded from Youtube into a total subtitle", "the resource does # not yet have a \"snippet\" object. 
Create the snippet", "and \"title\", where # \"snippet\" will be an object and \"title\" will be", "t != \"\" and t.replace('[','').replace(']','').replace(':','').replace('.','').replace('\\n','').isdigit() and \"[\" in t: #increase pos on texts", "'.txt').st_size except Exception as e: print e print \"The file named '\" +", "are handling retry logic ourselves. httplib2.RETRIES = 1 # Maximum number of times", "have an associated transcript. Make sure you have gone over README.md before proceeding.\"", "were downloaded from Youtube into a total subtitle file. combineSubtitles = True #ES:", "for other processes to run) snipTranscript = True #ES: cut video into snippets", "combineSubtitles == True: #in this case, the user has chosen to only combine", "refers to the name of the input .txt file (excluding .txt) #fileName =", "= a.lower().strip() if a == 'y' or a == 'n' or a ==", "the switch combine_only allows some different functionality down the road combine_only = True", "successfully uploaded.\" % response['id'] videoid = response['id'] elif method != 'insert' or 'id'", "once subtitle processing is complete deleteVideos = False #ES: upload the full video", "first time running the tool, simply leave the following answers blank. 
For more", "from oauth2client.tools import argparser, run_flow # The CLIENT_SECRETS_FILE variable specifies the name of", "videos from youtube -ES if deleteVideos == True: print \"\\nDeleting videos...\\n\" c =", "u'gOAoCh5Mecc', u'p0PX5s6k5DU', u'hSmPkLqOt0M', u'2Ik7_biRs9g', u'G64A_hpNWfI', u'ZzVVEcGekv0', u'ZxKJhN3JFfI', u'TsDnqWmpvrw', u'Kvem1XnPHF0', u'VwqhkmbiLh0', u'V1sv1MYLdC0'] #videoids =", "cut-up phrases, lone-word subtitles, and improve the subtitle structure overall (can lead to", "sample for function request = service.videos().insert( body=resource, media_body=MediaFileUpload(media_file, chunksize=-1, resumable=True), **kwargs ) vid", "playlist_items_insert( {'snippet.resourceId.kind': 'youtube#video', 'snippet.resourceId.videoId': vid, 'snippet.position': ''}, part='snippet', onBehalfOfContentOwner='') print \"Waiting for full", "\" Mb).\" yes_or_no(question) print \"\\n1. Slicing into \" + str(len(splits)) + \" parts", "resource # Remove keyword arguments that are not set def remove_empty_kwargs(**kwargs): good_kwargs =", "have the option to remove subtitle entries which may be a single word", "media_body=MediaFileUpload(media_file, chunksize=-1, resumable=True), **kwargs ) vid = resumable_upload(request, 'video', 'insert') # See full", "if len(t) == 2: if combine_only == True: t1 = int(t[0])*60 + int(t[1])", "= \"\\nThere were \" + str(len(splits)) + \" timestamps detected in \" +", "need to create a playlist in youtube online and copy url id to", "\".srt\" service.captions().insert( part=\"snippet\", body=dict( snippet=dict( videoId=id, language=language, name=originalVideo, isDraft=True, sync=False ) ), media_body=caption_file", "(why?) 
t += \"\\n\" #ES: if the beginning of the line is not", "0 #print str(splits) #print str(t_list) for sp in splits: if num > 0:", "terminate the application at any point by pressing Ctrl+C (Cmd+C on Mac).\" time.sleep(1)", "uploaded video snippets to be deleted from Youtube once subtitles have been successfully", "snipVideos = True #ES: upload video snippets uploadVideos = True #ES: if the", "retry.\") max_sleep = 2 ** retry sleep_seconds = random.random() * max_sleep print \"Sleeping", "as subtitle files (.vtt),\\n- stitches these subtitle files together into a single subtitle", "#ES: this will aggregate phrases (t) into one list item (a text) until", "\") answer = verify_y_n_none(answer) if answer == 'y': placeBasedTimestamping = True elif answer", "resource = {} for p in properties: # Given a key like \"snippet.title\",", "= \"E.H.\" #interviewee = \"E.M.\" #fileName = 'Frederic' #originalVideo = \"Frederic.mov\" #interviewer =", "Always retry when these exceptions are raised. RETRIABLE_EXCEPTIONS = (httplib2.HttpLib2Error, IOError, httplib.NotConnected, httplib.IncompleteRead,", "reorganize subtitles to remove lone words? 
(Experimental) (n) \") answer = verify_y_n_none(answer) if", "this into a list of cumulative times so that the rest of the", "smaller.\" print \"Please make sure your timestamps are in ascending order and that", "raw_input(\"Enter the file name of your transcript (excluding the \\\".txt\\\" extention): \") try:", "+ \" minutes...\" time.sleep(sleepingTime) #search_response = service.search().list( # q=\"Anita\", # part=\"id\", # type=\"video\",", "\"_\" + str(c) + \".flv\" caption_file = folderName + '/' + fileName +", "= False elif answer == '': uploadTranscripts = True answer = raw_input(\"\\n5/7 Will", "know if there is a problem with playlist id, might need to create", "the # authenticated user's account and requires requests to use an SSL connection.", "...\" #ES: this is a feature that needs exploration so as to make", "file name of your timestamp list (excluding the \\\".txt\\\" extention): \") else: print", "'Enter': \") continue print \"\\n\\n\" print \"This application creates subtitles for a video", "# client_secret. \"\"\" to create a client secret file: google apis dashboard -->", "text file \"oscar4.txt\" #might have to be in a folder name called oscar", "combineSubtitles = True #ES: the following switches control how subtitles are altered when", "verifyExistence = os.stat(folderName).st_size except Exception as e: print e print \"The folder named", "subtitle files are associated. 
These values will be used as offsets for accurately", "if the beginning of the line is not a digit and is not", "called when videos are being uploaded (uploadVideos = True) to warn the user", "= False elif answer == '': deleteVideos = False answer = raw_input(\"\\n7/7 Will", "'snippet.description': 'Description of uploaded video.', 'snippet.tags[]': '', 'snippet.title': media_file, 'status.embeddable': '', 'status.license': '',", "'y': downloadCaptions = True elif answer == 'n': downloadCaptions = False elif answer", "sample for function results = service.playlistItems().insert( body=resource, **kwargs ).execute() print_results(results) #'snippet.playlistId': playlistID, playlist_items_insert(", "does not exist in the folder '\" + folderName + \"'. Please see", "to short, choppy, fast subtitles that are hard to read) (n) \") answer", "since subtitle units tend to become excessively long) (n) \") answer = verify_y_n_none(answer)", "not in response: print response else: exit(\"The upload failed with an unexpected response:", "if the video/caption upload process was terminated unexpectedly before and you want to", "for sp in splits: if num > 0: if sp[1] <= sp1[1]: print", "key, value in kwargs.iteritems(): if value: good_kwargs[key] = value return good_kwargs ### END", "= videos_insert( {'snippet.categoryId': '22', 'snippet.defaultLanguage': language, 'snippet.defaultAudioLanguage': language, 'snippet.description': 'Description of uploaded video.',", "of uploaded video.', 'snippet.tags[]': '', 'snippet.title': media_file, 'status.embeddable': '', 'status.license': '', 'status.privacyStatus': 'unlisted',", "+ \" text snippets based on it containing \" + str(len(splits)) + \"", "appropriate two-letter 'ISO 639-1' language code.)\\n\") if language != '': verifyLanguage = raw_input(\"\\nYou", "% (search_result[\"id\"][\"videoId\"])) # #print \"Videos:\\n\", \"\\n\".join(videos), \"\\n\" #ES: I don't think this function", "\" minutes...\" time.sleep(sleepingTime) 
#search_response = service.search().list( # q=\"Anita\", # part=\"id\", # type=\"video\", #", "'\" + folderName + \"'. Please see README.md for instructions.\" print \"exiting application...\"", "units tend to become excessively long) (n) \") answer = verify_y_n_none(answer) if answer", "problem with playlist id, might need to create a playlist in youtube online", "time.sleep(120) sub_txt += subtitle cc = \"\" if c < 10: cc =", "from oauth2client.file import Storage from oauth2client.tools import argparser, run_flow # The CLIENT_SECRETS_FILE variable", "== (len(prop_array) - 1): # Leave properties without values out of inserted resource.", "when combineSubtitles = True) #ES A feature created by RG that has yet", "from your Youtube account once subtitle processing is complete deleteVideos = False #ES:", "\"\\n\" originalVideo = raw_input(\"Enter the file name of your video (this time including", "option to make subtitle entries full sentences (not recommended, since some timestamp/subtitle units", "\".txt' does not exist in the folder '\" + folderName + \"'. Please", "= float(int(t_0[0])*3600) + int(float(t_0[1])*60) + int(float(t_0[2])) t1 = float(int(t_1[0])*3600) + int(float(t_1[1])*60) + int(float(t_1[2]))", "u's5hLO6T_BhY', u'gOAoCh5Mecc', u'p0PX5s6k5DU', u'hSmPkLqOt0M', u'2Ik7_biRs9g', u'G64A_hpNWfI', u'ZzVVEcGekv0', u'ZxKJhN3JFfI', u'TsDnqWmpvrw', u'Kvem1XnPHF0', u'VwqhkmbiLh0', u'V1sv1MYLdC0'] #videoids", "a previously-initiated process? 
(n) \") answer = verify_y_n_none(answer) if answer == 'y': resumeUploads", "str(c) + \".txt\" #print s,media_file,caption_file,videoids[c-1] a = service.captions().insert( part=\"snippet\", body=dict( snippet=dict( videoId=videoids[c-1], language=language,", "depending on the size of your video file (\" + str(videoSize) + \"", "flow = flow_from_clientsecrets(CLIENT_SECRETS_FILE, scope=YOUTUBE_READ_WRITE_SSL_SCOPE, message=MISSING_CLIENT_SECRETS_MESSAGE) storage = Storage(\"youtube-api-snippets-oauth2.json\") credentials = storage.get() if credentials", "# codes is raised. RETRIABLE_STATUS_CODES = [500, 502, 503, 504] # This method", "\"' as the language code for your transcript and video files. Youtube will", "video to which your subtitle files are associated. These values will be used", "-ES if deleteVideos == True: print \"\\nDeleting videos...\\n\" c = 1 for s", "answer == 'n': resampleSubtitles = False elif answer == '': resampleSubtitles = False", "files.\\nEach timestamp should be written as follows [HH:MM:SS.00], followed by a newline.\\n\\nPlease enter", "upload. 
def resumable_upload(request, resource, method): response = None error = None retry =", "char #ES: removing punctuation from '[00:00:01.09]' since it is never qualified as a", "\"splits: \" + str(splits) #for i in splits: # print s_to_hms(i[0]),\"->\",s_to_hms(i[1]) #time.sleep(60) #print", "fields=\"items/id\" #).execute() # #videos = [] # #for search_result in search_response.get(\"items\", []): #", "= \"client_secret.json\" #api key is <KEY> #client id is in client_id.json CLIENT_SECRETS_FILE =", "is the following\" + str(text) #ES: strip whitespace text = [x.strip() for x", "'status.embeddable': '', 'status.license': '', 'status.privacyStatus': 'unlisted', 'status.publicStatsViewable': ''}, media_file, part='snippet,status') videoids.append(vid) print videoids", "+ language + \"' as the language code for your transcript and video", "except Exception as e: print e print \"The file named '\" + fileName", "u'hWY_30yHOec', u'GrMtKARI9kQ', u'YDHnQAE7U0w', u'yc4IXkGHuXs', u'ZauR51lBjQo', u'kisoEOTjmVI', u'V9XdpjtUU4Q', u'eOdKfhePfTs', u'AAQ9YuybUxM', u'3BaTzSSL4_c', u'OriOoB5yF0s', u'91qOFKithgE', u'WQJQkGEwG-Q',", "+ \".txt\" #print s,media_file,caption_file,videoids[c-1] a = service.captions().insert( part=\"snippet\", body=dict( snippet=dict( videoId=videoids[c-1], language=language, name=media_file,", "str(c) + \".flv\" caption_file = folderName + '/' + fileName + \"_\" +", "return vid def hms_to_s(time): time = unicode(time, \"UTF-8\") time = time.split(\" --> \")", "interviewer = raw_input(\"\\n6.3.1 Please input your interviewer's name as it appears in the", "hard to read) (n) \") answer = verify_y_n_none(answer) if answer == 'y': resampleSubtitles", "the following condition is almost always met. if not t.replace('[','').replace(']','').replace(':','').replace('.','').replace('\\n','').isdigit() and t !=", "ref[key]\" means that in the next time through the # \"for pa in", "and # client_secret. 
\"\"\" to create a client secret file: google apis dashboard", "f: pickle.dump(videoids, f) if wait == True: print \"\\nWaiting for videos to be", "number of times to retry before giving up. MAX_RETRIES = 10 # Always", "never split between 2 timestamps, at the least. #place-based time stamping can be", "Mac).\" time.sleep(1) print \"\\n\" print \"This tool:\\n- snips your transcript (.txt) into text", "import pickle import os #adjust sleeping time as needed - ES #adjust switches", "subtitle entries full sentences (not recommended, since some timestamp/subtitle units can end up", "True answer = raw_input(\"\\n6/7 Would you like your uploaded video snippets to be", "insert_result[\"snippet\"][\"status\"] #print \"Uploaded caption track '%s(%s) in '%s' language, '%s' status.\" % (name,", "int(t[2]) t_list.append(t1) t0 = t0 + t1 else: t1 = int(t[0])*3600 + int(t[1])*60", "(Experimental; can lead to short, choppy, fast subtitles that are hard to read)", "raw_input(\"\\n4/7 Will you be uploading text snippets for syncing with your video snippets?", "advanced users or users who have already used this tool, please select which", "a else: a = raw_input(\"Please answer 'y' or 'n': \") continue def verify_y_n_none(a):", "into video snippets,\\n- uploads these video snippets to Youtube as private videos only", "True: if downloadCaptions == True: print \"\\nDownloading captions...\" c = 1 waitLonger =", "can properly run.\" exit() #ES print texts[c] #print \"splits: \" + str(splits) #for", "timestamps, at the least. 
#place-based time stamping can be set to True or", "or False (make a variable for this) compiledSubs = compileSubs(folderName,fileName,[['_high-frequency-timestamps',0,placeBasedTimestamping]],t_list,interviewer,interviewee,False,language,resampleSubtitles,fullSentenceSubtitles,removeLoneWords) time.sleep(10) #thefile =", "the video and txt files are stored #folderName = 'venant' #fileName refers to", "#'snippet.playlistId': playlistID, playlist_items_insert( {'snippet.resourceId.kind': 'youtube#video', 'snippet.resourceId.videoId': vid, 'snippet.position': ''}, part='snippet', onBehalfOfContentOwner='') print \"Waiting", "flow_from_clientsecrets from oauth2client.file import Storage from oauth2client.tools import argparser, run_flow # The CLIENT_SECRETS_FILE", "\"\\nIf all input was correct, the program will begin snipping and uploading content", "answer == '': resumeUploads = False answer = raw_input(\"\\n4/7 Will you be uploading", "# Authorize the request and store authorization credentials. def get_authenticated_service(args): flow = flow_from_clientsecrets(CLIENT_SECRETS_FILE,", "Please see README.md for instructions.\" print \"exiting application...\" time.sleep(2) exit() print \"\\n\" if", "num+=1 print \"\\nThe document named '\" + fileName + \".txt' was cut into", "#print videoids if resumeUploads == True or deleteVideos == True or uploadTranscripts ==", "build(API_SERVICE_NAME, API_VERSION, http=credentials.authorize(httplib2.Http())) # Explicitly tell the underlying HTTP transport library not to", "from Youtube once subtitles have been successfully generated? (n) \") answer = verify_y_n_none(answer)", "your uploaded video snippets to be deleted from Youtube once subtitles have been", "subtitle files, you will need to create a list of timestamps demarcating the", "else: print \"You have not chosen any options for running this application. 
Exiting...\"", "storage, args) # Trusted testers can download this discovery document from the developers", "a digit (False) and therefore the following condition is almost always met. if", "sure you have the available space on your hard drive, and then restart", "answer == 'y': removeLoneWords = True elif answer == 'n': removeLoneWords = False", "answer == '': uploadVideos = True answer = raw_input(\"\\n3/7 Will you be resuming", "\"WARNING: Please configure OAuth 2.0\" # Authorize the request and store authorization credentials.", "properties given as key-value pairs. # Leave properties with empty values out of", "resource, method): response = None error = None retry = 0 while response", "resampleSubtitles = False elif answer == '': resampleSubtitles = False if resampleSubtitles ==", "elif answer == '': fullSentenceSubtitles = False answer = raw_input(\"\\n7.1.2 Would you like", "are hard to read) resampleSubtitles = False #ES: IF you enabled 'resampleSubtitles' (above),", "answer == 'n': snipVideos = False elif answer == '': snipVideos = True", "True answer = raw_input(\"\\n3/7 Will you be resuming video uploads from a previously-initiated", "#if combineSubtitles == True: print \"\\n\\n\" print \"\\n6.3 If your transcript has speaker", "apiclient.http import MediaFileUpload from oauth2client.client import flow_from_clientsecrets from oauth2client.file import Storage from oauth2client.tools", "= verify_y_n_none(answer) if answer == 'y': removeLoneWords = True elif answer == 'n':", "subtitles. the switch combine_only allows some different functionality down the road combine_only =", "+ \".txt\", 'w') try: #ES: write the previous position of c in texts", "there are no uploads to resume or your 'videoids.pkl' file has gone missing.", "properties with empty values out of the inserted resource. 
def build_resource(properties): resource =", "the file name of your timestamp list (excluding the \\\".txt\\\" extention): \") else:", "str(int(sec)) #ES: open anita/Anita.txt as myfile try: with open(folderName + \"/\" + fileName", "full sample for function return vid def hms_to_s(time): time = unicode(time, \"UTF-8\") time", "was cut into \" + str(len(splits)) + \" text snippets based on it", "#ES: IF you enabled 'resampleSubtitles' (above), you have the option to make subtitle", "= False #ES: upload the full video and compiled transcript to your Youtube", "for the appropriate two-letter 'ISO 639-1' language code.)\\n\") if language != '': verifyLanguage", "upload process was terminated unexpectedly before and you want to continue where you", "still be set to True): resumeUploads = False #ES: upload snippet transcripts (.txt)", "verify_y_n_none(answer) if answer == 'y': resampleSubtitles = True elif answer == 'n': resampleSubtitles", "your account,\\n- uploads the text snippets to Youtube as transcript files for these", "= True elif answer == 'n': fullSentenceSubtitles = False elif answer == '':", "print \"\\n\" #____________# # let rodolphe know if there is a problem with", "= 'venant' #fileName refers to the name of the input .txt file (excluding", "program is unable to resume uploads because there are no uploads to resume", "videos are being uploaded (uploadVideos = True) to warn the user as to", "time = time.split(\" --> \") t_0 = time[0].split(\":\") t_1 = time[1].split(\":\") t0 =", "uploadVideos == True: #ES: the following is called when videos are being uploaded", "of the following: \" + str(videoids) #print videoids if resumeUploads == True or", "+ str(len(splits)) + \" video snippets will created. 
Continue?\" print \"\\nIf all input", "strftime,localtime from postprocess_and_fuse_subs import compileSubs import pickle import os #adjust sleeping time as", "myfile: text = myfile.read().replace('\\n', '') with open(folderName + \"/\" + \"delete me.txt\") as", "'': resampleSubtitles = False if resampleSubtitles == True: answer = raw_input(\"\\n7.1.1 Would you", "see README.md for instructions.\" print \"exiting application...\" time.sleep(2) exit() print \"\\n\" if snipVideos", "elif answer == '': downloadCaptions = True answer = raw_input(\"\\n6/7 Would you like", "new project on the resulting dashboard, \"enable apis and get credntials like keys\"", "= ref[key] return resource # Remove keyword arguments that are not set def", "if len(videoids) > 0: print \"(However, it looks like \",len(videoids),\" video snippets were", "method to upload a caption track in draft status. def upload_caption(youtube, video_id, language,", "video into video snippets (y) \") answer = verify_y_n_none(answer) if answer == 'y':", "\"enable apis and get credntials like keys\" search for youtube api click \"YouTube", "and store authorization credentials. def get_authenticated_service(args): flow = flow_from_clientsecrets(CLIENT_SECRETS_FILE, scope=YOUTUBE_READ_WRITE_SSL_SCOPE, message=MISSING_CLIENT_SECRETS_MESSAGE) storage =", "be a property in that object. prop_array = p.split('.') ref = resource for", "but the resource does # not yet have a \"snippet\" object. Create the", "PROCESSING if snipTranscript == True: for t in text: #add a \\n to", "never qualified as a digit (False) and therefore the following condition is almost", "vtt snippets that were downloaded from Youtube into a total subtitle file. 
combineSubtitles", "no mistakes (see README.md) and restart the program.\" exit() sp1 = sp num+=1", "def resumable_upload(request, resource, method): response = None error = None retry = 0", "inside the 'files' folder): \") try: verifyExistence = os.stat(folderName).st_size except Exception as e:", "with open(folderName + \"/\" + fileName + \".txt\") as f: text = f.readlines()", "files are associated. These values will be used as offsets for accurately combining", "digit (False) and therefore the following condition is never met. if t !=", "uploaded to YouTube for processing. YouTube allows a maximum of 100 video uploads", "response = request.next_chunk() if response is not None: if method == 'insert' and", "= \"O.G.\" #folderName = 'oscar' #fileName = 'oscar' #originalVideo = \"Oscar.mp4\" ### START", "uploading videos...\" time.sleep(1) if len(videoids) > 0: print \"(However, it looks like \",len(videoids),\"", "waitLonger = True for s in splits: print c,s,captionsids[c-1] sub_txt = \"\" #", "are never split between 2 timestamps, at the least. #place-based time stamping can", "\"ES: text is the following\" + str(text) #ES: strip whitespace text = [x.strip()", "(Experimental) (n) \") answer = verify_y_n_none(answer) if answer == 'y': placeBasedTimestamping = True", "them all print \"\\nCombining subtitle snippets ...\" #ES: this is a feature that", "False print \"\\n\" folderName = raw_input(\"Enter the name of the folder containing your", "subtitle snippets from Youtube? (y) \") answer = verify_y_n_none(answer) if answer == 'y':", "in range(0, len(prop_array)): is_array = False key = prop_array[pa] # Convert a name", "the user as to how many videos will be uploaded. question = \"\\nThere", "video snippets to Youtube as private videos only visible to your account,\\n- uploads", "selected. Exiting...\" exit() #ES: UPLOADS THE VIDEOS if uploadVideos == True: #ES: the", "place names are never split between 2 timestamps, at the least. 
#place-based time", "of the transcript's timestamps t_list = [] #ES: PREPARE INPUT TEXT FOR PROCESSING", "will be used as offsets for accurately combining your subtitle files.\\nEach timestamp should", "+ '/' + fileName + \".srt\" service.captions().insert( part=\"snippet\", body=dict( snippet=dict( videoId=id, language=language, name=originalVideo,", "True: #in this case, the user has chosen to only combine subtitles. the", "by pressing Ctrl+C (Cmd+C on Mac).\" time.sleep(1) print \"\\n\" print \"This tool:\\n- snips", "True: for t in text: #add a \\n to the end of each", "according to the presence of place names? (Experimental) (n) \") answer = verify_y_n_none(answer)", "f: # pickle.dump(videoids, f) if resumeUploads == True: print \"\\nResuming video uploads...\\n\" time.sleep(1)", "YouTube allows a maximum of 100 video uploads per 24h using the current", "for s in splits: print c,s,captionsids[c-1] sub_txt = \"\" # while waitLonger ==", "README.md for instructions.\" print \"exiting application...\" time.sleep(2) exit() print \"\\n\" originalVideo = raw_input(\"Enter", "isDraft=True ) ), media_body=file ).execute() id = insert_result[\"id\"] name = insert_result[\"snippet\"][\"name\"] language =", "a key like \"snippet.title\", split into \"snippet\" and \"title\", where # \"snippet\" will", "names are never split between 2 timestamps, at the least. #place-based time stamping", "be set to True for other processes to run) snipVideos = True #ES:", "= verify_y_n_none(answer) if answer == 'y': fullSentenceSubtitles = True elif answer == 'n':", "hitting 'Enter': \") continue print \"\\n\\n\" print \"This application creates subtitles for a", "# Call the API's captions.insert method to upload a caption track in draft", "the resulting dashboard, \"enable apis and get credntials like keys\" search for youtube", "+ str(len(splits)) + \" timestamps formatted like such '[HH:MM:SS.00]'.\" else: print \"Please set", "completed. No further options were selected. 
Exiting...\" exit() #ES: UPLOADS THE VIDEOS if", "to be in a folder name called oscar #change certain character variables import", "being uploaded (uploadVideos = True) to warn the user as to how many", "the presence of place names? (Experimental) (n) \") answer = verify_y_n_none(answer) if answer", "random from apiclient.discovery import build from apiclient.errors import HttpError from apiclient.http import MediaFileUpload", "downloaded subtitle snippets into a single subtitle file for your video? (y) \")", "print error retry += 1 if retry > MAX_RETRIES: exit(\"No longer attempting to", "True: print \"\\nDeleting videos...\\n\" c = 1 for s in splits: print c,videoids[c-1]", "+ folderName + \"' does not exist in the current directory. Please see", "''\" with open(folderName + \"/\" + fileName + \".txt\") as f: text =", "the following\" + str(text) #ES: strip whitespace text = [x.strip() for x in", "print \"Waiting for full video to be processed. It is\",strftime(\"%H:%M:%S\", localtime()),\". Script will", "videos if the videos you are uploading are identical. If so, do this", "+ fileName + \".srt\" service.captions().insert( part=\"snippet\", body=dict( snippet=dict( videoId=id, language=language, name=originalVideo, isDraft=True, sync=False", "into captions. It is\",strftime(\"%H:%M:%S\", localtime()),\". Script will resume in \" + str(2) +", "of each line (why?) 
t += \"\\n\" #ES: if the beginning of the", "video.', 'snippet.tags[]': '', 'snippet.title': media_file, 'status.embeddable': '', 'status.license': '', 'status.privacyStatus': 'unlisted', 'status.publicStatsViewable': ''},", "print \"\\n\" originalVideo = raw_input(\"Enter the file name of your video (this time", "{'snippet.categoryId': '22', 'snippet.defaultLanguage': language, 'snippet.description': 'Description of uploaded video.', 'snippet.tags[]': '', 'snippet.title': fileName,", "a problem with one of your timestamps:\" print \"Timestamp number #\",str(num+2),\" (equivalent to", "True #ES: if the video/caption upload process was terminated unexpectedly before and you", "#interviewee = \"V.S.\" #where the video and txt files are stored #folderName =", "str(c) + \" \" + captionsids[c-1] + \" to be processed into captions.", "print \"\\nThere is a problem with one of your timestamps:\" print \"Timestamp number", "application, including its client_id and # client_secret. \"\"\" to create a client secret", "upload failed with an unexpected response: %s\" % response) except HttpError, e: if", "value return good_kwargs ### END BOILERPLATE CODE # Sample python code for videos.insert", "based on it containing \" + str(len(splits)) + \" timestamps formatted like such", "folderName + '/' + fileName + \"_\" + str(c) + \".txt\" #print s,media_file,caption_file,videoids[c-1]", "+ int(t[2]) t_list.append(t1) t0 = t0 + t1 else: t1 = int(t[0])*3600 +", "# except: # waitLonger = True # print \"Waiting for transcripts \" +", "are being uploaded (uploadVideos = True) to warn the user as to how", "document named '\" + fileName + \".txt' was cut into \" + str(len(splits))", "in splits: # print s_to_hms(i[0]),\"->\",s_to_hms(i[1]) #time.sleep(60) #print splits,splits[len(splits)-1][1] #splits.append([splits[len(splits)-1][1],7200]) #print splits #print \"Wait\"", "uploading are identical. If so, do this manually on youtube.com and then restart", "current API credentials. 
Continue?\" print \"\\nIf all input was correct, the program will", "== True or resumeUploads == True or downloadCaptions == True or deleteVideos ==", "\"\\n\\n\" print \"This application creates subtitles for a video for which you have", "full video and compiled transcript to your Youtube account once complete uploadFull =", ".txt file (excluding .txt) #fileName = 'venant' #originalVideo refers to the name of", "video in custom playlist def playlist_items_insert(properties, **kwargs): resource = build_resource(properties) # See full", "video and text snippets\\n- downloads the text snippets as subtitle files (.vtt),\\n- stitches", "captions...\" c = 1 waitLonger = True for s in splits: print c,s,captionsids[c-1]", "str(raw_input(question+' (y/n): ')).lower().strip() if reply[0] == 'y': return True if reply[0] == '':", "IOError, httplib.NotConnected, httplib.IncompleteRead, httplib.ImproperConnectionState, httplib.CannotSendRequest, httplib.CannotSendHeader, httplib.ResponseNotReady, httplib.BadStatusLine) # Always retry when an", "with open(folderName + \"/\" + 'videoids.pkl', 'wb') as f: pickle.dump(videoids, f) if wait", "= open(folderName + \"/\" + fileName + \"_\" + str(c) + \".txt\", 'w')", "= True #ES: download snippet subtitle files (.vtt) downloadCaptions = True #ES: delete", "therefore be uploaded to YouTube for processing. 
YouTube allows a maximum of 100", "two-letter 'ISO 639-1' language code.)\\n\") if language != '': verifyLanguage = raw_input(\"\\nYou have", "service.videos().delete( id=videoids[c-1] ).execute() c += 1 time.sleep(10) if combineSubtitles == True: #compiles them", "= \"A retriable error occurred: %s\" % e if error is not None:", "If this does not apply to your transcript, simply leave the following two", "= \"DD2FD4AE-FEE4-4DF3-9AF7-A4D6BF453B49.flv\" #interviewer = \"S.G.\" #interviewee = \"O.G.\" #folderName = 'oscar' #fileName =", "written as follows [HH:MM:SS.00], followed by a newline.\\n\\nPlease enter the file name of", "return True if reply[0] == '': return True if reply[0] == 'n': exit()", "make subtitle entries full sentences (not recommended, since some timestamp/subtitle units can end", "texts = [\"\"] t0 = 0 c = 0 #ES: several print commands", "track in draft status. def upload_caption(youtube, video_id, language, name, file): insert_result = youtube.captions().insert(", "successfully generated? (n) \") answer = verify_y_n_none(answer) if answer == 'y': deleteVideos =", "\"PLSbFnWujSxCZxm7tYAGNeG9l5s19m4T65\" #language = 'fr' #change these variables according to what story you want", "#print int(t[0])*3600 + int(t[1])*60 + int(t[2]) t_list.append(t1) t0 = t0 + t1 else:", "your first time running this tool on the files you have indicated, you", "+ \"/\" + \"delete me.txt\",\"w+\") foo.close() with open(folderName + \"/\" + \"delete me.txt\",", "names (e.g. the interviewer or interviewee's names) that precede their discourse (e.g. \\\"Emmanuel:", "into a single subtitle file for your video? (y) \") answer = verify_y_n_none(answer)", "#thefile = open(folderName + \"/\" + fileName + \"_\" + str(c) + \".txt\",", "a next-line char #ES: removing punctuation from '[00:00:01.09]' since it is never qualified", "upload a caption track in draft status. def upload_caption(youtube, video_id, language, name, file):", "always met. 
if not t.replace('[','').replace(']','').replace(':','').replace('.','').replace('\\n','').isdigit() and t != \"\\n\": #ES: add t to", "if uploadTranscripts == True: #print splits,videoids #uploads transcripts print \"\\nUploading transcripts...\" for s", "transcripts \" + str(c) + \" \" + captionsids[c-1] + \" to be", "== '': return a else: a = raw_input(\"Please answer 'y' or 'n', or", "for function kwargs = remove_empty_kwargs(**kwargs) # See full sample for function results =", "text snippets for syncing with your video snippets? (y) \") answer = verify_y_n_none(answer)", "logic ourselves. httplib2.RETRIES = 1 # Maximum number of times to retry before", "int(t[0])*60 + int(t[1]) splits.append([t0,t1]) t_list.append(t1) t0 = t1 elif len(t) == 3: #if", "there is a problem with playlist id, might need to create a playlist", "video snippets will created. Continue?\" print \"\\nIf all input was correct, the program", "\\\".txt\\\" extention): \") try: verifyExistence = os.stat(folderName + '/' + fileName + '.txt').st_size", "time.sleep(2) exit() print \"\\n\" videoSize = os.stat(folderName + '/' + originalVideo).st_size/1000000 answer =", "False answer = raw_input(\"\\n7.1.2 Would you like to reorganize subtitles to remove lone", "sample for function kwargs = remove_empty_kwargs(**kwargs) # See full sample for function request", "fileName = raw_input(\"Enter the file name of your transcript (excluding the \\\".txt\\\" extention):", "kwargs = remove_empty_kwargs(**kwargs) # See full sample for function request = service.videos().insert( body=resource,", "timestamps (\",':'.join(t) ,\") isn't formatted correctly. 
Consult README.md for guidelines on proper timestamp", "open(folderName + \"/\" + 'captionsids.pkl', 'wb') as f: pickle.dump(captionsids, f) print \"Waiting for", "#print str(t_list) for sp in splits: if num > 0: if sp[1] <=", "print \"The folder named '\" + folderName + \"' does not exist in", "import MediaFileUpload from oauth2client.client import flow_from_clientsecrets from oauth2client.file import Storage from oauth2client.tools import", "\"title\" will be a property in that object. prop_array = p.split('.') ref =", "many videos will be uploaded. question = \"\\nThere were \" + str(len(splits)) +", "c += 1 if c > len(videoids): ffmpeg_extract_subclip(folderName + \"/\" + originalVideo, s[0],", "language, '%s' status.\" % (name, # id, language, status) c = 1 captionsids", "print \"\\n\" print \"This tool:\\n- snips your transcript (.txt) into text snippets based", "[u'jDAZHgL-nG4', u'cMNTnd8pApk', u's5hLO6T_BhY', u'gOAoCh5Mecc', u'p0PX5s6k5DU', u'hSmPkLqOt0M', u'2Ik7_biRs9g', u'G64A_hpNWfI', u'ZzVVEcGekv0', u'ZxKJhN3JFfI', u'TsDnqWmpvrw', u'Kvem1XnPHF0', u'VwqhkmbiLh0',", "for transcripts to be processed into captions. It is\",strftime(\"%H:%M:%S\", localtime()),\". Script will resume", "t#.encode('utf8') #print t.replace('[','').replace(']','').replace(':','').replace('.','').replace('\\n','').isdigit() #ES: this will aggregate phrases (t) into one list item", "of c in texts (a chunk of text prior to timestamp) to thefile", "'files' folder): \") try: verifyExistence = os.stat(folderName).st_size except Exception as e: print e", "+ str(sleepingTime/60) + \" minutes...\" time.sleep(sleepingTime) #search_response = service.search().list( # q=\"Anita\", # part=\"id\",", "you like to reorganize subtitles according to the presence of place names? (Experimental)", "transcripts to be processed into captions. It is\",strftime(\"%H:%M:%S\", localtime()),\". 
Script will resume in", "resumable_upload(request, resource, method): response = None error = None retry = 0 while", "import sys import httplib import random from apiclient.discovery import build from apiclient.errors import", "timestamp formatting.\" print \"\\nVerifying if timestamps are in ascending order...\" sp1 = 0", "ascending order and that there are no mistakes (see README.md) and restart the", "snippets...)\" time.sleep(1) for s in splits: c += 1 if c > len(videoids):", "\"snippet\" will be an object and \"title\" will be a property in that", "id, might need to create a playlist in youtube online and copy url", "\" + str(videoids) #print videoids if resumeUploads == True or deleteVideos == True", "# missing. MISSING_CLIENT_SECRETS_MESSAGE = \"WARNING: Please configure OAuth 2.0\" # Authorize the request", "will be an object and \"title\" will be a property in that object.", "here. # Setting \"ref = ref[key]\" means that in the next time through", "+ int(float(t_0[2])) t1 = float(int(t_1[0])*3600) + int(float(t_1[1])*60) + int(float(t_1[2])) return [t0,t1] def s_to_hms(seconds):", "order...\" sp1 = 0 num = 0 #print str(splits) #print str(t_list) for sp", "answer == '': deleteVideos = False answer = raw_input(\"\\n7/7 Will you be combining", "not a next-line char #ES: removing punctuation from '[00:00:01.09]' since it is never", "None error = None retry = 0 while response is None: try: print", "u'n40bGD_9eZI', u'OWWAQTGKyMI', u'8a2De6Gzfek', u'VQJgxR3iAoA', u'UEzrAMq6fGc', u'PXCHMF-Z7X4', u'SU_Rbp9V_Zo', u'VLhSxDh9gI0', u'80rY1RlbVQw', u'1yumt5fRBF4', u'u5qAHXhhJoo', u'G3gO6DW-wrM', u'qAU_8DNEqP8',", "in client_id.json CLIENT_SECRETS_FILE = \"client_id.json\" # This OAuth 2.0 access scope allows for", "If so, do this manually on youtube.com and then restart the program.\" uploadVideos", "wait = False def yes_or_no(question): while \"the answer is invalid\": reply = str(raw_input(question+'", "cutting your video into video snippets (y) \") answer = 
verify_y_n_none(answer) if answer", "folderName + '/' + fileName + \"_\" + str(c) + \".mp4\" if not", "'', 'snippet.title': media_file, 'status.embeddable': '', 'status.license': '', 'status.privacyStatus': 'unlisted', 'status.publicStatsViewable': ''}, media_file, part='snippet,status')", "fast subtitles that are hard to read) resampleSubtitles = False #ES: IF you", "uploadVideos = True #ES: if the video/caption upload process was terminated unexpectedly before", "video and/or subtitle files\\n(this folder must be located inside the 'files' folder): \")", "t_list.append(t1) t0 = t0 + t1 else: t1 = int(t[0])*60 + int(t[1]) splits.append([t0,t1])", "sub_txt = \"\" # while waitLonger == True: # try: subtitle = service.captions().download(id=captionsids[c-1],tfmt='vtt').execute()", "in texts (a chunk of text prior to timestamp) to thefile thefile.write(\"%s\\n\" %", "with an unexpected response: %s\" % response) except HttpError, e: if e.resp.status in", "# not yet have a \"snippet\" object. Create the snippet object here. #", "+ \"_\" + str(c) + \".txt\", 'w') as thefile: #thefile = open(folderName +", "pickle.dump(videoids, f) else: if resumeUploads == True or deleteVideos == True or uploadTranscripts", "values will be used as offsets for accurately combining your subtitle files.\\nEach timestamp", "u'V1sv1MYLdC0'] #videoids = [u'cj62vgUfnik', u'5k9WCcWCLiU', u'MexTd0EGfRc', u'hWY_30yHOec', u'GrMtKARI9kQ', u'YDHnQAE7U0w', u'yc4IXkGHuXs', u'ZauR51lBjQo', u'kisoEOTjmVI', u'V9XdpjtUU4Q',", "the length of each video to which your subtitle files are associated. 
These", "'youtube#video', 'snippet.resourceId.videoId': vid, 'snippet.position': ''}, part='snippet', onBehalfOfContentOwner='') print \"Waiting for full video to", "combine_only allows some different functionality down the road combine_only = True fileName =", "#ES: the following switches control how subtitles are altered when concatenating snippets (i.e.", "answer = raw_input(\"\\n7/7 Will you be combining the downloaded subtitle snippets into a", "Would you like to reorganize subtitles to prioritize keeping full sentences intact? (Experimental;", "entered '\" + language + \"' as the language code for your transcript", "else: exit(\"The upload failed with an unexpected response: %s\" % response) except HttpError,", "of these status # codes is raised. RETRIABLE_STATUS_CODES = [500, 502, 503, 504]", "u'qAU_8DNEqP8', u'fbGaOVHXkvY', u'_Knl1rP8Z9w', u'O6f8ZWjSgiw', u'uXY-00DuLjY', u'WpreZ_gbEyw'] #with open(folderName + \"/\" + 'videoids.pkl', 'wb')", "+ originalVideo).st_size/1000000 answer = raw_input(\"If this is your first time running this tool", "waitLonger = False # except: # waitLonger = True # print \"Waiting for", "'', 'snippet.title': fileName, 'status.embeddable': '', 'status.license': '', 'status.privacyStatus': 'unlisted', 'status.publicStatsViewable': ''}, folderName +", "return build(API_SERVICE_NAME, API_VERSION, http=credentials.authorize(httplib2.Http())) # Explicitly tell the underlying HTTP transport library not", "the transcript's timestamps (must be set to True for other processes to run)", "is called when videos are being uploaded (uploadVideos = True) to warn the", "newline.\\n\\nPlease enter the file name of your timestamp list (excluding the \\\".txt\\\" extention):", "them. 
If this does not apply to your transcript, simply leave the following", "specifies the name of a file that contains # the OAuth 2.0 information", "#originalVideo refers to the name of the video file including its ext #originalVideo", "folder name called oscar #change certain character variables import imageio imageio.plugins.ffmpeg.download() from moviepy.video.io.ffmpeg_tools", "have entered '\" + language + \"' as the language code for your", "CODE # Sample python code for videos.insert def videos_insert(properties, media_file, **kwargs): resource =", "== True or deleteVideos == True: combine_only = False fileName = raw_input(\"Enter the", "will begin snipping and uploading content to Youtube for processing. This may take", "vid def hms_to_s(time): time = unicode(time, \"UTF-8\") time = time.split(\" --> \") t_0", "raw_input(\"\\n5/7 Will you be downloading the generated subtitle snippets from Youtube? (y) \")", "\"snippet.description\", and the resource # already has a \"snippet\" object. ref = ref[key]", "= True answer = raw_input(\"\\n5/7 Will you be downloading the generated subtitle snippets", "occurred: %s\" % e if error is not None: print error retry +=", "until a timestamp is reached #ES: if t is a timestamp #ES: removing", "retry when these exceptions are raised. RETRIABLE_EXCEPTIONS = (httplib2.HttpLib2Error, IOError, httplib.NotConnected, httplib.IncompleteRead, httplib.ImproperConnectionState,", "you have the option to make subtitle entries full sentences (not recommended, since", "fullSentenceSubtitles = False #ES: IF you enabled 'resampleSubtitles' (above), you have the option", "\"/\" + fileName + \".txt\") as f: text = f.readlines() except IOError as", "request and store authorization credentials. def get_authenticated_service(args): flow = flow_from_clientsecrets(CLIENT_SECRETS_FILE, scope=YOUTUBE_READ_WRITE_SSL_SCOPE, message=MISSING_CLIENT_SECRETS_MESSAGE) storage", "line (why?) 
t += \"\\n\" #ES: if the beginning of the line is", "example, the property is \"snippet.description\", and the resource # already has a \"snippet\"", "is \"snippet.description\", and the resource # already has a \"snippet\" object. ref =", "+ \".srt\" service.captions().insert( part=\"snippet\", body=dict( snippet=dict( videoId=id, language=language, name=originalVideo, isDraft=True, sync=False ) ),", "strip whitespace text = [x.strip() for x in text] #split times (?) splits", "#api key is <KEY> #client id is in client_id.json CLIENT_SECRETS_FILE = \"client_id.json\" #", "the language code of your video and transcript or the intended language code", "== True: print \"\\nResuming video uploads...\\n\" time.sleep(1) try: with open(folderName + \"/\" +", "the subtitle structure overall (can lead to short, choppy, fast subtitles that are", "for youtube api click \"YouTube Data API v3\" and ENABLE it click \"create", "= sp num+=1 print \"\\nThe document named '\" + fileName + \".txt' was", "f: pickle.dump(videoids, f) else: if resumeUploads == True or deleteVideos == True or", "contains # the OAuth 2.0 information for this application, including its client_id and", "\"\\nThe video IDs are composed of the following: \" + str(videoids) #print videoids", "your timestamps are in ascending order and that there are no mistakes (see", "require \" + str(videoSize) + \" Mb available space on your hard drive", "not in ref: # For example, the property is \"snippet.title\", but the resource", "maximum of 100 video uploads per 24h using the current API credentials. 
Continue?\"", "t = t.replace('[','').replace(']','').replace('\\n','') t = unicode(t, \"UTF-8\") #split the timestamps at : (into", "+ str(c) + \".flv\" caption_file = folderName + '/' + fileName + \"_\"", "\".mp4\" if not os.path.exists(media_file): exit('Please specify a valid file location.') vid = videos_insert(", "then retrying...\" % sleep_seconds time.sleep(sleep_seconds) return response['id'] if uploadTranscripts == True or resumeUploads", "= properties[p] elif key not in ref: # For example, the property is", "print \"This application creates subtitles for a video for which you have an", "so as to make sure that place names are never split between 2", "and t != \"\\n\": #ES: add t to position c of texts texts[c]", "'snipTranscript' to True so that the code can properly run.\" exit() #ES print", "# Given a key like \"snippet.title\", split into \"snippet\" and \"title\", where #", "full sentences intact? (Experimental; this feature is not recommended since subtitle units tend", "== 'n': resumeUploads = False elif answer == '': resumeUploads = False answer", "#fileName = 'Berthe' #originalVideo = \"DD2FD4AE-FEE4-4DF3-9AF7-A4D6BF453B49.flv\" #interviewer = \"S.G.\" #interviewee = \"O.G.\" #folderName", "+ str(int(m)) + \":\" + str(int(s)) return str(int(h)) + \":\" + str(int(m)) +", "resumable_upload(request, 'video', 'insert') # See full sample for function return vid def hms_to_s(time):", "we are using a .txt file with a list of video lengths, then", "t += \"\\n\" #ES: if the beginning of the line is not a", "deleteVideos = False #ES: upload the full video and compiled transcript to your", "= True with open(folderName + \"/\" + 'videoids.pkl', 'wb') as f: pickle.dump(videoids, f)", "[] wait = False if uploadTranscripts == True: #print splits,videoids #uploads transcripts print", "= False elif answer == '': downloadCaptions = True answer = raw_input(\"\\n6/7 Would", "== '' or 'y': break #if combineSubtitles == True: print \"\\n\\n\" print \"\\n6.3", "be in 
the same directory with the code. return build(API_SERVICE_NAME, API_VERSION, http=credentials.authorize(httplib2.Http())) #", "as key-value pairs. # Leave properties with empty values out of the inserted", "may take between 20 minutes and several hours, depending on the size of", "like to reorganize subtitles according to the presence of place names? (Experimental) (n)", "snippets to Youtube for syncing? (y) \") answer = verify_y_n_none(answer) if answer ==", "transcript, simply leave the following two answers blank by pressing the 'Enter' key.\"", "are uploading are identical. If so, do this manually on youtube.com and then", "splits: c += 1 if c > len(videoids): ffmpeg_extract_subclip(folderName + \"/\" + originalVideo,", "'': uploadTranscripts = True answer = raw_input(\"\\n5/7 Will you be downloading the generated", "u'_Knl1rP8Z9w', u'O6f8ZWjSgiw', u'uXY-00DuLjY', u'WpreZ_gbEyw'] #with open(folderName + \"/\" + 'videoids.pkl', 'wb') as f:", "#change certain character variables import imageio imageio.plugins.ffmpeg.download() from moviepy.video.io.ffmpeg_tools import ffmpeg_extract_subclip import time", "print e print \"The file named '\" + originalVideo + \"' does not", "list of timestamps demarcating the length of each video to which your subtitle", "'on' or 'off' depending on which steps you would like to run. If", "per 24h using the current API credentials. Continue?\" print \"\\nIf all input was", "splits: print c,videoids[c-1] service.videos().delete( id=videoids[c-1] ).execute() c += 1 time.sleep(10) if combineSubtitles ==", "for these video snippets,\\n- allows Youtube to sync the video and text snippets\\n-", "'': deleteVideos = False answer = raw_input(\"\\n7/7 Will you be combining the downloaded", "blank. For more advanced users or users who have already used this tool,", "questions...\\\"), please input them. 
If this does not apply to your transcript, simply", "#fileName = 'Frederic' #originalVideo = \"Frederic.mov\" #interviewer = \"M.M.\" #interviewee = \"B.K.\" #fileName", "= {} ref = ref[key] else: # For example, the property is \"snippet.description\",", "= pickle.load(f) except Exception as e: print e print \"\\nThe program is unable", "print e print \"\\nThe program is unable to resume uploads because there are", "name, file): insert_result = youtube.captions().insert( part=\"snippet\", body=dict( snippet=dict( videoId=video_id, language=language, name=name, isDraft=True )", "3: print \"\\nOne of your timestamps (\",':'.join(t) ,\") isn't formatted correctly. Consult README.md", "file location.') print \"\\nSnipping completed. No further options were selected. Exiting...\" exit() #ES:", "status) c = 1 captionsids = [] wait = False if uploadTranscripts ==", "'id' not in response: print response else: exit(\"The upload failed with an unexpected", "not None: for key, value in kwargs.iteritems(): if value: good_kwargs[key] = value return", "= get_authenticated_service(args) def print_results(results): print(results) # Build a resource based on a list", "\"snippet.tags[]\" to snippet.tags, but handle # the value as an array. if key[-2:]", "print \"\\n1. 
Slicing into \" + str(len(splits)) + \" parts\" time.sleep(1) for s", "type=\"video\", # fields=\"items/id\" #).execute() # #videos = [] # #for search_result in search_response.get(\"items\",", "Please input your interviewee's name as it appears in the transcript: \") print", "= prop_array[pa] # Convert a name like \"snippet.tags[]\" to snippet.tags, but handle #", "originalVideo + \"' does not exist in the folder '\" + folderName +", "users or users who have already used this tool, please select which processes", "= properties[p].split(',') else: ref[key] = properties[p] elif key not in ref: # For", "str(c) + \".txt\", 'w') as thefile: #thefile = open(folderName + \"/\" + fileName", "put them in an adjacent subtitle (verify)) removeLoneWords = False #____________# #ES: USER", "= \"\" if c < 10: cc = \"0\" + str(c) else: cc", "can be set to True or False (make a variable for this) compiledSubs", "interviewee's name as it appears in the transcript: \") print \"\\n\" #____________# #", "#ES A feature created by RG that has yet to be explored... placeBasedTimestamping", "= 1 # Maximum number of times to retry before giving up. MAX_RETRIES", "#print str(int(h)) + \":\" + str(int(m)) + \":\" + str(int(s)) return str(int(h)) +", "to upload a caption track in draft status. def upload_caption(youtube, video_id, language, name,", "combineSubtitles = False elif answer == '': combineSubtitles = True if combineSubtitles ==", "= False if resampleSubtitles == True: answer = raw_input(\"\\n7.1.1 Would you like to", "and get credntials like keys\" search for youtube api click \"YouTube Data API", "to timestamp) to thefile thefile.write(\"%s\\n\" % texts[c-1]) #time.sleep(.1) texts.append(\"\") texts[c] = \"\" #t", "= 0 #ES: several print commands were added for guidance. they can be", "video snippets to Youtube for syncing? (y) \") answer = verify_y_n_none(answer) if answer", "(e.g. 
en, fr, es, etc.):\\n(You can refer to the second column in http://www.loc.gov/standards/iso639-2/php/code_list.php", "YouTube for processing. YouTube allows a maximum of 100 video uploads per 24h", "len(t) > 3 or len(t) < 3: print \"\\nOne of your timestamps (\",':'.join(t)", "the file name of your video (this time including the file's extention): \")", "snipVideos = False elif answer == '': snipVideos = True answer = raw_input(\"\\n2/7", "have the option to make subtitle entries full sentences (not recommended, since some", "combineSubtitles = True elif answer == 'n': combineSubtitles = False elif answer ==", "the name of the input .txt file (excluding .txt) #fileName = 'venant' #originalVideo", "\"DD2FD4AE-FEE4-4DF3-9AF7-A4D6BF453B49.flv\" #interviewer = \"S.G.\" #interviewee = \"O.G.\" #folderName = 'oscar' #fileName = 'oscar'", "# #videos = [] # #for search_result in search_response.get(\"items\", []): # videos.append(\"%s\" %", "end up being excessively large) fullSentenceSubtitles = False #ES: IF you enabled 'resampleSubtitles'", "#ES: delete uploaded video snippets from your Youtube account once subtitle processing is", "as e: print e print \"The file named '\" + originalVideo + \"'", "if resumeUploads == True or deleteVideos == True or uploadTranscripts == True: with", "Script will resume in \" + str(2 * sleepingTime / 60) + \"", "\" + str(len(splits)) + \" video snippets will therefore be uploaded to YouTube", "videos...\" time.sleep(1) if len(videoids) > 0: print \"(However, it looks like \",len(videoids),\" video", "== 'y': uploadVideos = True snipVideos = True elif answer == 'n': uploadVideos", "to YouTube for processing. 
YouTube allows a maximum of 100 video uploads per", "answer = raw_input(\"\\n5/7 Will you be downloading the generated subtitle snippets from Youtube?", "of cut-up texts texts = [\"\"] t0 = 0 c = 0 #ES:", "True or False (make a variable for this) compiledSubs = compileSubs(folderName,fileName,[['_high-frequency-timestamps',0,placeBasedTimestamping]],t_list,interviewer,interviewee,False,language,resampleSubtitles,fullSentenceSubtitles,removeLoneWords) time.sleep(10) #thefile", "place names? (Experimental) (n) \") answer = verify_y_n_none(answer) if answer == 'y': placeBasedTimestamping", "search for youtube api click \"YouTube Data API v3\" and ENABLE it click", "text = [x.strip() for x in text] #split times (?) splits = []", "'snippet.position': ''}, part='snippet', onBehalfOfContentOwner='') print \"Waiting for full video to be processed. It", "pressing the 'Enter' key.\" time.sleep(1) interviewer = raw_input(\"\\n6.3.1 Please input your interviewer's name", "tend to become excessively long) (n) \") answer = verify_y_n_none(answer) if answer ==", "'y': removeLoneWords = True elif answer == 'n': removeLoneWords = False elif answer", "processing your files. Continue? 
(y/n) \") if verifyLanguage.lower() == '' or 'y': break", "answer = raw_input(\"\\n6/7 Would you like your uploaded video snippets to be deleted", "== 'y': placeBasedTimestamping = True elif answer == 'n': placeBasedTimestamping = False elif", "break #if combineSubtitles == True: print \"\\n\\n\" print \"\\n6.3 If your transcript has", "project on the resulting dashboard, \"enable apis and get credntials like keys\" search", "this manually on youtube.com and then restart the program.\" uploadVideos = True wait", "fullSentenceSubtitles = True elif answer == 'n': fullSentenceSubtitles = False elif answer ==", "print \"\\nIf all input was correct, the program will begin snipping and uploading", "c,s,captionsids[c-1] sub_txt = \"\" # while waitLonger == True: # try: subtitle =", "application...\" time.sleep(2) exit() print \"\\n\" originalVideo = raw_input(\"Enter the file name of your", "True or downloadCaptions == True or deleteVideos == True: args = argparser.parse_args() service", "downloadCaptions == True or deleteVideos == True: combine_only = False fileName = raw_input(\"Enter", "answer blank by hitting 'Enter': \") continue print \"\\n\\n\" print \"This application creates", "a valid file location.') print \"\\nSnipping completed. No further options were selected. Exiting...\"", "the program.\" uploadVideos = True wait = False def yes_or_no(question): while \"the answer", "if num > 0: if sp[1] <= sp1[1]: print \"\\nThere is a problem", "a few questions...\\\"), please input them. If this does not apply to your", "off (uploadVideos must still be set to True): resumeUploads = False #ES: upload", "simply leave the following answers blank. For more advanced users or users who", "processed into captions. It is\",strftime(\"%H:%M:%S\", localtime()),\". 
Script will resume in \" + str(2)", "script #playlistID = \"PLSbFnWujSxCZxm7tYAGNeG9l5s19m4T65\" #language = 'fr' #change these variables according to what", "online and copy url id to script #playlistID = \"PLSbFnWujSxCZxm7tYAGNeG9l5s19m4T65\" #language = 'fr'", "stored #folderName = 'venant' #fileName refers to the name of the input .txt", "directory. Please see README.md for instructions.\" print \"exiting application...\" time.sleep(2) exit() print \"\\n\"", "+ fileName + \"_\" + str(cc) + \".vtt\", 'w') as thefile: #thefile.write(sub_txt) thefile.write(subtitle)", "object here. # Setting \"ref = ref[key]\" means that in the next time", "uploadVideos == False and snipVideos == True: #ES: the following is called when", "of your timestamps (\",':'.join(t) ,\") isn't formatted correctly. Consult README.md for guidelines on", "offsets for accurately combining your subtitle files.\\nEach timestamp should be written as follows", "= 'oscar' #fileName = 'oscar' #originalVideo = \"Oscar.mp4\" ### START BOILERPLATE CODE #", "splits.append([t0,t0+t1]) t_list.append(t1) t0 = t0 + t1 else: t1 = int(t[0])*60 + int(t[1])", "int(t[1])*60 + float(t[2]) splits.append([t0,t0+t1]) #print int(t[0])*3600 + int(t[1])*60 + int(t[2]) t_list.append(t1) t0 =", "\".mp4\" if not os.path.exists(media_file): exit('Please specify a valid file location.') print \"\\nSnipping completed.", "certain character variables import imageio imageio.plugins.ffmpeg.download() from moviepy.video.io.ffmpeg_tools import ffmpeg_extract_subclip import time from", "from a previously-initiated process? 
(n) \") answer = verify_y_n_none(answer) if answer == 'y':", "True elif answer == 'n': placeBasedTimestamping = False elif answer == '': placeBasedTimestamping", "\") answer = verify_y_n(answer) if answer == \"n\": print \"Please make sure you", "to remove subtitle entries which may be a single word (and put them", "# This OAuth 2.0 access scope allows for full read/write access to the", "s in splits: print c,s media_file = folderName + '/' + fileName +", "answer == '': resampleSubtitles = False if resampleSubtitles == True: answer = raw_input(\"\\n7.1.1", "open(folderName + \"/\" + 'videoids.pkl', 'wb') as f: # pickle.dump(videoids, f) if resumeUploads", "while response is None: try: print \"Uploading file...\" status, response = request.next_chunk() if", "account once complete uploadFull = False #ES: combine vtt snippets that were downloaded", "# Setting \"ref = ref[key]\" means that in the next time through the", "this application. Exiting...\" exit() while True: language = raw_input(\"Enter the language code of", "code for user authorization import httplib2 import os import sys import httplib import", "3: #if we are only combining subtitle files, and we are using a", "raw_input(\"\\n7.1.1 Would you like to reorganize subtitles to prioritize keeping full sentences intact?", "refer to the second column in http://www.loc.gov/standards/iso639-2/php/code_list.php for the appropriate two-letter 'ISO 639-1'", "\"Oscar.mp4\" ### START BOILERPLATE CODE # Sample Python code for user authorization import", "secret file: google apis dashboard --> create a new project on the resulting", "u'hSmPkLqOt0M', u'2Ik7_biRs9g', u'G64A_hpNWfI', u'ZzVVEcGekv0', u'ZxKJhN3JFfI', u'TsDnqWmpvrw', u'Kvem1XnPHF0', u'VwqhkmbiLh0', u'V1sv1MYLdC0'] #videoids = [u'cj62vgUfnik', u'5k9WCcWCLiU',", "#print \"Wait\" #time.sleep(30) c = 0 #print splits videoids = [] #videoids =", "\"Frederic.mov\" #interviewer = \"M.M.\" #interviewee = \"B.K.\" #fileName = 'Berthe' #originalVideo = 
\"DD2FD4AE-FEE4-4DF3-9AF7-A4D6BF453B49.flv\"", "str(c) #print subtitle print cc with open(folderName + \"/\" + fileName + \"_\"", "full video to be processed. It is\",strftime(\"%H:%M:%S\", localtime()),\". Script will resume in \"", "sp1[1]: print \"\\nThere is a problem with one of your timestamps:\" print \"Timestamp", "a timestamp #ES: removing punctuation from '[00:00:01.09]' since it is never qualified as", "'': return True if reply[0] == 'n': exit() if uploadVideos == False and", "[] #ES: PREPARE INPUT TEXT FOR PROCESSING if snipTranscript == True: for t", "to retry, since # we are handling retry logic ourselves. httplib2.RETRIES = 1", "#ES print texts[c] #print \"splits: \" + str(splits) #for i in splits: #", "uploads because there are no uploads to resume or your 'videoids.pkl' file has", "True with open(folderName + \"/\" + 'videoids.pkl', 'wb') as f: pickle.dump(videoids, f) else:", "+ '/' + originalVideo).st_size except Exception as e: print e print \"The file", "retry += 1 if retry > MAX_RETRIES: exit(\"No longer attempting to retry.\") max_sleep", "BOILERPLATE CODE # Sample python code for videos.insert def videos_insert(properties, media_file, **kwargs): resource", "resumeUploads == True or deleteVideos == True or uploadTranscripts == True: with open(folderName", "answer == 'y': downloadCaptions = True elif answer == 'n': downloadCaptions = False", "except HttpError, e: if e.resp.status in RETRIABLE_STATUS_CODES: error = \"A retriable HTTP error", "print \"\\nThe program is unable to resume uploads because there are no uploads", "), media_body=file ).execute() id = insert_result[\"id\"] name = insert_result[\"snippet\"][\"name\"] language = insert_result[\"snippet\"][\"language\"] status", "of the folder containing your transcript and/or video and/or subtitle files\\n(this folder must", "key[-2:] == '[]': key = key[0:len(key)-2:] is_array = True if pa == (len(prop_array)", "timestamps:\" print \"Timestamp number #\",str(num+2),\" (equivalent to 
\",str(sp[1]),\" seconds) should be a larger", "specify a valid file location.') vid = videos_insert( {'snippet.categoryId': '22', 'snippet.defaultLanguage': language, 'snippet.defaultAudioLanguage':", "= False def yes_or_no(question): while \"the answer is invalid\": reply = str(raw_input(question+' (y/n):", "sample for function kwargs = remove_empty_kwargs(**kwargs) # See full sample for function results", "wait = True with open(folderName + \"/\" + 'videoids.pkl', 'wb') as f: pickle.dump(videoids,", "# See full sample for function kwargs = remove_empty_kwargs(**kwargs) # See full sample", "including the file's extention): \") try: verifyExistence = os.stat(folderName + '/' + originalVideo).st_size", "the files you have indicated, you will temporarily require \" + str(videoSize) +", "s[0], s[1], targetname=folderName + \"/\" + fileName + \"_\" + str(c) +\".mp4\") media_file", "'%s' was successfully uploaded.\" % response['id'] videoid = response['id'] elif method != 'insert'", "open anita/Anita.txt as myfile try: with open(folderName + \"/\" + fileName + \".txt\",", "from the developers page # and it should be in the same directory", "elif answer == 'n': resampleSubtitles = False elif answer == '': resampleSubtitles =", "for other processes to run) snipVideos = True #ES: upload video snippets uploadVideos", "not running the entire pipeline. Creating dummy file 'delete me.txt' to finish pipeline.\"", "= request.next_chunk() if response is not None: if method == 'insert' and 'id'", "build_resource(properties) # See full sample for function kwargs = remove_empty_kwargs(**kwargs) # See full", "'videoids.pkl', 'rb') as f:videoids = pickle.load(f) except Exception as e: print e print", "Youtube once subtitles have been successfully generated? 
(n) \") answer = verify_y_n_none(answer) if", "was terminated unexpectedly before and you want to continue where you left off", "not os.path.exists(media_file): exit('Please specify a valid file location.') print \"\\nSnipping completed. No further", "True #ES: download snippet subtitle files (.vtt) downloadCaptions = True #ES: delete uploaded", "int(t[2]) t_list.append(t1) t0 = t1 except ValueError as e: print e print \"\\n", "for guidelines on proper timestamp formatting.\" print \"\\nexiting application...\" time.sleep(2) exit() if len(t)", "1 c += 1 #ES: printing deets #print t.replace('[','').replace(']','').replace(':','').replace('.','').replace('\\n','').isdigit() #print \"c: \" +", "to reorganize subtitles to remove lone words? (Experimental) (n) \") answer = verify_y_n_none(answer)", "for full read/write access to the # authenticated user's account and requires requests", "+ fileName + \"_\" + str(c) + \".flv\" caption_file = folderName + '/'", "application...\" time.sleep(2) exit() print \"\\n\" if snipVideos == True or uploadTranscripts == True", "the size of your video file (\" + str(videoSize) + \" Mb).\" yes_or_no(question)", "answer = raw_input(\"\\n3/7 Will you be resuming video uploads from a previously-initiated process?", "== '': combineSubtitles = True if combineSubtitles == True: answer = raw_input(\"\\n7.1 Would", "user's account and requires requests to use an SSL connection. YOUTUBE_READ_WRITE_SSL_SCOPE = \"https://www.googleapis.com/auth/youtube.force-ssl\"", "\") answer = verify_y_n_none(answer) if answer == 'y': uploadTranscripts = True elif answer", "underlying HTTP transport library not to retry, since # we are handling retry", "response: %s\" % response) except HttpError, e: if e.resp.status in RETRIABLE_STATUS_CODES: error =", "num = 0 #print str(splits) #print str(t_list) for sp in splits: if num", "further options were selected. 
Exiting...\" exit() #ES: UPLOADS THE VIDEOS if uploadVideos ==", "argparser, run_flow # The CLIENT_SECRETS_FILE variable specifies the name of a file that", "videos...\\n\" c = 1 for s in splits: print c,videoids[c-1] service.videos().delete( id=videoids[c-1] ).execute()", "= verify_y_n_none(answer) if answer == 'y': resumeUploads = True elif answer == 'n':", "imageio imageio.plugins.ffmpeg.download() from moviepy.video.io.ffmpeg_tools import ffmpeg_extract_subclip import time from time import strftime,localtime from", "will be setting a property in the # resource's \"snippet\" object. ref[key] =", "== 'y': resumeUploads = True elif answer == 'n': resumeUploads = False elif", "dummy file 'delete me.txt' to finish pipeline.\" foo = open(folderName + \"/\" +", "open(folderName + \"/\" + fileName + \"_\" + str(c) + \".txt\", 'w') try:", "if uploadVideos == True: #ES: the following is called when videos are being", "run: \\n\\n\" time.sleep(2) answer = raw_input(\"\\n1/7 Will you be cutting your video into", "set to True for other processes to run) snipVideos = True #ES: upload", "timestamp) to thefile thefile.write(\"%s\\n\" % texts[c-1]) #time.sleep(.1) texts.append(\"\") texts[c] = \"\" #t =", "video file including its ext #originalVideo = \"venant.mp4\" #interviewer = \"E.H.\" #interviewee =", "a single subtitle file for your video.\\n\\nYou may switch these processes 'on' or", "what story you want to process - ES #interviewer = \"C.V.\" #interviewee =", "into \" + str(len(splits)) + \" parts\" time.sleep(1) for s in splits: c", "if downloadCaptions == True: with open(folderName + \"/\" + 'captionsids.pkl', 'rb') as f:", "= 0 while response is None: try: print \"Uploading file...\" status, response =", "application at any point by pressing Ctrl+C (Cmd+C on Mac).\" time.sleep(1) print \"\\n\"", "\") answer = verify_y_n_none(answer) if answer == 'y': uploadVideos = True snipVideos =", "\"Please make sure your timestamps are in ascending order and that there are", "or 
users who have already used this tool, please select which processes you", "composed of the following: \" + str(videoids) #print videoids if resumeUploads == True", "for syncing? (y) \") answer = verify_y_n_none(answer) if answer == 'y': uploadVideos =", "True elif answer == 'n': deleteVideos = False elif answer == '': deleteVideos", "\"M.M.\" #interviewee = \"B.K.\" #fileName = 'Berthe' #originalVideo = \"DD2FD4AE-FEE4-4DF3-9AF7-A4D6BF453B49.flv\" #interviewer = \"S.G.\"", "= \"v3\" # This variable defines a message to display if the CLIENT_SECRETS_FILE", "elif method != 'insert' or 'id' not in response: print response else: exit(\"The", "+ str(videoSize) + \" Mb).\" yes_or_no(question) print \"\\n1. Slicing into \" + str(len(splits))", "as transcript files for these video snippets,\\n- allows Youtube to sync the video", "function is ever called... # Call the API's captions.insert method to upload a", "print \"\\nFull video is soon available on your Youtube channel for you to", "True: args = argparser.parse_args() service = get_authenticated_service(args) def print_results(results): print(results) # Build a", "me.txt' to finish pipeline.\" foo = open(folderName + \"/\" + \"delete me.txt\",\"w+\") foo.close()", "to warn the user as to how many videos will be uploaded. question", "processes 'on' or 'off' depending on which steps you would like to run.", "unicode(t, \"UTF-8\") #split the timestamps at : (into 3) t = t.split(\":\") if", "media_file, 'status.embeddable': '', 'status.license': '', 'status.privacyStatus': 'unlisted', 'status.publicStatsViewable': ''}, media_file, part='snippet,status') videoids.append(vid) print", "== 'n': return a else: a = raw_input(\"Please answer 'y' or 'n': \")", "str(c) + \".txt\", 'w') try: #ES: write the previous position of c in", "\"\\n\".join(videos), \"\\n\" #ES: I don't think this function is ever called... # Call", "\"UTF-8\") #split the timestamps at : (into 3) t = t.split(\":\") if len(t)", "sentences intact? 
(Experimental; this feature is not recommended since subtitle units tend to", "and/or subtitle files\\n(this folder must be located inside the 'files' folder): \") try:", "then restart the program.\" uploadVideos = True wait = False def yes_or_no(question): while", "the code. return build(API_SERVICE_NAME, API_VERSION, http=credentials.authorize(httplib2.Http())) # Explicitly tell the underlying HTTP transport", "False answer = raw_input(\"\\n7.2 Would you like to reorganize subtitles according to the", "= \"C.V.\" #interviewee = \"V.S.\" #where the video and txt files are stored", "= int(t[0])*60 + int(t[1]) splits.append([t0,t1]) t_list.append(t1) t0 = t1 elif len(t) == 3:", "in \" + str(2 * sleepingTime / 60) + \" minutes...\" time.sleep(2 *", "h, m = divmod(m, 60) #print str(int(h)) + \":\" + str(int(m)) + \":\"", "named '\" + folderName + \"' does not exist in the current directory.", "apiclient.errors.HttpError with one of these status # codes is raised. RETRIABLE_STATUS_CODES = [500,", "\"Timestamp number #\",str(num+2),\" (equivalent to \",str(sp[1]),\" seconds) should be a larger number than", "'y': return True if reply[0] == '': return True if reply[0] == 'n':", "pos on texts by 1 c += 1 #ES: printing deets #print t.replace('[','').replace(']','').replace(':','').replace('.','').replace('\\n','').isdigit()", "+ \".flv\" caption_file = folderName + '/' + fileName + \"_\" + str(c)", "who have already used this tool, please select which processes you would like", "= time[0].split(\":\") t_1 = time[1].split(\":\") t0 = float(int(t_0[0])*3600) + int(float(t_0[1])*60) + int(float(t_0[2])) t1", "#split times (?) 
splits = [] #list of cut-up texts texts = [\"\"]", "#____________# #ES: USER INTERVIEW SECTION def verify_y_n(a): while True: a = a.lower().strip() if", "= \"E.M.\" #fileName = 'Frederic' #originalVideo = \"Frederic.mov\" #interviewer = \"M.M.\" #interviewee =", "in response: print response else: exit(\"The upload failed with an unexpected response: %s\"", "except RETRIABLE_EXCEPTIONS, e: error = \"A retriable error occurred: %s\" % e if", "requests to use an SSL connection. YOUTUBE_READ_WRITE_SSL_SCOPE = \"https://www.googleapis.com/auth/youtube.force-ssl\" API_SERVICE_NAME = \"youtube\" API_VERSION", "etc.):\\n(You can refer to the second column in http://www.loc.gov/standards/iso639-2/php/code_list.php for the appropriate two-letter", "+ str(2 * sleepingTime / 60) + \" minutes...\" time.sleep(2 * sleepingTime) else:", "minutes...\" time.sleep(sleepingTime) #search_response = service.search().list( # q=\"Anita\", # part=\"id\", # type=\"video\", # fields=\"items/id\"", "to become excessively long) (n) \") answer = verify_y_n_none(answer) if answer == 'y':", "#playlistID = \"PLSbFnWujSxCZxm7tYAGNeG9l5s19m4T65\" #language = 'fr' #change these variables according to what story", "verifyLanguage.lower() == '' or 'y': break #if combineSubtitles == True: print \"\\n\\n\" print", ") ), media_body=caption_file ).execute() print \"\\nFull video is soon available on your Youtube", "resampleSubtitles = False if resampleSubtitles == True: answer = raw_input(\"\\n7.1.1 Would you like", "into one list item (a text) until a timestamp is reached #ES: if", "exit() print \"\\n\" if snipVideos == True or uploadTranscripts == True or resumeUploads", "u'OriOoB5yF0s', u'91qOFKithgE', u'WQJQkGEwG-Q', u'n4eW0T6Oek0', u'2dRf-EbKYHA', u'RUgi4NfoPEw', u'n40bGD_9eZI', u'OWWAQTGKyMI', u'8a2De6Gzfek', u'VQJgxR3iAoA', u'UEzrAMq6fGc', u'PXCHMF-Z7X4', u'SU_Rbp9V_Zo',", "'n': fullSentenceSubtitles = False elif answer == '': fullSentenceSubtitles = False answer =", "#ES: a list of the transcript's 
timestamps t_list = [] #ES: PREPARE INPUT", "API_VERSION = \"v3\" # This variable defines a message to display if the", "return [t0,t1] def s_to_hms(seconds): m, sec = divmod(seconds, 60) h, m = divmod(m,", "formatted correctly. Consult README.md for guidelines on proper timestamp formatting.\" print \"\\nVerifying if", "'videoids.pkl', 'wb') as f: # pickle.dump(videoids, f) if resumeUploads == True: print \"\\nResuming", "service.captions().insert( part=\"snippet\", body=dict( snippet=dict( videoId=id, language=language, name=originalVideo, isDraft=True, sync=False ) ), media_body=caption_file ).execute()", "up being excessively large) fullSentenceSubtitles = False #ES: IF you enabled 'resampleSubtitles' (above),", "be uploading video snippets to Youtube for syncing? (y) \") answer = verify_y_n_none(answer)", "into video snippets (y) \") answer = verify_y_n_none(answer) if answer == 'y': snipVideos", "os import sys import httplib import random from apiclient.discovery import build from apiclient.errors", "time.sleep(10) if combineSubtitles == True: #compiles them all print \"\\nCombining subtitle snippets ...\"", "\") try: verifyExistence = os.stat(folderName).st_size except Exception as e: print e print \"The", "be a larger number than the timestamp that comes before it (\",str(sp1[1]),\" seconds),", "processing. This may take between 20 minutes and several hours, depending on the", "refers to the name of the video file including its ext #originalVideo =", "== 'y': fullSentenceSubtitles = True elif answer == 'n': fullSentenceSubtitles = False elif", "file location.') vid = videos_insert( {'snippet.categoryId': '22', 'snippet.defaultLanguage': language, 'snippet.defaultAudioLanguage': language, 'snippet.description': 'Description", "os.stat(folderName + '/' + originalVideo).st_size/1000000 answer = raw_input(\"If this is your first time", "are raised. 
RETRIABLE_EXCEPTIONS = (httplib2.HttpLib2Error, IOError, httplib.NotConnected, httplib.IncompleteRead, httplib.ImproperConnectionState, httplib.CannotSendRequest, httplib.CannotSendHeader, httplib.ResponseNotReady, httplib.BadStatusLine)", "\"A retriable HTTP error %d occurred:\\n%s\" % (e.resp.status,e.content) else: raise except RETRIABLE_EXCEPTIONS, e:", "= open(folderName + \"/\" + \"delete me.txt\",\"w+\") foo.close() with open(folderName + \"/\" +", "uploaded (uploadVideos = True) to warn the user as to how many videos", "to make subtitle entries full sentences (not recommended, since some timestamp/subtitle units can", "url id to script #playlistID = \"PLSbFnWujSxCZxm7tYAGNeG9l5s19m4T65\" #language = 'fr' #change these variables", "if uploadFull == True: print \"\\nUploading full video...\" vid = videos_insert( {'snippet.categoryId': '22',", "sleep_seconds time.sleep(sleep_seconds) return response['id'] if uploadTranscripts == True or resumeUploads == True or", "processes to run) snipTranscript = True #ES: cut video into snippets based on", "= t1 except ValueError as e: print e print \"\\n One of your", "for processing. This may take between 20 minutes and several hours, depending on", "fullSentenceSubtitles = False elif answer == '': fullSentenceSubtitles = False answer = raw_input(\"\\n7.1.2", "% (name, # id, language, status) c = 1 captionsids = [] wait", "{'snippet.resourceId.kind': 'youtube#video', 'snippet.resourceId.videoId': vid, 'snippet.position': ''}, part='snippet', onBehalfOfContentOwner='') print \"Waiting for full video", "are composed of the following: \" + str(videoids) #print videoids if resumeUploads ==", "creates subtitles for a video for which you have an associated transcript. Make", "authorization credentials. 
def get_authenticated_service(args): flow = flow_from_clientsecrets(CLIENT_SECRETS_FILE, scope=YOUTUBE_READ_WRITE_SSL_SCOPE, message=MISSING_CLIENT_SECRETS_MESSAGE) storage = Storage(\"youtube-api-snippets-oauth2.json\") credentials", "id\" \"\"\" #CLIENT_SECRETS_FILE = \"client_secret.json\" #api key is <KEY> #client id is in", "transcript: \") interviewee = raw_input(\"\\n6.3.2 Please input your interviewee's name as it appears", "True: #compiles them all print \"\\nCombining subtitle snippets ...\" #ES: this is a", "variable 'snipTranscript' to True so that the code can properly run.\" exit() #ES", "= service.captions().download(id=captionsids[c-1],tfmt='vtt').execute() # waitLonger = False # except: # waitLonger = True #", "snippet.tags, but handle # the value as an array. if key[-2:] == '[]':", "True elif answer == 'n': uploadTranscripts = False elif answer == '': uploadTranscripts", "and \"OAUT client id\" \"\"\" #CLIENT_SECRETS_FILE = \"client_secret.json\" #api key is <KEY> #client", "on your hard drive to run this program. Continue? (y/n) \") answer =", "as follows [HH:MM:SS.00], followed by a newline.\\n\\nPlease enter the file name of your", "as myfile: text = myfile.read().replace('\\n', '') with open(folderName + \"/\" + \"delete me.txt\")", "t_list.append(t1) t0 = t1 except ValueError as e: print e print \"\\n One", "information for this application, including its client_id and # client_secret. 
\"\"\" to create", "open(folderName + \"/\" + 'videoids.pkl', 'wb') as f: pickle.dump(videoids, f) if wait ==", "httplib.NotConnected, httplib.IncompleteRead, httplib.ImproperConnectionState, httplib.CannotSendRequest, httplib.CannotSendHeader, httplib.ResponseNotReady, httplib.BadStatusLine) # Always retry when an apiclient.errors.HttpError", "id, language, status) c = 1 captionsids = [] wait = False if", "compiled subtitles...\" caption_file = folderName + '/' + fileName + \".srt\" service.captions().insert( part=\"snippet\",", "should be in the same directory with the code. return build(API_SERVICE_NAME, API_VERSION, http=credentials.authorize(httplib2.Http()))", "+ \"/\" + fileName + \".txt\") as f: text = f.readlines() except IOError", "= True elif answer == 'n': snipVideos = False elif answer == '':", "you would like to run. If this is your first time running the", "+ str(len(splits)) + \" video snippets will therefore be uploaded to YouTube for", "their discourse (e.g. \\\"Emmanuel: Hi, I'd like to ask you a few questions...\\\"),", "item (a text) until a timestamp is reached #ES: if t is a", "is soon available on your Youtube channel for you to check and adjust", "\"delete me.txt\") as f: text = f.readlines() pass #print \"ES: text is the", "print \"\\nResuming video uploads...\\n\" time.sleep(1) try: with open(folderName + \"/\" + 'videoids.pkl', 'rb')", "to True for other processes to run) snipTranscript = True #ES: cut video", "if the videos you are uploading are identical. 
If so, do this manually", "timestamp formatting.\" print \"\\nexiting application...\" time.sleep(2) exit() if len(t) == 2: if combine_only", "+ \"/\" + 'captionsids.pkl', 'rb') as f: captionsids = pickle.load(f) #if wait ==", "already used this tool, please select which processes you would like to run:", "1 captionsids = [] wait = False if uploadTranscripts == True: #print splits,videoids", "\"\\nFull video is soon available on your Youtube channel for you to check", "Exiting...\" exit() while True: language = raw_input(\"Enter the language code of your video", "same directory with the code. return build(API_SERVICE_NAME, API_VERSION, http=credentials.authorize(httplib2.Http())) # Explicitly tell the", "# Sample python code for videos.insert def videos_insert(properties, media_file, **kwargs): resource = build_resource(properties)", "following: \" + str(videoids) #print videoids if resumeUploads == True or deleteVideos ==", "subtitles according to the presence of place names? (Experimental) (n) \") answer =", "\"/\" + 'videoids.pkl', 'wb') as f: pickle.dump(videoids, f) if wait == True: print", "= open(folderName + \"/\" + fileName + \".srt\", 'w') #thefile.write(compiledSubs) if uploadFull ==", "Ctrl+C (Cmd+C on Mac).\" time.sleep(1) print \"\\n\" print \"This tool:\\n- snips your transcript", "language code of your subtitles (e.g. en, fr, es, etc.):\\n(You can refer to", "= False #ES: IF you enabled 'resampleSubtitles' (above), you have the option to", "the OAuth 2.0 information for this application, including its client_id and # client_secret.", "service = get_authenticated_service(args) def print_results(results): print(results) # Build a resource based on a", "Authorize the request and store authorization credentials. 
def get_authenticated_service(args): flow = flow_from_clientsecrets(CLIENT_SECRETS_FILE, scope=YOUTUBE_READ_WRITE_SSL_SCOPE,", "videos.append(\"%s\" % (search_result[\"id\"][\"videoId\"])) # #print \"Videos:\\n\", \"\\n\".join(videos), \"\\n\" #ES: I don't think this", "== True: print \"\\n\\n\" print \"\\n6.3 If your transcript has speaker names (e.g.", "part=\"snippet\", body=dict( snippet=dict( videoId=videoids[c-1], language=language, name=media_file, isDraft=True, sync=True ) ), media_body=caption_file ).execute() captionsids.append(a['id'])", "time.sleep(2 * sleepingTime) else: if downloadCaptions == True: with open(folderName + \"/\" +", "located inside the 'files' folder): \") try: verifyExistence = os.stat(folderName).st_size except Exception as", "python code for videos.insert def videos_insert(properties, media_file, **kwargs): resource = build_resource(properties) # See", "q=\"Anita\", # part=\"id\", # type=\"video\", # fields=\"items/id\" #).execute() # #videos = [] #", "int(t[0])*3600 + int(t[1])*60 + float(t[2]) splits.append([t0,t1]) #print int(t[0])*3600 + int(t[1])*60 + int(t[2]) t_list.append(t1)", "answer = raw_input(\"\\n7.1.2 Would you like to reorganize subtitles to remove lone words?", "True: t1 = int(t[0])*3600 + int(t[1])*60 + float(t[2]) splits.append([t0,t0+t1]) #print int(t[0])*3600 + int(t[1])*60", "missing. The program will restart by uploading all videos. You may need to", "\"\\n\\n\" print \"\\n6.3 If your transcript has speaker names (e.g. 
the interviewer or", "!= \"\\n\": #ES: add t to position c of texts texts[c] += t#.encode('utf8')", "if uploadTranscripts == True or resumeUploads == True or downloadCaptions == True or", "before and you want to continue where you left off (uploadVideos must still", "enter the file name of your timestamp list (excluding the \\\".txt\\\" extention): \")", "in same folder #transcript text file \"oscar4.txt\" #might have to be in a", "(see README.md) and restart the program.\" exit() sp1 = sp num+=1 print \"\\nThe", "snipTranscript == True: for t in text: #add a \\n to the end", "if downloadCaptions == True: print \"\\nDownloading captions...\" c = 1 waitLonger = True", "timestamp/subtitle units can end up being excessively large) fullSentenceSubtitles = False #ES: IF", "proper timestamp formatting.\" print \"\\nVerifying if timestamps are in ascending order...\" sp1 =", "before proceeding.\" time.sleep(1) print \"You may terminate the application at any point by", "may be a single word (and put them in an adjacent subtitle (verify))", "== '': fullSentenceSubtitles = False answer = raw_input(\"\\n7.1.2 Would you like to reorganize", "\"exiting application...\" time.sleep(2) exit() print \"\\n\" videoSize = os.stat(folderName + '/' + originalVideo).st_size/1000000", "raw_input(\"In order to accurately combine subtitle files, you will need to create a", "+ str(c) + \".txt\", 'w') as thefile: #thefile = open(folderName + \"/\" +", "u'2Ik7_biRs9g', u'G64A_hpNWfI', u'ZzVVEcGekv0', u'ZxKJhN3JFfI', u'TsDnqWmpvrw', u'Kvem1XnPHF0', u'VwqhkmbiLh0', u'V1sv1MYLdC0'] #videoids = [u'cj62vgUfnik', u'5k9WCcWCLiU', u'MexTd0EGfRc',", "input them. 
If this does not apply to your transcript, simply leave the", "str(c) +\".mp4\") media_file = folderName + '/' + fileName + \"_\" + str(c)", "#ES: if the video/caption upload process was terminated unexpectedly before and you want", "f: text = f.readlines() pass #print \"ES: text is the following\" + str(text)", "== '': resampleSubtitles = False if resampleSubtitles == True: answer = raw_input(\"\\n7.1.1 Would", "answers blank by pressing the 'Enter' key.\" time.sleep(1) interviewer = raw_input(\"\\n6.3.1 Please input", "of uploaded video.', 'snippet.tags[]': '', 'snippet.title': fileName, 'status.embeddable': '', 'status.license': '', 'status.privacyStatus': 'unlisted',", "your video snippets? (y) \") answer = verify_y_n_none(answer) if answer == 'y': uploadTranscripts", "subtitle snippets into a single subtitle file for your video? (y) \") answer", "response['id'] if uploadTranscripts == True or resumeUploads == True or downloadCaptions == True", "or uploadTranscripts == True or resumeUploads == True or downloadCaptions == True or", "= True # print \"Waiting for transcripts \" + str(c) + \" \"", "a folder name called oscar #change certain character variables import imageio imageio.plugins.ffmpeg.download() from", "must be located inside the 'files' folder): \") try: verifyExistence = os.stat(folderName).st_size except", "snipping\" yes_or_no(question) print \"\\n1. Slicing into \" + str(len(splits)) + \" parts\" time.sleep(1)", "ES #adjust switches as needed sleepingTime = 400 #___SWITCHES(defaults)___# #ES: cut transcript into", "are no mistakes (see README.md) and restart the program.\" exit() sp1 = sp", "or downloadCaptions == True or deleteVideos == True: args = argparser.parse_args() service =", "containing your transcript and/or video and/or subtitle files\\n(this folder must be located inside", "\"snippet\" object. 
ref = ref[key] return resource # Remove keyword arguments that are", "chunksize=-1, resumable=True), **kwargs ) vid = resumable_upload(request, 'video', 'insert') # See full sample", "processes to run) snipVideos = True #ES: upload video snippets uploadVideos = True", "+ fileName + \".txt\") as f: text = f.readlines() except IOError as e:", "name of the input .txt file (excluding .txt) #fileName = 'venant' #originalVideo refers", "digit and is not a next-line char #ES: removing punctuation from '[00:00:01.09]' since", "\") answer = verify_y_n_none(answer) if answer == 'y': snipVideos = True elif answer", "v3\" and ENABLE it click \"create credentials\" create and \"OAUT client id\" \"\"\"", "+ \" parts\" time.sleep(1) for s in splits: c += 1 if c", "20 minutes and several hours, depending on the size of your video file", "is\",strftime(\"%H:%M:%S\", localtime()),\". Script will resume in \" + str(2) + \" minutes...\" #", "snipVideos = True elif answer == 'n': uploadVideos = False elif answer ==", "retriable error occurred: %s\" % e if error is not None: print error", "print \"\\n\\n\" print \"\\n6.3 If your transcript has speaker names (e.g. the interviewer", "on which steps you would like to run. If this is your first", "print c,s media_file = folderName + '/' + fileName + \"_\" + str(c)", "\".flv\" caption_file = folderName + '/' + fileName + \"_\" + str(c) +", "allows a maximum of 100 video uploads per 24h using the current API", "has gone missing. The program will restart by uploading all videos. You may", "= True elif answer == 'n': placeBasedTimestamping = False elif answer == '':", "httplib2.RETRIES = 1 # Maximum number of times to retry before giving up.", "'wb') as f: pickle.dump(videoids, f) else: if resumeUploads == True or deleteVideos ==", "Youtube. Now trying to resume uploading the remaining snippets...)\" time.sleep(1) for s in", "be processed into captions. It is\",strftime(\"%H:%M:%S\", localtime()),\". 
Script will resume in \" +", "print_results(results) #'snippet.playlistId': playlistID, playlist_items_insert( {'snippet.resourceId.kind': 'youtube#video', 'snippet.resourceId.videoId': vid, 'snippet.position': ''}, part='snippet', onBehalfOfContentOwner='') print", "body=dict( snippet=dict( videoId=id, language=language, name=originalVideo, isDraft=True, sync=False ) ), media_body=caption_file ).execute() print \"\\nFull", "\"client_secret.json\" #api key is <KEY> #client id is in client_id.json CLIENT_SECRETS_FILE = \"client_id.json\"", "like to reorganize subtitles to remove lone words? (Experimental) (n) \") answer =", "of timestamps demarcating the length of each video to which your subtitle files", "CLIENT_SECRETS_FILE variable specifies the name of a file that contains # the OAuth", "to prevent cut-up phrases, lone-word subtitles, and improve the subtitle structure overall (can", "answer = verify_y_n_none(answer) if answer == 'y': removeLoneWords = True elif answer ==", "how subtitles are altered when concatenating snippets (i.e. when combineSubtitles = True) #ES", "answer = verify_y_n_none(answer) if answer == 'y': combineSubtitles = True elif answer ==", "before giving up. MAX_RETRIES = 10 # Always retry when these exceptions are", "\":\" + str(int(sec)) #ES: open anita/Anita.txt as myfile try: with open(folderName + \"/\"", "text file found because you are not running the entire pipeline. Creating dummy", "file name of your transcript (excluding the \\\".txt\\\" extention): \") try: verifyExistence =", "next-line char #ES: removing punctuation from '[00:00:01.09]' since it is never qualified as", "and snipVideos == True: #ES: the following is called when videos are being", "\" to be processed into captions. It is\",strftime(\"%H:%M:%S\", localtime()),\". Script will resume in", "+ str(c) else: cc = str(c) #print subtitle print cc with open(folderName +", "print \"\\n6.3 If your transcript has speaker names (e.g. 
the interviewer or interviewee's", "\") answer = verify_y_n_none(answer) if answer == 'y': fullSentenceSubtitles = True elif answer", "False fileName = raw_input(\"Enter the file name of your transcript (excluding the \\\".txt\\\"", "s[1], targetname=folderName + \"/\" + fileName + \"_\" + str(c) +\".mp4\") media_file =", "Youtube will use this code for processing your files. Continue? (y/n) \") if", "<filename>main.py #requires oscar.mp4 video file in same folder #transcript text file \"oscar4.txt\" #might", "Leave properties with empty values out of the inserted resource. def build_resource(properties): resource", "mistakes (see README.md) and restart the program.\" exit() sp1 = sp num+=1 print", "you would like to run: \\n\\n\" time.sleep(2) answer = raw_input(\"\\n1/7 Will you be", "and t.replace('[','').replace(']','').replace(':','').replace('.','').replace('\\n','').isdigit() and \"[\" in t: #increase pos on texts by 1 c", "because you are not running the entire pipeline. Creating dummy file 'delete me.txt'", "int(t[1])*60 + float(t[2]) splits.append([t0,t1]) #print int(t[0])*3600 + int(t[1])*60 + int(t[2]) t_list.append(t1) t0 =", "# while waitLonger == True: # try: subtitle = service.captions().download(id=captionsids[c-1],tfmt='vtt').execute() # waitLonger =", "(n) \") answer = verify_y_n_none(answer) if answer == 'y': resampleSubtitles = True elif", "answer == '': fullSentenceSubtitles = False answer = raw_input(\"\\n7.1.2 Would you like to", "\"Uploading file...\" status, response = request.next_chunk() if response is not None: if method", "switch combine_only allows some different functionality down the road combine_only = True fileName", "apply to your transcript, simply leave the following two answers blank by pressing", "if len(t) > 3 or len(t) < 3: print \"\\nOne of your timestamps", "= verify_y_n(answer) if answer == \"n\": print \"Please make sure you have the", "the video file including its ext #originalVideo = \"venant.mp4\" #interviewer = 
\"E.H.\" #interviewee", "'/' + fileName + \"_\" + str(c) + \".txt\" #print s,media_file,caption_file,videoids[c-1] a =", "following answers blank. For more advanced users or users who have already used", "deleteVideos = False answer = raw_input(\"\\n7/7 Will you be combining the downloaded subtitle", "processing is complete deleteVideos = False #ES: upload the full video and compiled", "you are not running the entire pipeline. Creating dummy file 'delete me.txt' to", "+ 'videoids.pkl', 'wb') as f: # pickle.dump(videoids, f) if resumeUploads == True: print", "make this into a list of cumulative times so that the rest of", "the transcript's timestamps t_list = [] #ES: PREPARE INPUT TEXT FOR PROCESSING if", "on it containing \" + str(len(splits)) + \" timestamps formatted like such '[HH:MM:SS.00]'.\"", "e: print e print \"The file named '\" + fileName + \".txt' does", "in an adjacent subtitle (verify)) removeLoneWords = False #____________# #ES: USER INTERVIEW SECTION", "prop_array[pa] # Convert a name like \"snippet.tags[]\" to snippet.tags, but handle # the", "snippets that were downloaded from Youtube into a total subtitle file. combineSubtitles =", "not None: if method == 'insert' and 'id' in response: print \"Video id", "oauth2client.file import Storage from oauth2client.tools import argparser, run_flow # The CLIENT_SECRETS_FILE variable specifies", "on your hard drive, and then restart the program.\" print \"exiting application...\" time.sleep(2)", "as e: print e print \"The folder named '\" + folderName + \"'", "random.random() * max_sleep print \"Sleeping %f seconds and then retrying...\" % sleep_seconds time.sleep(sleep_seconds)", "as private videos only visible to your account,\\n- uploads the text snippets to", "property in the # resource's \"snippet\" object. 
ref[key] = {} ref = ref[key]", "+ int(t[1])*60 + float(t[2]) splits.append([t0,t1]) #print int(t[0])*3600 + int(t[1])*60 + int(t[2]) t_list.append(t1) t0", "len(t) < 3: print \"\\nOne of your timestamps (\",':'.join(t) ,\") isn't formatted correctly.", "\".vtt\", 'w') as thefile: #thefile.write(sub_txt) thefile.write(subtitle) if cc == \"31\": print subtitle c", "you be cutting your video into video snippets (y) \") answer = verify_y_n_none(answer)", "True elif answer == 'n': downloadCaptions = False elif answer == '': downloadCaptions", "try: subtitle = service.captions().download(id=captionsids[c-1],tfmt='vtt').execute() # waitLonger = False # except: # waitLonger =", "video uploads per 24h using the current API credentials. Continue?\" print \"\\nIf all", "you have an associated transcript. Make sure you have gone over README.md before", "print commands were added for guidance. they can be removed. #ES: a list", "between 20 minutes and several hours, depending on the size of your video", "True) to warn the user as to how many videos will be uploaded.", "\") else: print \"You have not chosen any options for running this application.", "elif combineSubtitles == True: #in this case, the user has chosen to only", "+ fileName + \"_\" + str(c) +\".mp4\") media_file = folderName + '/' +", "f: videoids = pickle.load(f) print \"\\nThe video IDs are composed of the following:", "a video for which you have an associated transcript. 
Make sure you have", "extention): \") try: verifyExistence = os.stat(folderName + '/' + fileName + '.txt').st_size except", "to make sure that place names are never split between 2 timestamps, at", "These values will be used as offsets for accurately combining your subtitle files.\\nEach", "example, the property is \"snippet.title\", but the resource does # not yet have", "will resume in \" + str(sleepingTime/60) + \" minutes...\" time.sleep(sleepingTime) #search_response = service.search().list(", "# See full sample for function results = service.playlistItems().insert( body=resource, **kwargs ).execute() print_results(results)", "you like to reorganize subtitles to remove lone words? (Experimental) (n) \") answer", "your subtitles (e.g. en, fr, es, etc.):\\n(You can refer to the second column", "\"\" #t = t.replace(\" \", \"\") #t = t t = t.replace('[','').replace(']','').replace('\\n','') t", "print \"You have not chosen any options for running this application. Exiting...\" exit()", "unexpectedly before and you want to continue where you left off (uploadVideos must", "fileName = raw_input(\"In order to accurately combine subtitle files, you will need to", "(excluding the \\\".txt\\\" extention): \") else: print \"You have not chosen any options", "False and snipVideos == True: #ES: the following is called when videos are", "if combineSubtitles == True: answer = raw_input(\"\\n7.1 Would you like to reorganize subtitles", "following is called when videos are being uploaded (uploadVideos = True) to warn", "short, choppy, fast subtitles that are hard to read) (n) \") answer =", "'y' or a == 'n': return a else: a = raw_input(\"Please answer 'y'", "of text prior to timestamp) to thefile thefile.write(\"%s\\n\" % texts[c-1]) #time.sleep(.1) texts.append(\"\") texts[c]", "The program will restart by uploading all videos. 
You may need to remove", "language=language, name=media_file, isDraft=True, sync=True ) ), media_body=caption_file ).execute() captionsids.append(a['id']) c += 1 #print", "1 #print a wait = True with open(folderName + \"/\" + 'captionsids.pkl', 'wb')", "# waitLonger = True # print \"Waiting for transcripts \" + str(c) +", "+ \":\" + str(int(sec)) #ES: open anita/Anita.txt as myfile try: with open(folderName +", "associated transcript. Make sure you have gone over README.md before proceeding.\" time.sleep(1) print", "a list of video lengths, then we need to make this into a", "True elif answer == 'n': fullSentenceSubtitles = False elif answer == '': fullSentenceSubtitles", "= time.split(\" --> \") t_0 = time[0].split(\":\") t_1 = time[1].split(\":\") t0 = float(int(t_0[0])*3600)", "u'p0PX5s6k5DU', u'hSmPkLqOt0M', u'2Ik7_biRs9g', u'G64A_hpNWfI', u'ZzVVEcGekv0', u'ZxKJhN3JFfI', u'TsDnqWmpvrw', u'Kvem1XnPHF0', u'VwqhkmbiLh0', u'V1sv1MYLdC0'] #videoids = [u'cj62vgUfnik',", "import os #adjust sleeping time as needed - ES #adjust switches as needed", "'Frederic' #originalVideo = \"Frederic.mov\" #interviewer = \"M.M.\" #interviewee = \"B.K.\" #fileName = 'Berthe'", "time import strftime,localtime from postprocess_and_fuse_subs import compileSubs import pickle import os #adjust sleeping", "or deleteVideos == True or uploadTranscripts == True: with open(folderName + \"/\" +", "to script #playlistID = \"PLSbFnWujSxCZxm7tYAGNeG9l5s19m4T65\" #language = 'fr' #change these variables according to", "= int(t[0])*60 + int(t[1]) splits.append([t0,t0+t1]) t_list.append(t1) t0 = t0 + t1 else: t1", "error = None retry = 0 while response is None: try: print \"Uploading", "option to remove subtitle entries which may be a single word (and put", "only combining subtitle files, and we are using a .txt file with a", "chunk of text prior to timestamp) to thefile thefile.write(\"%s\\n\" % texts[c-1]) #time.sleep(.1) texts.append(\"\")", "'' or 'y': break #if combineSubtitles == True: print 
\"\\n\\n\" print \"\\n6.3 If", "this is your first time running the tool, simply leave the following answers", "uploaded.\" % response['id'] videoid = response['id'] elif method != 'insert' or 'id' not", "videoids.append(vid) print videoids #c += 1 wait = True with open(folderName + \"/\"", "'y': combineSubtitles = True elif answer == 'n': combineSubtitles = False elif answer", "u'fbGaOVHXkvY', u'_Knl1rP8Z9w', u'O6f8ZWjSgiw', u'uXY-00DuLjY', u'WpreZ_gbEyw'] #with open(folderName + \"/\" + 'videoids.pkl', 'wb') as", "a problem with playlist id, might need to create a playlist in youtube", "than the timestamp that comes before it (\",str(sp1[1]),\" seconds), but it is smaller.\"", "Script will resume in \" + str(sleepingTime/60) + \" minutes...\" time.sleep(sleepingTime) id =", "\\n\\n\" time.sleep(2) answer = raw_input(\"\\n1/7 Will you be cutting your video into video", "(y/n) \") if verifyLanguage.lower() == '' or 'y': break #if combineSubtitles == True:", "pickle.load(f) #if wait == True: if downloadCaptions == True: print \"\\nDownloading captions...\" c", "response['id'] elif method != 'insert' or 'id' not in response: print response else:", "t0 = float(int(t_0[0])*3600) + int(float(t_0[1])*60) + int(float(t_0[2])) t1 = float(int(t_1[0])*3600) + int(float(t_1[1])*60) +", "transcript's timestamps t_list = [] #ES: PREPARE INPUT TEXT FOR PROCESSING if snipTranscript", "print videoids #c += 1 wait = True with open(folderName + \"/\" +", "def upload_caption(youtube, video_id, language, name, file): insert_result = youtube.captions().insert( part=\"snippet\", body=dict( snippet=dict( videoId=video_id,", "in \" + str(sleepingTime/60) + \" minutes...\" time.sleep(sleepingTime) id = vid print \"\\nUploading", "in '%s' language, '%s' status.\" % (name, # id, language, status) c =", "\" + str(c) + \" \" + captionsids[c-1] + \" to be processed", "verify_y_n_none(answer) if answer == 'y': resumeUploads = True elif answer == 'n': resumeUploads", "videos to be processed. 
It is\",strftime(\"%H:%M:%S\", localtime()),\". Script will resume in \" +", "splits #print \"Wait\" #time.sleep(30) c = 0 #print splits videoids = [] #videoids", "videos you are uploading are identical. If so, do this manually on youtube.com", "\"/\" + originalVideo, part='snippet,status') # place video in custom playlist def playlist_items_insert(properties, **kwargs):", "scope allows for full read/write access to the # authenticated user's account and", "%f seconds and then retrying...\" % sleep_seconds time.sleep(sleep_seconds) return response['id'] if uploadTranscripts ==", "program.\" uploadVideos = True wait = False def yes_or_no(question): while \"the answer is", "\"/\" + \"delete me.txt\") as f: text = f.readlines() pass #print \"ES: text", "subtitle print cc with open(folderName + \"/\" + fileName + \"_\" + str(cc)", "for videos.insert def videos_insert(properties, media_file, **kwargs): resource = build_resource(properties) # See full sample", "print \"\\nexiting application...\" time.sleep(2) exit() if len(t) == 2: if combine_only == True:", "name called oscar #change certain character variables import imageio imageio.plugins.ffmpeg.download() from moviepy.video.io.ffmpeg_tools import", "a variable for this) compiledSubs = compileSubs(folderName,fileName,[['_high-frequency-timestamps',0,placeBasedTimestamping]],t_list,interviewer,interviewee,False,language,resampleSubtitles,fullSentenceSubtitles,removeLoneWords) time.sleep(10) #thefile = open(folderName + \"/\"", "+ '.txt').st_size except Exception as e: print e print \"The file named '\"", "the option to remove subtitle entries which may be a single word (and", "True: language = raw_input(\"Enter the language code of your video and transcript or", "is_array = False key = prop_array[pa] # Convert a name like \"snippet.tags[]\" to", "\\n to the end of each line (why?) 
t += \"\\n\" #ES: if", "+ \"/\" + \"delete me.txt\") as f: text = f.readlines() pass #print \"ES:", "files\\n(this folder must be located inside the 'files' folder): \") try: verifyExistence =", "read/write access to the # authenticated user's account and requires requests to use", "True: answer = raw_input(\"\\n7.1 Would you like to reorganize subtitles according to punctuation?", "= \"B.K.\" #fileName = 'Berthe' #originalVideo = \"DD2FD4AE-FEE4-4DF3-9AF7-A4D6BF453B49.flv\" #interviewer = \"S.G.\" #interviewee =", "were selected. Exiting...\" exit() #ES: UPLOADS THE VIDEOS if uploadVideos == True: #ES:", "uploadFull == True: print \"\\nUploading full video...\" vid = videos_insert( {'snippet.categoryId': '22', 'snippet.defaultLanguage':", "exit() print \"\\n\" elif combineSubtitles == True: #in this case, the user has", "'n' or a == '': return a else: a = raw_input(\"Please answer 'y'", "while True: language = raw_input(\"Enter the language code of your video and transcript", "= [u'jDAZHgL-nG4', u'cMNTnd8pApk', u's5hLO6T_BhY', u'gOAoCh5Mecc', u'p0PX5s6k5DU', u'hSmPkLqOt0M', u'2Ik7_biRs9g', u'G64A_hpNWfI', u'ZzVVEcGekv0', u'ZxKJhN3JFfI', u'TsDnqWmpvrw', u'Kvem1XnPHF0',", "= 1 captionsids = [] wait = False if uploadTranscripts == True: #print", "raw_input(\"Enter the name of the folder containing your transcript and/or video and/or subtitle", "import build from apiclient.errors import HttpError from apiclient.http import MediaFileUpload from oauth2client.client import", "# Sample Python code for user authorization import httplib2 import os import sys", "being excessively large) fullSentenceSubtitles = False #ES: IF you enabled 'resampleSubtitles' (above), you", "None: for key, value in kwargs.iteritems(): if value: good_kwargs[key] = value return good_kwargs", "which steps you would like to run. 
If this is your first time", "point by pressing Ctrl+C (Cmd+C on Mac).\" time.sleep(1) print \"\\n\" print \"This tool:\\n-", "raise except RETRIABLE_EXCEPTIONS, e: error = \"A retriable error occurred: %s\" % e", "a == 'y' or a == 'n' or a == '': return a", "in range ...\" loop, we will be setting a property in the #", "#ES: removing punctuation from '[00:00:01.09]' since it is never qualified as a digit", "the current directory. Please see README.md for instructions.\" print \"exiting application...\" time.sleep(2) exit()", "answer = verify_y_n_none(answer) if answer == 'y': deleteVideos = True elif answer ==", "out of the inserted resource. def build_resource(properties): resource = {} for p in", "resource. def build_resource(properties): resource = {} for p in properties: # Given a", "a maximum of 100 video uploads per 24h using the current API credentials.", "= True #ES: delete uploaded video snippets from your Youtube account once subtitle", "#if wait == True: if downloadCaptions == True: print \"\\nDownloading captions...\" c =", "print c,s,captionsids[c-1] sub_txt = \"\" # while waitLonger == True: # try: subtitle", "become excessively long) (n) \") answer = verify_y_n_none(answer) if answer == 'y': fullSentenceSubtitles", "downloadCaptions = False elif answer == '': downloadCaptions = True answer = raw_input(\"\\n6/7", "fileName + \"_\" + str(cc) + \".vtt\", 'w') as thefile: #thefile.write(sub_txt) thefile.write(subtitle) if", "answer == '': uploadTranscripts = True answer = raw_input(\"\\n5/7 Will you be downloading", "verifyLanguage = raw_input(\"\\nYou have entered '\" + language + \"' as the language", "+ originalVideo).st_size except Exception as e: print e print \"The file named '\"", "of the pipeline can run if combine_only == True: t1 = int(t[0])*3600 +", "time.sleep(2) answer = raw_input(\"\\n1/7 Will you be cutting your video into video snippets", "f:videoids = pickle.load(f) except Exception as e: print e print \"\\nThe program is", "and then 
restart the program.\" uploadVideos = True wait = False def yes_or_no(question):", "part=\"snippet\", body=dict( snippet=dict( videoId=video_id, language=language, name=name, isDraft=True ) ), media_body=file ).execute() id =", "if answer == 'y': resampleSubtitles = True elif answer == 'n': resampleSubtitles =", "#print str(splits) #print str(t_list) for sp in splits: if num > 0: if", "status.\" % (name, # id, language, status) c = 1 captionsids = []", "'/' + fileName + \"_\" + str(c) + \".flv\" caption_file = folderName +", "== True or uploadTranscripts == True: with open(folderName + \"/\" + 'videoids.pkl', 'rb')", "t to position c of texts texts[c] += t#.encode('utf8') #print t.replace('[','').replace(']','').replace(':','').replace('.','').replace('\\n','').isdigit() #ES: this", "storage = Storage(\"youtube-api-snippets-oauth2.json\") credentials = storage.get() if credentials is None or credentials.invalid: credentials", "anita/Anita.txt as myfile try: with open(folderName + \"/\" + fileName + \".txt\", 'r')", "good_kwargs = {} if kwargs is not None: for key, value in kwargs.iteritems():", "if cc == \"31\": print subtitle c += 1 time.sleep(3) #deletes videos from", "u'YDHnQAE7U0w', u'yc4IXkGHuXs', u'ZauR51lBjQo', u'kisoEOTjmVI', u'V9XdpjtUU4Q', u'eOdKfhePfTs', u'AAQ9YuybUxM', u'3BaTzSSL4_c', u'OriOoB5yF0s', u'91qOFKithgE', u'WQJQkGEwG-Q', u'n4eW0T6Oek0', u'2dRf-EbKYHA',", "INTERVIEW SECTION def verify_y_n(a): while True: a = a.lower().strip() if a == 'y'", "3 or len(t) < 3: print \"\\nOne of your timestamps (\",':'.join(t) ,\") isn't", "as f: text = f.readlines() pass #print \"ES: text is the following\" +", "removed. #ES: a list of the transcript's timestamps t_list = [] #ES: PREPARE", "has speaker names (e.g. 
the interviewer or interviewee's names) that precede their discourse", "using a .txt file with a list of video lengths, then we need", "= raw_input(\"In order to accurately combine subtitle files, you will need to create", "the video and text snippets\\n- downloads the text snippets as subtitle files (.vtt),\\n-", "exit(\"The upload failed with an unexpected response: %s\" % response) except HttpError, e:", "name of your timestamp list (excluding the \\\".txt\\\" extention): \") else: print \"You", "this discovery document from the developers page # and it should be in", "while True: a = a.lower().strip() if a == 'y' or a == 'n'", "waitLonger == True: # try: subtitle = service.captions().download(id=captionsids[c-1],tfmt='vtt').execute() # waitLonger = False #", "%s\" % e if error is not None: print error retry += 1", "gone missing. The program will restart by uploading all videos. You may need", "= verify_y_n_none(answer) if answer == 'y': resampleSubtitles = True elif answer == 'n':", "(above), you have the option to remove subtitle entries which may be a", "False (make a variable for this) compiledSubs = compileSubs(folderName,fileName,[['_high-frequency-timestamps',0,placeBasedTimestamping]],t_list,interviewer,interviewee,False,language,resampleSubtitles,fullSentenceSubtitles,removeLoneWords) time.sleep(10) #thefile = open(folderName", "'status.privacyStatus': 'unlisted', 'status.publicStatsViewable': ''}, folderName + \"/\" + originalVideo, part='snippet,status') # place video", "answer = raw_input(\"\\n7.1.1 Would you like to reorganize subtitles to prioritize keeping full", "transcript or the intended language code of your subtitles (e.g. 
en, fr, es,", "'', 'status.license': '', 'status.privacyStatus': 'unlisted', 'status.publicStatsViewable': ''}, media_file, part='snippet,status') videoids.append(vid) print videoids #c", "[\"\"] t0 = 0 c = 0 #ES: several print commands were added", "if uploadVideos == False and snipVideos == True: #ES: the following is called", "time.split(\" --> \") t_0 = time[0].split(\":\") t_1 = time[1].split(\":\") t0 = float(int(t_0[0])*3600) +", "run.\" exit() #ES print texts[c] #print \"splits: \" + str(splits) #for i in", "should be written as follows [HH:MM:SS.00], followed by a newline.\\n\\nPlease enter the file", "u'91qOFKithgE', u'WQJQkGEwG-Q', u'n4eW0T6Oek0', u'2dRf-EbKYHA', u'RUgi4NfoPEw', u'n40bGD_9eZI', u'OWWAQTGKyMI', u'8a2De6Gzfek', u'VQJgxR3iAoA', u'UEzrAMq6fGc', u'PXCHMF-Z7X4', u'SU_Rbp9V_Zo', u'VLhSxDh9gI0',", "use this code for processing your files. Continue? (y/n) \") if verifyLanguage.lower() ==", "exceptions are raised. RETRIABLE_EXCEPTIONS = (httplib2.HttpLib2Error, IOError, httplib.NotConnected, httplib.IncompleteRead, httplib.ImproperConnectionState, httplib.CannotSendRequest, httplib.CannotSendHeader, httplib.ResponseNotReady,", "from apiclient.discovery import build from apiclient.errors import HttpError from apiclient.http import MediaFileUpload from", "try: verifyExistence = os.stat(folderName + '/' + originalVideo).st_size except Exception as e: print", "- ES #interviewer = \"C.V.\" #interviewee = \"V.S.\" #where the video and txt", "is not None: for key, value in kwargs.iteritems(): if value: good_kwargs[key] = value", "you a few questions...\\\"), please input them. 
If this does not apply to", "the timestamp that comes before it (\",str(sp1[1]),\" seconds), but it is smaller.\" print", "your video into video snippets (y) \") answer = verify_y_n_none(answer) if answer ==", "YOUTUBE_READ_WRITE_SSL_SCOPE = \"https://www.googleapis.com/auth/youtube.force-ssl\" API_SERVICE_NAME = \"youtube\" API_VERSION = \"v3\" # This variable defines", "removeLoneWords = True elif answer == 'n': removeLoneWords = False elif answer ==", "error retry += 1 if retry > MAX_RETRIES: exit(\"No longer attempting to retry.\")", "time.sleep(2) exit() print \"\\n\" originalVideo = raw_input(\"Enter the file name of your video", "you like your uploaded video snippets to be deleted from Youtube once subtitles", "OAuth 2.0\" # Authorize the request and store authorization credentials. def get_authenticated_service(args): flow", "response: print response else: exit(\"The upload failed with an unexpected response: %s\" %", "#ES: write the previous position of c in texts (a chunk of text", "to run. 
If this is your first time running the tool, simply leave", "elif answer == '': placeBasedTimestamping = False print \"\\n\" folderName = raw_input(\"Enter the", "+ str(c) with open(folderName + \"/\" + fileName + \"_\" + str(c) +", "import strftime,localtime from postprocess_and_fuse_subs import compileSubs import pickle import os #adjust sleeping time", "print \"\\nIf all input was correct, the program will begin snipping\" yes_or_no(question) print", "= [] # #for search_result in search_response.get(\"items\", []): # videos.append(\"%s\" % (search_result[\"id\"][\"videoId\"])) #", "videos_insert( {'snippet.categoryId': '22', 'snippet.defaultLanguage': language, 'snippet.description': 'Description of uploaded video.', 'snippet.tags[]': '', 'snippet.title':", "except Exception as e: print e print \"The folder named '\" + folderName", "position c of texts texts[c] += t#.encode('utf8') #print t.replace('[','').replace(']','').replace(':','').replace('.','').replace('\\n','').isdigit() #ES: this will aggregate", "properties: # Given a key like \"snippet.title\", split into \"snippet\" and \"title\", where", "transcript to your Youtube account once complete uploadFull = False #ES: combine vtt", "'', 'status.privacyStatus': 'unlisted', 'status.publicStatsViewable': ''}, media_file, part='snippet,status') videoids.append(vid) print videoids #c += 1", "#ES: resample subtitles to prevent cut-up phrases, lone-word subtitles, and improve the subtitle", "available space on your hard drive, and then restart the program.\" print \"exiting", "'snippet.title': fileName, 'status.embeddable': '', 'status.license': '', 'status.privacyStatus': 'unlisted', 'status.publicStatsViewable': ''}, folderName + \"/\"", "terminated unexpectedly before and you want to continue where you left off (uploadVideos", "timestamps formatted like such '[HH:MM:SS.00]'.\" else: print \"Please set the variable 'snipTranscript' to", "used as offsets for accurately combining your subtitle files.\\nEach 
timestamp should be written", "you left off (uploadVideos must still be set to True): resumeUploads = False", "max_sleep = 2 ** retry sleep_seconds = random.random() * max_sleep print \"Sleeping %f", "# Trusted testers can download this discovery document from the developers page #", "according to what story you want to process - ES #interviewer = \"C.V.\"", "answer = raw_input(\"If this is your first time running this tool on the", "file...\" status, response = request.next_chunk() if response is not None: if method ==", "combining subtitle files, and we are using a .txt file with a list", "import time from time import strftime,localtime from postprocess_and_fuse_subs import compileSubs import pickle import", "= True elif answer == 'n': resumeUploads = False elif answer == '':", "True answer = raw_input(\"\\n2/7 Will you be uploading video snippets to Youtube for", "are associated. These values will be used as offsets for accurately combining your", "(False) and therefore the following condition is never met. if t != \"\"", "Please see README.md for instructions.\" print \"exiting application...\" time.sleep(2) exit() print \"\\n\" originalVideo", "'oscar' #fileName = 'oscar' #originalVideo = \"Oscar.mp4\" ### START BOILERPLATE CODE # Sample", "in text] #split times (?) splits = [] #list of cut-up texts texts", "id = vid print \"\\nUploading compiled subtitles...\" caption_file = folderName + '/' +", "input was correct, the program will begin snipping and uploading content to Youtube", "developers page # and it should be in the same directory with the", "resampleSubtitles = False #ES: IF you enabled 'resampleSubtitles' (above), you have the option", "to True): resumeUploads = False #ES: upload snippet transcripts (.txt) uploadTranscripts = True", "RETRIABLE_EXCEPTIONS, e: error = \"A retriable error occurred: %s\" % e if error", "created. 
Continue?\" print \"\\nIf all input was correct, the program will begin snipping\"", "allows for full read/write access to the # authenticated user's account and requires", "we will be setting a property in the # resource's \"snippet\" object. ref[key]", "except ValueError as e: print e print \"\\n One of your timestamps isn't", "video? (y) \") answer = verify_y_n_none(answer) if answer == 'y': combineSubtitles = True", "= False fileName = raw_input(\"Enter the file name of your transcript (excluding the", "and/or video and/or subtitle files\\n(this folder must be located inside the 'files' folder):", "hours, depending on the size of your video file (\" + str(videoSize) +", "seconds), but it is smaller.\" print \"Please make sure your timestamps are in", "subtitles according to punctuation? (Experimental; can lead to short, choppy, fast subtitles that", "t1 = int(t[0])*3600 + int(t[1])*60 + float(t[2]) splits.append([t0,t1]) #print int(t[0])*3600 + int(t[1])*60 +", "= (httplib2.HttpLib2Error, IOError, httplib.NotConnected, httplib.IncompleteRead, httplib.ImproperConnectionState, httplib.CannotSendRequest, httplib.CannotSendHeader, httplib.ResponseNotReady, httplib.BadStatusLine) # Always retry", "see README.md for instructions.\" print \"exiting application...\" time.sleep(2) exit() print \"\\n\" originalVideo =", "formatting.\" print \"\\nVerifying if timestamps are in ascending order...\" sp1 = 0 num", "#\",str(num+2),\" (equivalent to \",str(sp[1]),\" seconds) should be a larger number than the timestamp", "True if reply[0] == 'n': exit() if uploadVideos == False and snipVideos ==", "True elif answer == 'n': resampleSubtitles = False elif answer == '': resampleSubtitles", "# time.sleep(120) sub_txt += subtitle cc = \"\" if c < 10: cc", "= t.replace('[','').replace(']','').replace('\\n','') t = unicode(t, \"UTF-8\") #split the timestamps at : (into 3)", "don't think this function is ever called... # Call the API's captions.insert method", "combine subtitles. 
the switch combine_only allows some different functionality down the road combine_only", "= verify_y_n_none(answer) if answer == 'y': deleteVideos = True elif answer == 'n':", "'n': return a else: a = raw_input(\"Please answer 'y' or 'n': \") continue", "your video? (y) \") answer = verify_y_n_none(answer) if answer == 'y': combineSubtitles =", "set to True for other processes to run) snipTranscript = True #ES: cut", "to use an SSL connection. YOUTUBE_READ_WRITE_SSL_SCOPE = \"https://www.googleapis.com/auth/youtube.force-ssl\" API_SERVICE_NAME = \"youtube\" API_VERSION =", "True or deleteVideos == True or uploadTranscripts == True: with open(folderName + \"/\"", "#compiles them all print \"\\nCombining subtitle snippets ...\" #ES: this is a feature", "raw_input(\"Enter the file name of your video (this time including the file's extention):", "e.resp.status in RETRIABLE_STATUS_CODES: error = \"A retriable HTTP error %d occurred:\\n%s\" % (e.resp.status,e.content)", "results = service.playlistItems().insert( body=resource, **kwargs ).execute() print_results(results) #'snippet.playlistId': playlistID, playlist_items_insert( {'snippet.resourceId.kind': 'youtube#video', 'snippet.resourceId.videoId':", "\"oscar4.txt\" #might have to be in a folder name called oscar #change certain", "'': fullSentenceSubtitles = False answer = raw_input(\"\\n7.1.2 Would you like to reorganize subtitles", "in ascending order...\" sp1 = 0 num = 0 #print str(splits) #print str(t_list)", "'w') as thefile: #thefile.write(sub_txt) thefile.write(subtitle) if cc == \"31\": print subtitle c +=", "'snippet.title': media_file, 'status.embeddable': '', 'status.license': '', 'status.privacyStatus': 'unlisted', 'status.publicStatsViewable': ''}, media_file, part='snippet,status') videoids.append(vid)", "MISSING_CLIENT_SECRETS_MESSAGE = \"WARNING: Please configure OAuth 2.0\" # Authorize the request and store", "print \"\\nCombining subtitle snippets ...\" #ES: this is a feature that needs exploration", 
"str(len(splits)) + \" text snippets based on it containing \" + str(len(splits)) +", "t0 = t0 + t1 else: t1 = int(t[0])*60 + int(t[1]) splits.append([t0,t1]) t_list.append(t1)", "input your interviewee's name as it appears in the transcript: \") print \"\\n\"", "c < 10: cc = \"0\" + str(c) else: cc = str(c) #print", "= resumable_upload(request, 'video', 'insert') # See full sample for function return vid def", "combine vtt snippets that were downloaded from Youtube into a total subtitle file.", "if a == 'y' or a == 'n' or a == '': return", "appears in the transcript: \") print \"\\n\" #____________# # let rodolphe know if", "Creating dummy file 'delete me.txt' to finish pipeline.\" foo = open(folderName + \"/\"", "may need to remove any previously-uploaded videos if the videos you are uploading", "RETRIABLE_EXCEPTIONS = (httplib2.HttpLib2Error, IOError, httplib.NotConnected, httplib.IncompleteRead, httplib.ImproperConnectionState, httplib.CannotSendRequest, httplib.CannotSendHeader, httplib.ResponseNotReady, httplib.BadStatusLine) # Always", "texts[c-1]) #time.sleep(.1) texts.append(\"\") texts[c] = \"\" #t = t.replace(\" \", \"\") #t =", "code for videos.insert def videos_insert(properties, media_file, **kwargs): resource = build_resource(properties) # See full", "u'kisoEOTjmVI', u'V9XdpjtUU4Q', u'eOdKfhePfTs', u'AAQ9YuybUxM', u'3BaTzSSL4_c', u'OriOoB5yF0s', u'91qOFKithgE', u'WQJQkGEwG-Q', u'n4eW0T6Oek0', u'2dRf-EbKYHA', u'RUgi4NfoPEw', u'n40bGD_9eZI', u'OWWAQTGKyMI',", "accurately combine subtitle files, you will need to create a list of timestamps", "+ \"' does not exist in the folder '\" + folderName + \"'.", "files you have indicated, you will temporarily require \" + str(videoSize) + \"", "function results = service.playlistItems().insert( body=resource, **kwargs ).execute() print_results(results) #'snippet.playlistId': playlistID, playlist_items_insert( {'snippet.resourceId.kind': 'youtube#video',", "complete uploadFull = False #ES: combine vtt snippets 
that were downloaded from Youtube", "video (this time including the file's extention): \") try: verifyExistence = os.stat(folderName +", "exit() while True: language = raw_input(\"Enter the language code of your video and", "be cutting your video into video snippets (y) \") answer = verify_y_n_none(answer) if", "= \"V.S.\" #where the video and txt files are stored #folderName = 'venant'", "originalVideo).st_size except Exception as e: print e print \"The file named '\" +", "file including its ext #originalVideo = \"venant.mp4\" #interviewer = \"E.H.\" #interviewee = \"E.M.\"", "'wb') as f: # pickle.dump(videoids, f) if resumeUploads == True: print \"\\nResuming video", "= True answer = raw_input(\"\\n6/7 Would you like your uploaded video snippets to", "+ \"/\" + 'videoids.pkl', 'rb') as f:videoids = pickle.load(f) except Exception as e:", "from youtube -ES if deleteVideos == True: print \"\\nDeleting videos...\\n\" c = 1", "import flow_from_clientsecrets from oauth2client.file import Storage from oauth2client.tools import argparser, run_flow # The", "timestamp list (excluding the \\\".txt\\\" extention): \") else: print \"You have not chosen", "this function is ever called... # Call the API's captions.insert method to upload", "== 'n': uploadVideos = False elif answer == '': uploadVideos = True answer", "the resource # already has a \"snippet\" object. 
ref = ref[key] return resource", "not t.replace('[','').replace(']','').replace(':','').replace('.','').replace('\\n','').isdigit() and t != \"\\n\": #ES: add t to position c of", "\"Please make sure you have the available space on your hard drive, and", "+ 'videoids.pkl', 'rb') as f: videoids = pickle.load(f) print \"\\nThe video IDs are", "#thefile.write(sub_txt) thefile.write(subtitle) if cc == \"31\": print subtitle c += 1 time.sleep(3) #deletes", "import ffmpeg_extract_subclip import time from time import strftime,localtime from postprocess_and_fuse_subs import compileSubs import", "exit() #ES print texts[c] #print \"splits: \" + str(splits) #for i in splits:", "authorization import httplib2 import os import sys import httplib import random from apiclient.discovery", "as myfile try: with open(folderName + \"/\" + fileName + \".txt\", 'r') as", "when these exceptions are raised. RETRIABLE_EXCEPTIONS = (httplib2.HttpLib2Error, IOError, httplib.NotConnected, httplib.IncompleteRead, httplib.ImproperConnectionState, httplib.CannotSendRequest,", "False elif answer == '': downloadCaptions = True answer = raw_input(\"\\n6/7 Would you", "t1 = int(t[0])*60 + int(t[1]) splits.append([t0,t1]) t_list.append(t1) t0 = t1 elif len(t) ==", "u'ZxKJhN3JFfI', u'TsDnqWmpvrw', u'Kvem1XnPHF0', u'VwqhkmbiLh0', u'V1sv1MYLdC0'] #videoids = [u'cj62vgUfnik', u'5k9WCcWCLiU', u'MexTd0EGfRc', u'hWY_30yHOec', u'GrMtKARI9kQ', u'YDHnQAE7U0w',", "and you want to continue where you left off (uploadVideos must still be", "for instructions.\" print \"exiting application...\" time.sleep(2) exit() print \"\\n\" videoSize = os.stat(folderName +", "= True if pa == (len(prop_array) - 1): # Leave properties without values", "'videoids.pkl', 'wb') as f: pickle.dump(videoids, f) if wait == True: print \"\\nWaiting for", "None retry = 0 while response is None: try: print \"Uploading file...\" status,", "subtitle files, and we are using a .txt file with a list of", "where # \"snippet\" will be an object and 
\"title\" will be a property", "the video/caption upload process was terminated unexpectedly before and you want to continue", "For example, the property is \"snippet.title\", but the resource does # not yet", "\"\\n\" videoSize = os.stat(folderName + '/' + originalVideo).st_size/1000000 answer = raw_input(\"If this is", "\"\\nThe document named '\" + fileName + \".txt' was cut into \" +", "u'5k9WCcWCLiU', u'MexTd0EGfRc', u'hWY_30yHOec', u'GrMtKARI9kQ', u'YDHnQAE7U0w', u'yc4IXkGHuXs', u'ZauR51lBjQo', u'kisoEOTjmVI', u'V9XdpjtUU4Q', u'eOdKfhePfTs', u'AAQ9YuybUxM', u'3BaTzSSL4_c', u'OriOoB5yF0s',", "\"\\n1. Slicing into \" + str(len(splits)) + \" parts\" time.sleep(1) for s in", "lead to short, choppy, fast subtitles that are hard to read) resampleSubtitles =", "depending on which steps you would like to run. If this is your", "an apiclient.errors.HttpError with one of these status # codes is raised. RETRIABLE_STATUS_CODES =", "+ fileName + \"_\" + str(c) + \".txt\", 'w') try: #ES: write the", "uploadVideos = False elif answer == '': uploadVideos = True answer = raw_input(\"\\n3/7", "if timestamps are in ascending order...\" sp1 = 0 num = 0 #print", "like \",len(videoids),\" video snippets were already uploaded to Youtube. Now trying to resume", "exit(\"No longer attempting to retry.\") max_sleep = 2 ** retry sleep_seconds = random.random()", "Python code for user authorization import httplib2 import os import sys import httplib", "only combine subtitles. 
the switch combine_only allows some different functionality down the road", "u'V9XdpjtUU4Q', u'eOdKfhePfTs', u'AAQ9YuybUxM', u'3BaTzSSL4_c', u'OriOoB5yF0s', u'91qOFKithgE', u'WQJQkGEwG-Q', u'n4eW0T6Oek0', u'2dRf-EbKYHA', u'RUgi4NfoPEw', u'n40bGD_9eZI', u'OWWAQTGKyMI', u'8a2De6Gzfek',", "**kwargs ) vid = resumable_upload(request, 'video', 'insert') # See full sample for function", "True wait = False def yes_or_no(question): while \"the answer is invalid\": reply =", "in kwargs.iteritems(): if value: good_kwargs[key] = value return good_kwargs ### END BOILERPLATE CODE", "properly run.\" exit() #ES print texts[c] #print \"splits: \" + str(splits) #for i", "#thefile = open(folderName + \"/\" + fileName + \".srt\", 'w') #thefile.write(compiledSubs) if uploadFull", "snippets to Youtube as transcript files for these video snippets,\\n- allows Youtube to", "\") answer = verify_y_n_none(answer) if answer == 'y': removeLoneWords = True elif answer", "guidelines on proper timestamp formatting.\" print \"\\nVerifying if timestamps are in ascending order...\"", "#videoids = [u'cj62vgUfnik', u'5k9WCcWCLiU', u'MexTd0EGfRc', u'hWY_30yHOec', u'GrMtKARI9kQ', u'YDHnQAE7U0w', u'yc4IXkGHuXs', u'ZauR51lBjQo', u'kisoEOTjmVI', u'V9XdpjtUU4Q', u'eOdKfhePfTs',", "#t = t t = t.replace('[','').replace(']','').replace('\\n','') t = unicode(t, \"UTF-8\") #split the timestamps", "localtime()),\". Script will resume in \" + str(sleepingTime/60) + \" minutes...\" time.sleep(sleepingTime) id", "the following switches control how subtitles are altered when concatenating snippets (i.e. 
when", "False elif answer == '': resumeUploads = False answer = raw_input(\"\\n4/7 Will you", "application...\" time.sleep(2) exit() print \"\\n\" videoSize = os.stat(folderName + '/' + originalVideo).st_size/1000000 answer", "the beginning of the line is not a digit and is not a", "> 0: print \"(However, it looks like \",len(videoids),\" video snippets were already uploaded", "for accurately combining your subtitle files.\\nEach timestamp should be written as follows [HH:MM:SS.00],", "return resource # Remove keyword arguments that are not set def remove_empty_kwargs(**kwargs): good_kwargs", "like such '[HH:MM:SS.00]'.\" else: print \"Please set the variable 'snipTranscript' to True so", "based on its timestamps,\\n- snips the associated video accordingly into video snippets,\\n- uploads", "you like to reorganize subtitles to prioritize keeping full sentences intact? (Experimental; this", "never qualified as a digit (False) and therefore the following condition is never", "to the name of the input .txt file (excluding .txt) #fileName = 'venant'", "wait = True with open(folderName + \"/\" + 'captionsids.pkl', 'wb') as f: pickle.dump(captionsids,", "def remove_empty_kwargs(**kwargs): good_kwargs = {} if kwargs is not None: for key, value", "'Enter' key.\" time.sleep(1) interviewer = raw_input(\"\\n6.3.1 Please input your interviewer's name as it", "the folder '\" + folderName + \"'. Please see README.md for instructions.\" print", "\"You have not chosen any options for running this application. Exiting...\" exit() while", "\"\\nOne of your timestamps (\",':'.join(t) ,\") isn't formatted correctly. Consult README.md for guidelines", "giving up. MAX_RETRIES = 10 # Always retry when these exceptions are raised.", "if resumeUploads == True: print \"\\nResuming video uploads...\\n\" time.sleep(1) try: with open(folderName +", "(and put them in an adjacent subtitle (verify)) removeLoneWords = False #____________# #ES:", "process? 
(n) \") answer = verify_y_n_none(answer) if answer == 'y': resumeUploads = True", "your files. Continue? (y/n) \") if verifyLanguage.lower() == '' or 'y': break #if", "s_to_hms(i[0]),\"->\",s_to_hms(i[1]) #time.sleep(60) #print splits,splits[len(splits)-1][1] #splits.append([splits[len(splits)-1][1],7200]) #print splits #print \"Wait\" #time.sleep(30) c = 0", "associated video accordingly into video snippets,\\n- uploads these video snippets to Youtube as", "a client secret file: google apis dashboard --> create a new project on", "= f.readlines() except IOError as e: print \"No text file found because you", "take between 20 minutes and several hours, depending on the size of your", "= False print \"\\n\" folderName = raw_input(\"Enter the name of the folder containing", "the transcript: \") print \"\\n\" #____________# # let rodolphe know if there is", "= [x.strip() for x in text] #split times (?) splits = [] #list", "'snippet.tags[]': '', 'snippet.title': media_file, 'status.embeddable': '', 'status.license': '', 'status.privacyStatus': 'unlisted', 'status.publicStatsViewable': ''}, media_file,", "is\",strftime(\"%H:%M:%S\", localtime()),\". Script will resume in \" + str(sleepingTime/60) + \" minutes...\" time.sleep(sleepingTime)", "prop_array = p.split('.') ref = resource for pa in range(0, len(prop_array)): is_array =", "raw_input(\"\\n6.3.2 Please input your interviewee's name as it appears in the transcript: \")", "open(folderName + \"/\" + 'captionsids.pkl', 'rb') as f: captionsids = pickle.load(f) #if wait", "#print t.replace('[','').replace(']','').replace(':','').replace('.','').replace('\\n','').isdigit() #ES: this will aggregate phrases (t) into one list item (a", "resume uploads because there are no uploads to resume or your 'videoids.pkl' file", "video snippets were already uploaded to Youtube. 
Now trying to resume uploading the", "fileName + \".txt\") as f: text = f.readlines() except IOError as e: print", "os #adjust sleeping time as needed - ES #adjust switches as needed sleepingTime", "+ 'captionsids.pkl', 'rb') as f: captionsids = pickle.load(f) #if wait == True: if", "video and transcript or the intended language code of your subtitles (e.g. en,", "in response: print \"Video id '%s' was successfully uploaded.\" % response['id'] videoid =", "for s in splits: c += 1 if c > len(videoids): ffmpeg_extract_subclip(folderName +", "text snippets based on it containing \" + str(len(splits)) + \" timestamps formatted", "{'snippet.categoryId': '22', 'snippet.defaultLanguage': language, 'snippet.defaultAudioLanguage': language, 'snippet.description': 'Description of uploaded video.', 'snippet.tags[]': '',", "fileName + \".txt' does not exist in the folder '\" + folderName +", "not a digit and is not a next-line char #ES: removing punctuation from", "sp num+=1 print \"\\nThe document named '\" + fileName + \".txt' was cut", "a = raw_input(\"Please answer 'y' or 'n': \") continue def verify_y_n_none(a): while True:", "your transcript has speaker names (e.g. 
the interviewer or interviewee's names) that precede", "(Cmd+C on Mac).\" time.sleep(1) print \"\\n\" print \"This tool:\\n- snips your transcript (.txt)", "+ \"/\" + originalVideo, part='snippet,status') # place video in custom playlist def playlist_items_insert(properties,", "time[1].split(\":\") t0 = float(int(t_0[0])*3600) + int(float(t_0[1])*60) + int(float(t_0[2])) t1 = float(int(t_1[0])*3600) + int(float(t_1[1])*60)", "print \"Timestamp number #\",str(num+2),\" (equivalent to \",str(sp[1]),\" seconds) should be a larger number", "argparser.parse_args() service = get_authenticated_service(args) def print_results(results): print(results) # Build a resource based on", "isDraft=True, sync=True ) ), media_body=caption_file ).execute() captionsids.append(a['id']) c += 1 #print a wait", "= int(t[0])*3600 + int(t[1])*60 + float(t[2]) splits.append([t0,t0+t1]) #print int(t[0])*3600 + int(t[1])*60 + int(t[2])", "to Youtube. Now trying to resume uploading the remaining snippets...)\" time.sleep(1) for s", "= \"Frederic.mov\" #interviewer = \"M.M.\" #interviewee = \"B.K.\" #fileName = 'Berthe' #originalVideo =", "folder): \") try: verifyExistence = os.stat(folderName).st_size except Exception as e: print e print", "#time.sleep(30) c = 0 #print splits videoids = [] #videoids = [u'jDAZHgL-nG4', u'cMNTnd8pApk',", "sleepingTime) else: if downloadCaptions == True: with open(folderName + \"/\" + 'captionsids.pkl', 'rb')", "u'cMNTnd8pApk', u's5hLO6T_BhY', u'gOAoCh5Mecc', u'p0PX5s6k5DU', u'hSmPkLqOt0M', u'2Ik7_biRs9g', u'G64A_hpNWfI', u'ZzVVEcGekv0', u'ZxKJhN3JFfI', u'TsDnqWmpvrw', u'Kvem1XnPHF0', u'VwqhkmbiLh0', u'V1sv1MYLdC0']", "combineSubtitles == True: answer = raw_input(\"\\n7.1 Would you like to reorganize subtitles according", "the following is called when videos are being uploaded (uploadVideos = True) to", "\"\\nCombining subtitle snippets ...\" #ES: this is a feature that needs exploration so", "been successfully generated? 
(n) \") answer = verify_y_n_none(answer) if answer == 'y': deleteVideos", "for t in text: #add a \\n to the end of each line", "args) # Trusted testers can download this discovery document from the developers page", "+ \". \" + str(len(splits)) + \" video snippets will therefore be uploaded", "retry > MAX_RETRIES: exit(\"No longer attempting to retry.\") max_sleep = 2 ** retry", "#t = t.replace(\" \", \"\") #t = t t = t.replace('[','').replace(']','').replace('\\n','') t =", "def hms_to_s(time): time = unicode(time, \"UTF-8\") time = time.split(\" --> \") t_0 =", "t_0 = time[0].split(\":\") t_1 = time[1].split(\":\") t0 = float(int(t_0[0])*3600) + int(float(t_0[1])*60) + int(float(t_0[2]))", "your video.\\n\\nYou may switch these processes 'on' or 'off' depending on which steps", "\"for pa in range ...\" loop, we will be setting a property in", "= t t = t.replace('[','').replace(']','').replace('\\n','') t = unicode(t, \"UTF-8\") #split the timestamps at", "print \"Waiting for transcripts \" + str(c) + \" \" + captionsids[c-1] +", "folder #transcript text file \"oscar4.txt\" #might have to be in a folder name", "Please see README.md for instructions.\" print \"exiting application...\" time.sleep(2) exit() print \"\\n\" videoSize", "of cumulative times so that the rest of the pipeline can run if", "+ fileName + \"_\" + str(c) + \".txt\", 'w') as thefile: #thefile =", "#time.sleep(60) #print splits,splits[len(splits)-1][1] #splits.append([splits[len(splits)-1][1],7200]) #print splits #print \"Wait\" #time.sleep(30) c = 0 #print", "# Always retry when these exceptions are raised. 
RETRIABLE_EXCEPTIONS = (httplib2.HttpLib2Error, IOError, httplib.NotConnected,", "#originalVideo = \"venant.mp4\" #interviewer = \"E.H.\" #interviewee = \"E.M.\" #fileName = 'Frederic' #originalVideo", "True or resumeUploads == True or downloadCaptions == True or deleteVideos == True:", "videos_insert( {'snippet.categoryId': '22', 'snippet.defaultLanguage': language, 'snippet.defaultAudioLanguage': language, 'snippet.description': 'Description of uploaded video.', 'snippet.tags[]':", "as f: # pickle.dump(videoids, f) if resumeUploads == True: print \"\\nResuming video uploads...\\n\"", "in splits: print c,s media_file = folderName + '/' + fileName + \"_\"", "part='snippet', onBehalfOfContentOwner='') print \"Waiting for full video to be processed. It is\",strftime(\"%H:%M:%S\", localtime()),\".", "improve the subtitle structure overall (can lead to short, choppy, fast subtitles that", "False answer = raw_input(\"\\n7/7 Will you be combining the downloaded subtitle snippets into", "snippet=dict( videoId=videoids[c-1], language=language, name=media_file, isDraft=True, sync=True ) ), media_body=caption_file ).execute() captionsids.append(a['id']) c +=", "print \"\\n\" if snipVideos == True or uploadTranscripts == True or resumeUploads ==", "'': combineSubtitles = True if combineSubtitles == True: answer = raw_input(\"\\n7.1 Would you", "from Youtube into a total subtitle file. combineSubtitles = True #ES: the following", "\"\\n\" elif combineSubtitles == True: #in this case, the user has chosen to", "#fileName refers to the name of the input .txt file (excluding .txt) #fileName", "#ES: UPLOADS THE VIDEOS if uploadVideos == True: #ES: the following is called", "# place video in custom playlist def playlist_items_insert(properties, **kwargs): resource = build_resource(properties) #", "'') with open(folderName + \"/\" + \"delete me.txt\") as f: text = f.readlines()", "captions. It is\",strftime(\"%H:%M:%S\", localtime()),\". 
Script will resume in \" + str(2 * sleepingTime", "remove subtitle entries which may be a single word (and put them in", "== True: with open(folderName + \"/\" + 'captionsids.pkl', 'rb') as f: captionsids =", "to create a list of timestamps demarcating the length of each video to", "e print \"The file named '\" + fileName + \".txt' does not exist", "key = key[0:len(key)-2:] is_array = True if pa == (len(prop_array) - 1): #", "manually on youtube.com and then restart the program.\" uploadVideos = True wait =", "== True: #in this case, the user has chosen to only combine subtitles.", "1 time.sleep(3) #deletes videos from youtube -ES if deleteVideos == True: print \"\\nDeleting", "== True: with open(folderName + \"/\" + 'videoids.pkl', 'rb') as f: videoids =", "be written as follows [HH:MM:SS.00], followed by a newline.\\n\\nPlease enter the file name", "p in properties: # Given a key like \"snippet.title\", split into \"snippet\" and", "be uploaded. question = \"\\nThere were \" + str(len(splits)) + \" timestamps detected", "# videos.append(\"%s\" % (search_result[\"id\"][\"videoId\"])) # #print \"Videos:\\n\", \"\\n\".join(videos), \"\\n\" #ES: I don't think", ").execute() id = insert_result[\"id\"] name = insert_result[\"snippet\"][\"name\"] language = insert_result[\"snippet\"][\"language\"] status = insert_result[\"snippet\"][\"status\"]", "def videos_insert(properties, media_file, **kwargs): resource = build_resource(properties) # See full sample for function", "= folderName + '/' + fileName + \"_\" + str(c) + \".flv\" caption_file", "'n': removeLoneWords = False elif answer == '': removeLoneWords = False answer =", "make sure that place names are never split between 2 timestamps, at the", "print \"Uploading file...\" status, response = request.next_chunk() if response is not None: if", "needed - ES #adjust switches as needed sleepingTime = 400 #___SWITCHES(defaults)___# #ES: cut", "with your video snippets? 
(y) \") answer = verify_y_n_none(answer) if answer == 'y':", "+ \"' as the language code for your transcript and video files. Youtube", "c = 0 #ES: several print commands were added for guidance. they can", "(uploadVideos = True) to warn the user as to how many videos will", "part=\"id\", # type=\"video\", # fields=\"items/id\" #).execute() # #videos = [] # #for search_result", "= raw_input(\"\\n7.1.2 Would you like to reorganize subtitles to remove lone words? (Experimental)", "click \"create credentials\" create and \"OAUT client id\" \"\"\" #CLIENT_SECRETS_FILE = \"client_secret.json\" #api", "10: cc = \"0\" + str(c) else: cc = str(c) #print subtitle print", "elif answer == 'n': fullSentenceSubtitles = False elif answer == '': fullSentenceSubtitles =", "u'Kvem1XnPHF0', u'VwqhkmbiLh0', u'V1sv1MYLdC0'] #videoids = [u'cj62vgUfnik', u'5k9WCcWCLiU', u'MexTd0EGfRc', u'hWY_30yHOec', u'GrMtKARI9kQ', u'YDHnQAE7U0w', u'yc4IXkGHuXs', u'ZauR51lBjQo',", "body=dict( snippet=dict( videoId=video_id, language=language, name=name, isDraft=True ) ), media_body=file ).execute() id = insert_result[\"id\"]", "the videos you are uploading are identical. 
If so, do this manually on", "this is a feature that needs exploration so as to make sure that", "len(t) == 3: #if we are only combining subtitle files, and we are", "resumeUploads = False #ES: upload snippet transcripts (.txt) uploadTranscripts = True #ES: download", "!= 'insert' or 'id' not in response: print response else: exit(\"The upload failed", "+ fileName + \".txt' was cut into \" + str(len(splits)) + \" text", "you have gone over README.md before proceeding.\" time.sleep(1) print \"You may terminate the", "str(int(h)) + \":\" + str(int(m)) + \":\" + str(int(sec)) #ES: open anita/Anita.txt as", "= \"https://www.googleapis.com/auth/youtube.force-ssl\" API_SERVICE_NAME = \"youtube\" API_VERSION = \"v3\" # This variable defines a", "\"Sleeping %f seconds and then retrying...\" % sleep_seconds time.sleep(sleep_seconds) return response['id'] if uploadTranscripts", "open(folderName + \"/\" + fileName + \".txt\") as f: text = f.readlines() except", "into a total subtitle file. combineSubtitles = True #ES: the following switches control", "\".txt\", 'r') as myfile: text = myfile.read().replace('\\n', '') #print \"ES: replace \\\\n with", "time.sleep(1) for s in splits: c += 1 if c > len(videoids): ffmpeg_extract_subclip(folderName", "uploads...\\n\" time.sleep(1) try: with open(folderName + \"/\" + 'videoids.pkl', 'rb') as f:videoids =", "minutes...\" time.sleep(sleepingTime) id = vid print \"\\nUploading compiled subtitles...\" caption_file = folderName +", "Youtube? 
(y) \") answer = verify_y_n_none(answer) if answer == 'y': downloadCaptions = True", "PREPARE INPUT TEXT FOR PROCESSING if snipTranscript == True: for t in text:", "+ originalVideo + \"' does not exist in the folder '\" + folderName", "your timestamps:\" print \"Timestamp number #\",str(num+2),\" (equivalent to \",str(sp[1]),\" seconds) should be a", "youtube online and copy url id to script #playlistID = \"PLSbFnWujSxCZxm7tYAGNeG9l5s19m4T65\" #language =", "False elif answer == '': resampleSubtitles = False if resampleSubtitles == True: answer", "'w') try: #ES: write the previous position of c in texts (a chunk", "print \"The file named '\" + originalVideo + \"' does not exist in", "print subtitle c += 1 time.sleep(3) #deletes videos from youtube -ES if deleteVideos", "= \"PLSbFnWujSxCZxm7tYAGNeG9l5s19m4T65\" #language = 'fr' #change these variables according to what story you", "answer == 'y': resumeUploads = True elif answer == 'n': resumeUploads = False", "the value as an array. if key[-2:] == '[]': key = key[0:len(key)-2:] is_array", "the language code for your transcript and video files. Youtube will use this", "#transcript text file \"oscar4.txt\" #might have to be in a folder name called", "pipeline can run if combine_only == True: t1 = int(t[0])*3600 + int(t[1])*60 +", "folderName + \"' does not exist in the current directory. Please see README.md", "more advanced users or users who have already used this tool, please select", "code of your subtitles (e.g. 
en, fr, es, etc.):\\n(You can refer to the", "API_VERSION, http=credentials.authorize(httplib2.Http())) # Explicitly tell the underlying HTTP transport library not to retry,", "elif answer == 'n': downloadCaptions = False elif answer == '': downloadCaptions =", "httplib.ResponseNotReady, httplib.BadStatusLine) # Always retry when an apiclient.errors.HttpError with one of these status", "Storage from oauth2client.tools import argparser, run_flow # The CLIENT_SECRETS_FILE variable specifies the name", "= verify_y_n_none(answer) if answer == 'y': uploadTranscripts = True elif answer == 'n':", "text = myfile.read().replace('\\n', '') with open(folderName + \"/\" + \"delete me.txt\") as f:", "texts[c] += t#.encode('utf8') #print t.replace('[','').replace(']','').replace(':','').replace('.','').replace('\\n','').isdigit() #ES: this will aggregate phrases (t) into one", "as f: videoids = pickle.load(f) print \"\\nThe video IDs are composed of the", "\"\\n\" #ES: I don't think this function is ever called... # Call the", "long) (n) \") answer = verify_y_n_none(answer) if answer == 'y': fullSentenceSubtitles = True", "#print splits videoids = [] #videoids = [u'jDAZHgL-nG4', u'cMNTnd8pApk', u's5hLO6T_BhY', u'gOAoCh5Mecc', u'p0PX5s6k5DU', u'hSmPkLqOt0M',", "overall (can lead to short, choppy, fast subtitles that are hard to read)", "except Exception as e: print e print \"\\nThe program is unable to resume", "\"\"\" to create a client secret file: google apis dashboard --> create a", "means that in the next time through the # \"for pa in range", "resource = build_resource(properties) # See full sample for function kwargs = remove_empty_kwargs(**kwargs) #", "for syncing with your video snippets? 
(y) \") answer = verify_y_n_none(answer) if answer", "cut transcript into snippets based on the transcript's timestamps (must be set to", "videoid = response['id'] elif method != 'insert' or 'id' not in response: print", "these subtitle files together into a single subtitle file for your video.\\n\\nYou may", "may switch these processes 'on' or 'off' depending on which steps you would", "504] # This method implements an exponential backoff strategy to resume a #", "handling retry logic ourselves. httplib2.RETRIES = 1 # Maximum number of times to", "True #ES: delete uploaded video snippets from your Youtube account once subtitle processing", "snippets will therefore be uploaded to YouTube for processing. YouTube allows a maximum", "reply = str(raw_input(question+' (y/n): ')).lower().strip() if reply[0] == 'y': return True if reply[0]", "\". \" + str(len(splits)) + \" video snippets will created. Continue?\" print \"\\nIf", "copy url id to script #playlistID = \"PLSbFnWujSxCZxm7tYAGNeG9l5s19m4T65\" #language = 'fr' #change these", "print \"This tool:\\n- snips your transcript (.txt) into text snippets based on its", "== 2: if combine_only == True: t1 = int(t[0])*60 + int(t[1]) splits.append([t0,t0+t1]) t_list.append(t1)", "like \"snippet.title\", split into \"snippet\" and \"title\", where # \"snippet\" will be an", "'unlisted', 'status.publicStatsViewable': ''}, media_file, part='snippet,status') videoids.append(vid) print videoids #c += 1 wait =", "\"snippet\" object. Create the snippet object here. # Setting \"ref = ref[key]\" means", "if reply[0] == 'y': return True if reply[0] == '': return True if", "1 time.sleep(10) if combineSubtitles == True: #compiles them all print \"\\nCombining subtitle snippets", "the following two answers blank by pressing the 'Enter' key.\" time.sleep(1) interviewer =", "to create a client secret file: google apis dashboard --> create a new", "variable defines a message to display if the CLIENT_SECRETS_FILE is # missing. 
MISSING_CLIENT_SECRETS_MESSAGE", "like your uploaded video snippets to be deleted from Youtube once subtitles have", "with playlist id, might need to create a playlist in youtube online and", "* sleepingTime / 60) + \" minutes...\" time.sleep(2 * sleepingTime) else: if downloadCaptions", "the end of each line (why?) t += \"\\n\" #ES: if the beginning", "= compileSubs(folderName,fileName,[['_high-frequency-timestamps',0,placeBasedTimestamping]],t_list,interviewer,interviewee,False,language,resampleSubtitles,fullSentenceSubtitles,removeLoneWords) time.sleep(10) #thefile = open(folderName + \"/\" + fileName + \".srt\", 'w')", "downloading the generated subtitle snippets from Youtube? (y) \") answer = verify_y_n_none(answer) if", "one of these status # codes is raised. RETRIABLE_STATUS_CODES = [500, 502, 503,", "video file in same folder #transcript text file \"oscar4.txt\" #might have to be", "\"_\" + str(c) + \".txt\", 'w') as thefile: #thefile = open(folderName + \"/\"", "for videos to be processed. It is\",strftime(\"%H:%M:%S\", localtime()),\". Script will resume in \"", "/ 60) + \" minutes...\" time.sleep(2 * sleepingTime) else: if downloadCaptions == True:", "(y) \") answer = verify_y_n_none(answer) if answer == 'y': uploadTranscripts = True elif", "properties without values out of inserted resource. 
if properties[p]: if is_array: ref[key] =", "(n) \") answer = verify_y_n_none(answer) if answer == 'y': fullSentenceSubtitles = True elif", "+ fileName + \".txt\", 'r') as myfile: text = myfile.read().replace('\\n', '') #print \"ES:", "full sample for function kwargs = remove_empty_kwargs(**kwargs) # See full sample for function", "the transcript: \") interviewee = raw_input(\"\\n6.3.2 Please input your interviewee's name as it", "# For example, the property is \"snippet.description\", and the resource # already has", "== True: args = argparser.parse_args() service = get_authenticated_service(args) def print_results(results): print(results) # Build", "== 'y': deleteVideos = True elif answer == 'n': deleteVideos = False elif", "+ str(text) #ES: strip whitespace text = [x.strip() for x in text] #split", "\" + str(2) + \" minutes...\" # time.sleep(120) sub_txt += subtitle cc =", "False elif answer == '': fullSentenceSubtitles = False answer = raw_input(\"\\n7.1.2 Would you", "= raw_input(\"\\n1/7 Will you be cutting your video into video snippets (y) \")", "least. 
#place-based time stamping can be set to True or False (make a", "'snippet.tags[]': '', 'snippet.title': fileName, 'status.embeddable': '', 'status.license': '', 'status.privacyStatus': 'unlisted', 'status.publicStatsViewable': ''}, folderName", "CLIENT_SECRETS_FILE = \"client_id.json\" # This OAuth 2.0 access scope allows for full read/write", "name as it appears in the transcript: \") interviewee = raw_input(\"\\n6.3.2 Please input", "problem with one of your timestamps:\" print \"Timestamp number #\",str(num+2),\" (equivalent to \",str(sp[1]),\"", "compiledSubs = compileSubs(folderName,fileName,[['_high-frequency-timestamps',0,placeBasedTimestamping]],t_list,interviewer,interviewee,False,language,resampleSubtitles,fullSentenceSubtitles,removeLoneWords) time.sleep(10) #thefile = open(folderName + \"/\" + fileName + \".srt\",", "+ \".srt\", 'w') #thefile.write(compiledSubs) if uploadFull == True: print \"\\nUploading full video...\" vid", "tell the underlying HTTP transport library not to retry, since # we are", "str(len(splits)) + \" video snippets will created. Continue?\" print \"\\nIf all input was", "the API's captions.insert method to upload a caption track in draft status. def", "to resume or your 'videoids.pkl' file has gone missing. 
The program will restart", "will resume in \" + str(2 * sleepingTime / 60) + \" minutes...\"", "soon available on your Youtube channel for you to check and adjust captions.\"", "False #ES: IF you enabled 'resampleSubtitles' (above), you have the option to make", "excessively large) fullSentenceSubtitles = False #ES: IF you enabled 'resampleSubtitles' (above), you have", "\" text snippets based on it containing \" + str(len(splits)) + \" timestamps", "elif answer == 'n': placeBasedTimestamping = False elif answer == '': placeBasedTimestamping =", "True: with open(folderName + \"/\" + 'videoids.pkl', 'rb') as f: videoids = pickle.load(f)", "minutes...\" time.sleep(2 * sleepingTime) else: if downloadCaptions == True: with open(folderName + \"/\"", "splits videoids = [] #videoids = [u'jDAZHgL-nG4', u'cMNTnd8pApk', u's5hLO6T_BhY', u'gOAoCh5Mecc', u'p0PX5s6k5DU', u'hSmPkLqOt0M', u'2Ik7_biRs9g',", "t0 = t1 elif len(t) == 3: #if we are only combining subtitle", "pipeline. Creating dummy file 'delete me.txt' to finish pipeline.\" foo = open(folderName +", "Continue?\" print \"\\nIf all input was correct, the program will begin snipping\" yes_or_no(question)", "If your transcript has speaker names (e.g. the interviewer or interviewee's names) that", "This variable defines a message to display if the CLIENT_SECRETS_FILE is # missing.", "httplib.CannotSendHeader, httplib.ResponseNotReady, httplib.BadStatusLine) # Always retry when an apiclient.errors.HttpError with one of these", "captions. It is\",strftime(\"%H:%M:%S\", localtime()),\". Script will resume in \" + str(2) + \"", "no uploads to resume or your 'videoids.pkl' file has gone missing. The program", "input your interviewer's name as it appears in the transcript: \") interviewee =", "'': placeBasedTimestamping = False print \"\\n\" folderName = raw_input(\"Enter the name of the", "account and requires requests to use an SSL connection. 
YOUTUBE_READ_WRITE_SSL_SCOPE = \"https://www.googleapis.com/auth/youtube.force-ssl\" API_SERVICE_NAME", "range(0, len(prop_array)): is_array = False key = prop_array[pa] # Convert a name like", "with open(folderName + \"/\" + 'videoids.pkl', 'rb') as f: videoids = pickle.load(f) print", "text snippets as subtitle files (.vtt),\\n- stitches these subtitle files together into a", "snippets from Youtube? (y) \") answer = verify_y_n_none(answer) if answer == 'y': downloadCaptions", "credentials = storage.get() if credentials is None or credentials.invalid: credentials = run_flow(flow, storage,", "(must be set to True for other processes to run) snipTranscript = True", "''}, media_file, part='snippet,status') videoids.append(vid) print videoids #c += 1 wait = True with", "= False elif answer == '': snipVideos = True answer = raw_input(\"\\n2/7 Will", "the inserted resource. def build_resource(properties): resource = {} for p in properties: #", "+ 'videoids.pkl', 'wb') as f: pickle.dump(videoids, f) else: if resumeUploads == True or", "videoId=videoids[c-1], language=language, name=media_file, isDraft=True, sync=True ) ), media_body=caption_file ).execute() captionsids.append(a['id']) c += 1", "\" + str(2 * sleepingTime / 60) + \" minutes...\" time.sleep(2 * sleepingTime)", "+ str(2) + \" minutes...\" # time.sleep(120) sub_txt += subtitle cc = \"\"", "function request = service.videos().insert( body=resource, media_body=MediaFileUpload(media_file, chunksize=-1, resumable=True), **kwargs ) vid = resumable_upload(request,", "me.txt\", 'r') as myfile: text = myfile.read().replace('\\n', '') with open(folderName + \"/\" +", "Hi, I'd like to ask you a few questions...\\\"), please input them. If", "message=MISSING_CLIENT_SECRETS_MESSAGE) storage = Storage(\"youtube-api-snippets-oauth2.json\") credentials = storage.get() if credentials is None or credentials.invalid:", "\"snippet\" object. 
ref[key] = {} ref = ref[key] else: # For example, the", "resume uploading the remaining snippets...)\" time.sleep(1) for s in splits: c += 1", "a \"snippet\" object. ref = ref[key] return resource # Remove keyword arguments that", "uploadTranscripts = True #ES: download snippet subtitle files (.vtt) downloadCaptions = True #ES:", "+ \"/\" + 'videoids.pkl', 'rb') as f: videoids = pickle.load(f) print \"\\nThe video", "a file that contains # the OAuth 2.0 information for this application, including", "Exception as e: print e print \"The file named '\" + fileName +", "+ str(cc) + \".vtt\", 'w') as thefile: #thefile.write(sub_txt) thefile.write(subtitle) if cc == \"31\":", "a digit and is not a next-line char #ES: removing punctuation from '[00:00:01.09]'", "of 100 video uploads per 24h using the current API credentials. Continue?\" print", "snipVideos = True answer = raw_input(\"\\n2/7 Will you be uploading video snippets to", "> 0: if sp[1] <= sp1[1]: print \"\\nThere is a problem with one", "+ folderName + \"'. Please see README.md for instructions.\" print \"exiting application...\" time.sleep(2)", "print \"The file named '\" + fileName + \".txt' does not exist in", "'y': resumeUploads = True elif answer == 'n': resumeUploads = False elif answer", "(y) \") answer = verify_y_n_none(answer) if answer == 'y': uploadVideos = True snipVideos", "snipVideos == True or uploadTranscripts == True or resumeUploads == True or downloadCaptions", "str(len(splits)) + \" parts\" time.sleep(1) for s in splits: c += 1 if", "key-value pairs. 
# Leave properties with empty values out of the inserted resource.", "transcripts...\" for s in splits: print c,s media_file = folderName + '/' +", "resume in \" + str(2) + \" minutes...\" # time.sleep(120) sub_txt += subtitle", "t.replace('[','').replace(']','').replace(':','').replace('.','').replace('\\n','').isdigit() and t != \"\\n\": #ES: add t to position c of texts", "print cc with open(folderName + \"/\" + fileName + \"_\" + str(cc) +", "ref = ref[key] else: # For example, the property is \"snippet.description\", and the", "= True fileName = raw_input(\"In order to accurately combine subtitle files, you will", "= {} for p in properties: # Given a key like \"snippet.title\", split", "1 wait = True with open(folderName + \"/\" + 'videoids.pkl', 'wb') as f:", "with open(folderName + \"/\" + \"delete me.txt\", 'r') as myfile: text = myfile.read().replace('\\n',", "the user has chosen to only combine subtitles. the switch combine_only allows some", "previously-initiated process? (n) \") answer = verify_y_n_none(answer) if answer == 'y': resumeUploads =", "of texts texts[c] += t#.encode('utf8') #print t.replace('[','').replace(']','').replace(':','').replace('.','').replace('\\n','').isdigit() #ES: this will aggregate phrases (t)", "of properties given as key-value pairs. 
# Leave properties with empty values out", "resumeUploads = False elif answer == '': resumeUploads = False answer = raw_input(\"\\n4/7", "= os.stat(folderName).st_size except Exception as e: print e print \"The folder named '\"", "kwargs = remove_empty_kwargs(**kwargs) # See full sample for function results = service.playlistItems().insert( body=resource,", "youtube -ES if deleteVideos == True: print \"\\nDeleting videos...\\n\" c = 1 for", "is invalid\": reply = str(raw_input(question+' (y/n): ')).lower().strip() if reply[0] == 'y': return True", "method != 'insert' or 'id' not in response: print response else: exit(\"The upload", "\"\" if c < 10: cc = \"0\" + str(c) else: cc =", "open(folderName + \"/\" + \"delete me.txt\", 'r') as myfile: text = myfile.read().replace('\\n', '')", "on Mac).\" time.sleep(1) print \"\\n\" print \"This tool:\\n- snips your transcript (.txt) into", "I don't think this function is ever called... # Call the API's captions.insert", "raw_input(\"Enter the language code of your video and transcript or the intended language", "choppy, fast subtitles that are hard to read) resampleSubtitles = False #ES: IF", "videos only visible to your account,\\n- uploads the text snippets to Youtube as", "proceeding.\" time.sleep(1) print \"You may terminate the application at any point by pressing", "(.vtt) downloadCaptions = True #ES: delete uploaded video snippets from your Youtube account", "def s_to_hms(seconds): m, sec = divmod(seconds, 60) h, m = divmod(m, 60) #print", "key.\" time.sleep(1) interviewer = raw_input(\"\\n6.3.1 Please input your interviewer's name as it appears", "= [] #list of cut-up texts texts = [\"\"] t0 = 0 c", "kwargs is not None: for key, value in kwargs.iteritems(): if value: good_kwargs[key] =", "value in kwargs.iteritems(): if value: good_kwargs[key] = value return good_kwargs ### END BOILERPLATE", "credentials. 
def get_authenticated_service(args): flow = flow_from_clientsecrets(CLIENT_SECRETS_FILE, scope=YOUTUBE_READ_WRITE_SSL_SCOPE, message=MISSING_CLIENT_SECRETS_MESSAGE) storage = Storage(\"youtube-api-snippets-oauth2.json\") credentials =", "= True wait = False def yes_or_no(question): while \"the answer is invalid\": reply", "# already has a \"snippet\" object. ref = ref[key] return resource # Remove", "\"delete me.txt\",\"w+\") foo.close() with open(folderName + \"/\" + \"delete me.txt\", 'r') as myfile:", "and uploading content to Youtube for processing. This may take between 20 minutes", "select which processes you would like to run: \\n\\n\" time.sleep(2) answer = raw_input(\"\\n1/7", "\"https://www.googleapis.com/auth/youtube.force-ssl\" API_SERVICE_NAME = \"youtube\" API_VERSION = \"v3\" # This variable defines a message", "placeBasedTimestamping = True elif answer == 'n': placeBasedTimestamping = False elif answer ==", "order to accurately combine subtitle files, you will need to create a list", "following\" + str(text) #ES: strip whitespace text = [x.strip() for x in text]", "thefile.write(\"%s\\n\" % texts[c-1]) #time.sleep(.1) texts.append(\"\") texts[c] = \"\" #t = t.replace(\" \", \"\")", "print response else: exit(\"The upload failed with an unexpected response: %s\" % response)", "exit() print \"\\n\" originalVideo = raw_input(\"Enter the file name of your video (this", "\":\" + str(int(m)) + \":\" + str(int(sec)) #ES: open anita/Anita.txt as myfile try:", "float(int(t_0[0])*3600) + int(float(t_0[1])*60) + int(float(t_0[2])) t1 = float(int(t_1[0])*3600) + int(float(t_1[1])*60) + int(float(t_1[2])) return", "text prior to timestamp) to thefile thefile.write(\"%s\\n\" % texts[c-1]) #time.sleep(.1) texts.append(\"\") texts[c] =", "qualified as a digit (False) and therefore the following condition is almost always", "= value return good_kwargs ### END BOILERPLATE CODE # Sample python code for", "authenticated user's account and requires requests to use an 
SSL connection. YOUTUBE_READ_WRITE_SSL_SCOPE =", "uploads these video snippets to Youtube as private videos only visible to your", "+ \"/\" + fileName + \".srt\", 'w') #thefile.write(compiledSubs) if uploadFull == True: print", "file (\" + str(videoSize) + \" Mb).\" yes_or_no(question) print \"\\n1. Slicing into \"", "continue def verify_y_n_none(a): while True: a = a.lower().strip() if a == 'y' or", "files are stored #folderName = 'venant' #fileName refers to the name of the", "property is \"snippet.description\", and the resource # already has a \"snippet\" object. ref", "Will you be downloading the generated subtitle snippets from Youtube? (y) \") answer", "have been successfully generated? (n) \") answer = verify_y_n_none(answer) if answer == 'y':", "ref = ref[key] return resource # Remove keyword arguments that are not set", "\"\") #t = t t = t.replace('[','').replace(']','').replace('\\n','') t = unicode(t, \"UTF-8\") #split the", "minutes and several hours, depending on the size of your video file (\"", "videoids #c += 1 wait = True with open(folderName + \"/\" + 'videoids.pkl',", "gone over README.md before proceeding.\" time.sleep(1) print \"You may terminate the application at", "error = \"A retriable HTTP error %d occurred:\\n%s\" % (e.resp.status,e.content) else: raise except", "status, response = request.next_chunk() if response is not None: if method == 'insert'", "+ \"/\" + 'videoids.pkl', 'wb') as f: pickle.dump(videoids, f) if wait == True:", "= True) #ES A feature created by RG that has yet to be", "into snippets based on the transcript's timestamps (must be set to True for", "sample for function return vid def hms_to_s(time): time = unicode(time, \"UTF-8\") time =", "that precede their discourse (e.g. 
\\\"Emmanuel: Hi, I'd like to ask you a", "to continue where you left off (uploadVideos must still be set to True):", "+ str(c) + \" \" + captionsids[c-1] + \" to be processed into", "if credentials is None or credentials.invalid: credentials = run_flow(flow, storage, args) # Trusted", "files, you will need to create a list of timestamps demarcating the length", "2: if combine_only == True: t1 = int(t[0])*60 + int(t[1]) splits.append([t0,t0+t1]) t_list.append(t1) t0", "For more advanced users or users who have already used this tool, please", "print \"\\nWaiting for videos to be processed. It is\",strftime(\"%H:%M:%S\", localtime()),\". Script will resume", "the request and store authorization credentials. def get_authenticated_service(args): flow = flow_from_clientsecrets(CLIENT_SECRETS_FILE, scope=YOUTUBE_READ_WRITE_SSL_SCOPE, message=MISSING_CLIENT_SECRETS_MESSAGE)", "is a feature that needs exploration so as to make sure that place", "or 'off' depending on which steps you would like to run. If this", "you be resuming video uploads from a previously-initiated process? (n) \") answer =", "Exiting...\" exit() #ES: UPLOADS THE VIDEOS if uploadVideos == True: #ES: the following", "Youtube for syncing? (y) \") answer = verify_y_n_none(answer) if answer == 'y': uploadVideos", "but it is smaller.\" print \"Please make sure your timestamps are in ascending", "is\",strftime(\"%H:%M:%S\", localtime()),\". Script will resume in \" + str(2 * sleepingTime / 60)", "uploaded. question = \"\\nThere were \" + str(len(splits)) + \" timestamps detected in", "True with open(folderName + \"/\" + 'captionsids.pkl', 'wb') as f: pickle.dump(captionsids, f) print", "you be uploading video snippets to Youtube for syncing? 
(y) \") answer =", "input .txt file (excluding .txt) #fileName = 'venant' #originalVideo refers to the name", "== True: answer = raw_input(\"\\n7.1.1 Would you like to reorganize subtitles to prioritize", "to read) resampleSubtitles = False #ES: IF you enabled 'resampleSubtitles' (above), you have", "= raw_input(\"Enter the file name of your transcript (excluding the \\\".txt\\\" extention): \")", "be removed. #ES: a list of the transcript's timestamps t_list = [] #ES:", "+ int(t[1])*60 + int(t[2]) t_list.append(t1) t0 = t1 except ValueError as e: print", "If this is your first time running the tool, simply leave the following", "resample subtitles to prevent cut-up phrases, lone-word subtitles, and improve the subtitle structure", "'n', or leave the answer blank by hitting 'Enter': \") continue print \"\\n\\n\"", "single subtitle file for your video.\\n\\nYou may switch these processes 'on' or 'off'", "as it appears in the transcript: \") interviewee = raw_input(\"\\n6.3.2 Please input your", "See full sample for function request = service.videos().insert( body=resource, media_body=MediaFileUpload(media_file, chunksize=-1, resumable=True), **kwargs", "raw_input(\"Please answer 'y' or 'n': \") continue def verify_y_n_none(a): while True: a =", "error = \"A retriable error occurred: %s\" % e if error is not", "fileName + \"_\" + str(c) + \".txt\", 'w') try: #ES: write the previous", "your timestamps isn't formatted correctly. 
Consult README.md for guidelines on proper timestamp formatting.\"", "\"/\" + 'videoids.pkl', 'rb') as f:videoids = pickle.load(f) except Exception as e: print", "in ref: # For example, the property is \"snippet.title\", but the resource does", "\"/\" + \"delete me.txt\",\"w+\") foo.close() with open(folderName + \"/\" + \"delete me.txt\", 'r')", "== True: with open(folderName + \"/\" + 'videoids.pkl', 'wb') as f: pickle.dump(videoids, f)", "an adjacent subtitle (verify)) removeLoneWords = False #____________# #ES: USER INTERVIEW SECTION def", "read) (n) \") answer = verify_y_n_none(answer) if answer == 'y': resampleSubtitles = True", "if retry > MAX_RETRIES: exit(\"No longer attempting to retry.\") max_sleep = 2 **", "fileName + \".txt\", 'r') as myfile: text = myfile.read().replace('\\n', '') #print \"ES: replace", "\"YouTube Data API v3\" and ENABLE it click \"create credentials\" create and \"OAUT", "in \" + str(sleepingTime/60) + \" minutes...\" time.sleep(sleepingTime) #search_response = service.search().list( # q=\"Anita\",", "key[0:len(key)-2:] is_array = True if pa == (len(prop_array) - 1): # Leave properties", "it is smaller.\" print \"Please make sure your timestamps are in ascending order", "resumeUploads == True: print \"\\nResuming video uploads...\\n\" time.sleep(1) try: with open(folderName + \"/\"", "2.0 access scope allows for full read/write access to the # authenticated user's", "== True: #print splits,videoids #uploads transcripts print \"\\nUploading transcripts...\" for s in splits:", "\"\\nDeleting videos...\\n\" c = 1 for s in splits: print c,videoids[c-1] service.videos().delete( id=videoids[c-1]", "when videos are being uploaded (uploadVideos = True) to warn the user as", "# #for search_result in search_response.get(\"items\", []): # videos.append(\"%s\" % (search_result[\"id\"][\"videoId\"])) # #print \"Videos:\\n\",", "language, name, file): insert_result = youtube.captions().insert( part=\"snippet\", body=dict( snippet=dict( 
videoId=video_id, language=language, name=name, isDraft=True", "on texts by 1 c += 1 #ES: printing deets #print t.replace('[','').replace(']','').replace(':','').replace('.','').replace('\\n','').isdigit() #print", "(y) \") answer = verify_y_n_none(answer) if answer == 'y': snipVideos = True elif", "files (.vtt),\\n- stitches these subtitle files together into a single subtitle file for", "+ str(len(splits)) + \" timestamps detected in \" + fileName + \". \"", "#ES: upload snippet transcripts (.txt) uploadTranscripts = True #ES: download snippet subtitle files", "the text snippets as subtitle files (.vtt),\\n- stitches these subtitle files together into", "This OAuth 2.0 access scope allows for full read/write access to the #", "# Always retry when an apiclient.errors.HttpError with one of these status # codes", "full read/write access to the # authenticated user's account and requires requests to", "> MAX_RETRIES: exit(\"No longer attempting to retry.\") max_sleep = 2 ** retry sleep_seconds", "texts.append(\"\") texts[c] = \"\" #t = t.replace(\" \", \"\") #t = t t", "#videoids = [u'jDAZHgL-nG4', u'cMNTnd8pApk', u's5hLO6T_BhY', u'gOAoCh5Mecc', u'p0PX5s6k5DU', u'hSmPkLqOt0M', u'2Ik7_biRs9g', u'G64A_hpNWfI', u'ZzVVEcGekv0', u'ZxKJhN3JFfI', u'TsDnqWmpvrw',", "+= 1 time.sleep(3) #deletes videos from youtube -ES if deleteVideos == True: print", "\" + str(len(splits)) + \" text snippets based on it containing \" +", "remove_empty_kwargs(**kwargs) # See full sample for function results = service.playlistItems().insert( body=resource, **kwargs ).execute()", "if answer == 'y': uploadVideos = True snipVideos = True elif answer ==", "language code for your transcript and video files. 
Youtube will use this code", "True): resumeUploads = False #ES: upload snippet transcripts (.txt) uploadTranscripts = True #ES:", "'y' or 'n': \") continue def verify_y_n_none(a): while True: a = a.lower().strip() if", "True fileName = raw_input(\"In order to accurately combine subtitle files, you will need", "to short, choppy, fast subtitles that are hard to read) resampleSubtitles = False", "to accurately combine subtitle files, you will need to create a list of", "values out of inserted resource. if properties[p]: if is_array: ref[key] = properties[p].split(',') else:", "the variable 'snipTranscript' to True so that the code can properly run.\" exit()", "isn't formatted correctly. Consult README.md for guidelines on proper timestamp formatting.\" print \"\\nVerifying", "this tool, please select which processes you would like to run: \\n\\n\" time.sleep(2)", "c,videoids[c-1] service.videos().delete( id=videoids[c-1] ).execute() c += 1 time.sleep(10) if combineSubtitles == True: #compiles", "print s_to_hms(i[0]),\"->\",s_to_hms(i[1]) #time.sleep(60) #print splits,splits[len(splits)-1][1] #splits.append([splits[len(splits)-1][1],7200]) #print splits #print \"Wait\" #time.sleep(30) c =", "\"O.G.\" #folderName = 'oscar' #fileName = 'oscar' #originalVideo = \"Oscar.mp4\" ### START BOILERPLATE", "please input them. If this does not apply to your transcript, simply leave", "word (and put them in an adjacent subtitle (verify)) removeLoneWords = False #____________#", "[x.strip() for x in text] #split times (?) splits = [] #list of", "uploads from a previously-initiated process? (n) \") answer = verify_y_n_none(answer) if answer ==", "combineSubtitles = True if combineSubtitles == True: answer = raw_input(\"\\n7.1 Would you like", "CLIENT_SECRETS_FILE is # missing. 
MISSING_CLIENT_SECRETS_MESSAGE = \"WARNING: Please configure OAuth 2.0\" # Authorize", "+ \"/\" + 'captionsids.pkl', 'wb') as f: pickle.dump(captionsids, f) print \"Waiting for transcripts", "deleteVideos == True: args = argparser.parse_args() service = get_authenticated_service(args) def print_results(results): print(results) #", "to retry before giving up. MAX_RETRIES = 10 # Always retry when these", "file): insert_result = youtube.captions().insert( part=\"snippet\", body=dict( snippet=dict( videoId=video_id, language=language, name=name, isDraft=True ) ),", "= pickle.load(f) print \"\\nThe video IDs are composed of the following: \" +", "timestamps are in ascending order and that there are no mistakes (see README.md)", "so, do this manually on youtube.com and then restart the program.\" uploadVideos =", "Sample Python code for user authorization import httplib2 import os import sys import", "up. MAX_RETRIES = 10 # Always retry when these exceptions are raised. RETRIABLE_EXCEPTIONS", "\" parts & uploading videos...\" time.sleep(1) if len(videoids) > 0: print \"(However, it", "blank by pressing the 'Enter' key.\" time.sleep(1) interviewer = raw_input(\"\\n6.3.1 Please input your", "files, and we are using a .txt file with a list of video", "= True elif answer == 'n': downloadCaptions = False elif answer == '':", "text is the following\" + str(text) #ES: strip whitespace text = [x.strip() for", "#print \"Videos:\\n\", \"\\n\".join(videos), \"\\n\" #ES: I don't think this function is ever called...", "name of your video (this time including the file's extention): \") try: verifyExistence", "print \"Waiting for transcripts to be processed into captions. It is\",strftime(\"%H:%M:%S\", localtime()),\". 
Script", "is a problem with one of your timestamps:\" print \"Timestamp number #\",str(num+2),\" (equivalent", "Exception as e: print e print \"The folder named '\" + folderName +", "+ \" minutes...\" # time.sleep(120) sub_txt += subtitle cc = \"\" if c", "True: with open(folderName + \"/\" + 'videoids.pkl', 'wb') as f: pickle.dump(videoids, f) if", "message to display if the CLIENT_SECRETS_FILE is # missing. MISSING_CLIENT_SECRETS_MESSAGE = \"WARNING: Please", "warn the user as to how many videos will be uploaded. question =", "have indicated, you will temporarily require \" + str(videoSize) + \" Mb available", "#originalVideo = \"DD2FD4AE-FEE4-4DF3-9AF7-A4D6BF453B49.flv\" #interviewer = \"S.G.\" #interviewee = \"O.G.\" #folderName = 'oscar' #fileName", "\"\\nDownloading captions...\" c = 1 waitLonger = True for s in splits: print", "!= \"\" and t.replace('[','').replace(']','').replace(':','').replace('.','').replace('\\n','').isdigit() and \"[\" in t: #increase pos on texts by", "the entire pipeline. Creating dummy file 'delete me.txt' to finish pipeline.\" foo =", "language, 'snippet.description': 'Description of uploaded video.', 'snippet.tags[]': '', 'snippet.title': fileName, 'status.embeddable': '', 'status.license':", "media_body=caption_file ).execute() print \"\\nFull video is soon available on your Youtube channel for", "\",len(videoids),\" video snippets were already uploaded to Youtube. Now trying to resume uploading", "#print splits #print \"Wait\" #time.sleep(30) c = 0 #print splits videoids = []", "correct, the program will begin snipping\" yes_or_no(question) print \"\\n1. Slicing into \" +", "+ \" \" + captionsids[c-1] + \" to be processed into captions. 
It", "since it is never qualified as a digit (False) and therefore the following", "list item (a text) until a timestamp is reached #ES: if t is", "if resampleSubtitles == True: answer = raw_input(\"\\n7.1.1 Would you like to reorganize subtitles", "== 'n': fullSentenceSubtitles = False elif answer == '': fullSentenceSubtitles = False answer", "False #ES: resample subtitles to prevent cut-up phrases, lone-word subtitles, and improve the", "texts[c] #print \"splits: \" + str(splits) #for i in splits: # print s_to_hms(i[0]),\"->\",s_to_hms(i[1])", "exit() sp1 = sp num+=1 print \"\\nThe document named '\" + fileName +", "originalVideo, s[0], s[1], targetname=folderName + \"/\" + fileName + \"_\" + str(c) +\".mp4\")", "service.captions().insert( part=\"snippet\", body=dict( snippet=dict( videoId=videoids[c-1], language=language, name=media_file, isDraft=True, sync=True ) ), media_body=caption_file ).execute()", "== True: answer = raw_input(\"\\n7.1 Would you like to reorganize subtitles according to", "video lengths, then we need to make this into a list of cumulative", "you have indicated, you will temporarily require \" + str(videoSize) + \" Mb", "\"\\nUploading full video...\" vid = videos_insert( {'snippet.categoryId': '22', 'snippet.defaultLanguage': language, 'snippet.description': 'Description of", "\" + str(len(splits)) + \" parts\" time.sleep(1) for s in splits: c +=", "= verify_y_n_none(answer) if answer == 'y': snipVideos = True elif answer == 'n':", "def print_results(results): print(results) # Build a resource based on a list of properties", "== \"31\": print subtitle c += 1 time.sleep(3) #deletes videos from youtube -ES", "subtitle units tend to become excessively long) (n) \") answer = verify_y_n_none(answer) if", "c > len(videoids): ffmpeg_extract_subclip(folderName + \"/\" + originalVideo, s[0], s[1], targetname=folderName + \"/\"", "ref: # For example, the property is \"snippet.title\", but the resource does #", "altered when concatenating snippets 
(i.e. when combineSubtitles = True) #ES A feature created", "follows [HH:MM:SS.00], followed by a newline.\\n\\nPlease enter the file name of your timestamp", "+ int(t[1]) splits.append([t0,t0+t1]) t_list.append(t1) t0 = t0 + t1 else: t1 = int(t[0])*60", "fileName + \". \" + str(len(splits)) + \" video snippets will created. Continue?\"", "id '%s' was successfully uploaded.\" % response['id'] videoid = response['id'] elif method !=", "str(c) + \".mp4\" if not os.path.exists(media_file): exit('Please specify a valid file location.') vid", "= False #ES: combine vtt snippets that were downloaded from Youtube into a", "'id' in response: print \"Video id '%s' was successfully uploaded.\" % response['id'] videoid", "caption_file = folderName + '/' + fileName + \"_\" + str(c) + \".txt\"", "sp1 = 0 num = 0 #print str(splits) #print str(t_list) for sp in", "specify a valid file location.') print \"\\nSnipping completed. No further options were selected.", "there are no mistakes (see README.md) and restart the program.\" exit() sp1 =", "location.') print \"\\nSnipping completed. No further options were selected. Exiting...\" exit() #ES: UPLOADS", "have gone over README.md before proceeding.\" time.sleep(1) print \"You may terminate the application", "the input .txt file (excluding .txt) #fileName = 'venant' #originalVideo refers to the", "'': downloadCaptions = True answer = raw_input(\"\\n6/7 Would you like your uploaded video", "process was terminated unexpectedly before and you want to continue where you left", "# Maximum number of times to retry before giving up. MAX_RETRIES = 10", "as e: print e print \"\\n One of your timestamps isn't formatted correctly.", "videos. 
You may need to remove any previously-uploaded videos if the videos you", "resumeUploads == True or downloadCaptions == True or deleteVideos == True: combine_only =", "is not recommended since subtitle units tend to become excessively long) (n) \")", "= False elif answer == '': removeLoneWords = False answer = raw_input(\"\\n7.2 Would", "int(t[0])*60 + int(t[1]) splits.append([t0,t0+t1]) t_list.append(t1) t0 = t0 + t1 else: t1 =", "639-1' language code.)\\n\") if language != '': verifyLanguage = raw_input(\"\\nYou have entered '\"", "formatted like such '[HH:MM:SS.00]'.\" else: print \"Please set the variable 'snipTranscript' to True", "snippets as subtitle files (.vtt),\\n- stitches these subtitle files together into a single", "phrases, lone-word subtitles, and improve the subtitle structure overall (can lead to short,", "as thefile: #thefile.write(sub_txt) thefile.write(subtitle) if cc == \"31\": print subtitle c += 1", "cc == \"31\": print subtitle c += 1 time.sleep(3) #deletes videos from youtube", "variables import imageio imageio.plugins.ffmpeg.download() from moviepy.video.io.ffmpeg_tools import ffmpeg_extract_subclip import time from time import", "u'80rY1RlbVQw', u'1yumt5fRBF4', u'u5qAHXhhJoo', u'G3gO6DW-wrM', u'qAU_8DNEqP8', u'fbGaOVHXkvY', u'_Knl1rP8Z9w', u'O6f8ZWjSgiw', u'uXY-00DuLjY', u'WpreZ_gbEyw'] #with open(folderName +", "code for your transcript and video files. Youtube will use this code for", "timestamp should be written as follows [HH:MM:SS.00], followed by a newline.\\n\\nPlease enter the", "has a \"snippet\" object. 
ref = ref[key] return resource # Remove keyword arguments", "t.replace('[','').replace(']','').replace('\\n','') t = unicode(t, \"UTF-8\") #split the timestamps at : (into 3) t", "instructions.\" print \"exiting application...\" time.sleep(2) exit() print \"\\n\" if snipVideos == True or", "See full sample for function results = service.playlistItems().insert( body=resource, **kwargs ).execute() print_results(results) #'snippet.playlistId':", "resource based on a list of properties given as key-value pairs. # Leave", "'w') #thefile.write(compiledSubs) if uploadFull == True: print \"\\nUploading full video...\" vid = videos_insert(", "'snippet.resourceId.videoId': vid, 'snippet.position': ''}, part='snippet', onBehalfOfContentOwner='') print \"Waiting for full video to be", "for your video.\\n\\nYou may switch these processes 'on' or 'off' depending on which", "Now trying to resume uploading the remaining snippets...)\" time.sleep(1) for s in splits:", "'Description of uploaded video.', 'snippet.tags[]': '', 'snippet.title': fileName, 'status.embeddable': '', 'status.license': '', 'status.privacyStatus':", "exit('Please specify a valid file location.') vid = videos_insert( {'snippet.categoryId': '22', 'snippet.defaultLanguage': language,", "chosen any options for running this application. 
Exiting...\" exit() while True: language =", "Always retry when an apiclient.errors.HttpError with one of these status # codes is", "int(t[1]) splits.append([t0,t1]) t_list.append(t1) t0 = t1 elif len(t) == 3: #if we are", "= \"M.M.\" #interviewee = \"B.K.\" #fileName = 'Berthe' #originalVideo = \"DD2FD4AE-FEE4-4DF3-9AF7-A4D6BF453B49.flv\" #interviewer =", "text snippets based on its timestamps,\\n- snips the associated video accordingly into video", "\") print \"\\n\" #____________# # let rodolphe know if there is a problem", "snippet subtitle files (.vtt) downloadCaptions = True #ES: delete uploaded video snippets from", "elif answer == '': resumeUploads = False answer = raw_input(\"\\n4/7 Will you be", "onBehalfOfContentOwner='') print \"Waiting for full video to be processed. It is\",strftime(\"%H:%M:%S\", localtime()),\". Script", "unable to resume uploads because there are no uploads to resume or your", "think this function is ever called... # Call the API's captions.insert method to", "c = 1 captionsids = [] wait = False if uploadTranscripts == True:", "\" + str(len(splits)) + \" parts & uploading videos...\" time.sleep(1) if len(videoids) >", "== True: # try: subtitle = service.captions().download(id=captionsids[c-1],tfmt='vtt').execute() # waitLonger = False # except:", "#deletes videos from youtube -ES if deleteVideos == True: print \"\\nDeleting videos...\\n\" c", "entries full sentences (not recommended, since some timestamp/subtitle units can end up being", "transcript (excluding the \\\".txt\\\" extention): \") try: verifyExistence = os.stat(folderName + '/' +", "punctuation? (Experimental; can lead to short, choppy, fast subtitles that are hard to", "waitLonger = True # print \"Waiting for transcripts \" + str(c) + \"", "interviewer or interviewee's names) that precede their discourse (e.g. \\\"Emmanuel: Hi, I'd like", "a # failed upload. 
def resumable_upload(request, resource, method): response = None error =", "FOR PROCESSING if snipTranscript == True: for t in text: #add a \\n", "\",str(sp[1]),\" seconds) should be a larger number than the timestamp that comes before", "is not a next-line char #ES: removing punctuation from '[00:00:01.09]' since it is", "in draft status. def upload_caption(youtube, video_id, language, name, file): insert_result = youtube.captions().insert( part=\"snippet\",", "answer == 'y': uploadVideos = True snipVideos = True elif answer == 'n':", "with a list of video lengths, then we need to make this into", "all videos. You may need to remove any previously-uploaded videos if the videos", "\" minutes...\" # time.sleep(120) sub_txt += subtitle cc = \"\" if c <", "removeLoneWords = False answer = raw_input(\"\\n7.2 Would you like to reorganize subtitles according", "if c < 10: cc = \"0\" + str(c) else: cc = str(c)", "+ fileName + \".srt\", 'w') #thefile.write(compiledSubs) if uploadFull == True: print \"\\nUploading full", "fileName, 'status.embeddable': '', 'status.license': '', 'status.privacyStatus': 'unlisted', 'status.publicStatsViewable': ''}, folderName + \"/\" +", "for function results = service.playlistItems().insert( body=resource, **kwargs ).execute() print_results(results) #'snippet.playlistId': playlistID, playlist_items_insert( {'snippet.resourceId.kind':", "False #____________# #ES: USER INTERVIEW SECTION def verify_y_n(a): while True: a = a.lower().strip()", "like keys\" search for youtube api click \"YouTube Data API v3\" and ENABLE", "+ \".txt' does not exist in the folder '\" + folderName + \"'.", "#for i in splits: # print s_to_hms(i[0]),\"->\",s_to_hms(i[1]) #time.sleep(60) #print splits,splits[len(splits)-1][1] #splits.append([splits[len(splits)-1][1],7200]) #print splits", "or a == 'n': return a else: a = raw_input(\"Please answer 'y' or", "as f: captionsids = pickle.load(f) #if wait == True: if downloadCaptions == True:", "\"\" # while waitLonger == 
True: # try: subtitle = service.captions().download(id=captionsids[c-1],tfmt='vtt').execute() # waitLonger", "these video snippets,\\n- allows Youtube to sync the video and text snippets\\n- downloads", "#print \"ES: replace \\\\n with ''\" with open(folderName + \"/\" + fileName +", "credentials.invalid: credentials = run_flow(flow, storage, args) # Trusted testers can download this discovery", "properties[p] elif key not in ref: # For example, the property is \"snippet.title\",", "\"the answer is invalid\": reply = str(raw_input(question+' (y/n): ')).lower().strip() if reply[0] == 'y':", "= os.stat(folderName + '/' + fileName + '.txt').st_size except Exception as e: print", "answer = verify_y_n_none(answer) if answer == 'y': uploadVideos = True snipVideos = True", "= \"venant.mp4\" #interviewer = \"E.H.\" #interviewee = \"E.M.\" #fileName = 'Frederic' #originalVideo =", "+ \"delete me.txt\") as f: text = f.readlines() pass #print \"ES: text is", "\"S.G.\" #interviewee = \"O.G.\" #folderName = 'oscar' #fileName = 'oscar' #originalVideo = \"Oscar.mp4\"", "elif answer == '': combineSubtitles = True if combineSubtitles == True: answer =", "subtitle file. combineSubtitles = True #ES: the following switches control how subtitles are", "answer 'y' or 'n': \") continue def verify_y_n_none(a): while True: a = a.lower().strip()", "application. Exiting...\" exit() while True: language = raw_input(\"Enter the language code of your", "be set to True): resumeUploads = False #ES: upload snippet transcripts (.txt) uploadTranscripts", "\"\"\" #CLIENT_SECRETS_FILE = \"client_secret.json\" #api key is <KEY> #client id is in client_id.json", "an object and \"title\" will be a property in that object. prop_array =", "is \"snippet.title\", but the resource does # not yet have a \"snippet\" object.", "be uploading text snippets for syncing with your video snippets? 
(y) \") answer", "and that there are no mistakes (see README.md) and restart the program.\" exit()", "\") answer = verify_y_n_none(answer) if answer == 'y': combineSubtitles = True elif answer", "60) #print str(int(h)) + \":\" + str(int(m)) + \":\" + str(int(s)) return str(int(h))", "part='snippet,status') videoids.append(vid) print videoids #c += 1 wait = True with open(folderName +", "drive, and then restart the program.\" print \"exiting application...\" time.sleep(2) exit() print \"\\n\"", "a timestamp is reached #ES: if t is a timestamp #ES: removing punctuation", "= raw_input(\"Please answer 'y' or 'n', or leave the answer blank by hitting", "the program.\" exit() sp1 = sp num+=1 print \"\\nThe document named '\" +", "u'AAQ9YuybUxM', u'3BaTzSSL4_c', u'OriOoB5yF0s', u'91qOFKithgE', u'WQJQkGEwG-Q', u'n4eW0T6Oek0', u'2dRf-EbKYHA', u'RUgi4NfoPEw', u'n40bGD_9eZI', u'OWWAQTGKyMI', u'8a2De6Gzfek', u'VQJgxR3iAoA', u'UEzrAMq6fGc',", "\"/\" + 'videoids.pkl', 'wb') as f: # pickle.dump(videoids, f) if resumeUploads == True:", "reply[0] == 'n': exit() if uploadVideos == False and snipVideos == True: #ES:", "the second column in http://www.loc.gov/standards/iso639-2/php/code_list.php for the appropriate two-letter 'ISO 639-1' language code.)\\n\")", "application...\" time.sleep(2) exit() if len(t) == 2: if combine_only == True: t1 =", "restart the program.\" print \"exiting application...\" time.sleep(2) exit() print \"\\n\" elif combineSubtitles ==", "it looks like \",len(videoids),\" video snippets were already uploaded to Youtube. Now trying", "downloaded from Youtube into a total subtitle file. 
combineSubtitles = True #ES: the", "s in splits: print c,videoids[c-1] service.videos().delete( id=videoids[c-1] ).execute() c += 1 time.sleep(10) if", "= raw_input(\"\\n7/7 Will you be combining the downloaded subtitle snippets into a single", "str(sleepingTime/60) + \" minutes...\" time.sleep(sleepingTime) id = vid print \"\\nUploading compiled subtitles...\" caption_file", "are stored #folderName = 'venant' #fileName refers to the name of the input", "language code.)\\n\") if language != '': verifyLanguage = raw_input(\"\\nYou have entered '\" +", "met. if not t.replace('[','').replace(']','').replace(':','').replace('.','').replace('\\n','').isdigit() and t != \"\\n\": #ES: add t to position", "single word (and put them in an adjacent subtitle (verify)) removeLoneWords = False", "name=originalVideo, isDraft=True, sync=False ) ), media_body=caption_file ).execute() print \"\\nFull video is soon available", "transcript. Make sure you have gone over README.md before proceeding.\" time.sleep(1) print \"You", "print \"\\n\" folderName = raw_input(\"Enter the name of the folder containing your transcript", "punctuation from '[00:00:01.09]' since it is never qualified as a digit (False) and", "True answer = raw_input(\"\\n5/7 Will you be downloading the generated subtitle snippets from", "fileName + \"_\" + str(c) + \".mp4\" if not os.path.exists(media_file): exit('Please specify a", "Make sure you have gone over README.md before proceeding.\" time.sleep(1) print \"You may", "subtitle files together into a single subtitle file for your video.\\n\\nYou may switch", "True or uploadTranscripts == True or resumeUploads == True or downloadCaptions == True", "the 'Enter' key.\" time.sleep(1) interviewer = raw_input(\"\\n6.3.1 Please input your interviewer's name as", "#print splits,videoids #uploads transcripts print \"\\nUploading transcripts...\" for s in splits: print c,s", "body=dict( snippet=dict( videoId=videoids[c-1], language=language, name=media_file, isDraft=True, 
sync=True ) ), media_body=caption_file ).execute() captionsids.append(a['id']) c", "import HttpError from apiclient.http import MediaFileUpload from oauth2client.client import flow_from_clientsecrets from oauth2client.file import", "uploading all videos. You may need to remove any previously-uploaded videos if the", "video into snippets based on the transcript's timestamps (must be set to True", "+ fileName + \".txt' does not exist in the folder '\" + folderName", "video and txt files are stored #folderName = 'venant' #fileName refers to the", "a property in the # resource's \"snippet\" object. ref[key] = {} ref =", "'n': placeBasedTimestamping = False elif answer == '': placeBasedTimestamping = False print \"\\n\"", "\"The file named '\" + originalVideo + \"' does not exist in the", "u'1yumt5fRBF4', u'u5qAHXhhJoo', u'G3gO6DW-wrM', u'qAU_8DNEqP8', u'fbGaOVHXkvY', u'_Knl1rP8Z9w', u'O6f8ZWjSgiw', u'uXY-00DuLjY', u'WpreZ_gbEyw'] #with open(folderName + \"/\"", "subtitles are altered when concatenating snippets (i.e. when combineSubtitles = True) #ES A", "run this program. Continue? (y/n) \") answer = verify_y_n(answer) if answer == \"n\":", "processed into captions. It is\",strftime(\"%H:%M:%S\", localtime()),\". Script will resume in \" + str(2", "video snippets to be deleted from Youtube once subtitles have been successfully generated?", "playlist id, might need to create a playlist in youtube online and copy", "were added for guidance. they can be removed. #ES: a list of the", "= t.split(\":\") if len(t) > 3 or len(t) < 3: print \"\\nOne of", "+ fileName + '.txt').st_size except Exception as e: print e print \"The file", "# print \"Waiting for transcripts \" + str(c) + \" \" + captionsids[c-1]", "following switches control how subtitles are altered when concatenating snippets (i.e. 
when combineSubtitles", "language code of your video and transcript or the intended language code of", "and then restart the program.\" print \"exiting application...\" time.sleep(2) exit() print \"\\n\" elif", "\") try: verifyExistence = os.stat(folderName + '/' + fileName + '.txt').st_size except Exception", "+ int(t[2]) t_list.append(t1) t0 = t1 except ValueError as e: print e print", "u'eOdKfhePfTs', u'AAQ9YuybUxM', u'3BaTzSSL4_c', u'OriOoB5yF0s', u'91qOFKithgE', u'WQJQkGEwG-Q', u'n4eW0T6Oek0', u'2dRf-EbKYHA', u'RUgi4NfoPEw', u'n40bGD_9eZI', u'OWWAQTGKyMI', u'8a2De6Gzfek', u'VQJgxR3iAoA',", "#add a \\n to the end of each line (why?) t += \"\\n\"", "print \"\\n One of your timestamps isn't formatted correctly. Consult README.md for guidelines", "True or uploadTranscripts == True: with open(folderName + \"/\" + 'videoids.pkl', 'rb') as", "good_kwargs ### END BOILERPLATE CODE # Sample python code for videos.insert def videos_insert(properties,", "a.lower().strip() if a == 'y' or a == 'n' or a == '':", "or uploadTranscripts == True: with open(folderName + \"/\" + 'videoids.pkl', 'rb') as f:", "= raw_input(\"\\n7.2 Would you like to reorganize subtitles according to the presence of", "resource's \"snippet\" object. ref[key] = {} ref = ref[key] else: # For example,", "'snippet.defaultLanguage': language, 'snippet.description': 'Description of uploaded video.', 'snippet.tags[]': '', 'snippet.title': fileName, 'status.embeddable': '',", "library not to retry, since # we are handling retry logic ourselves. httplib2.RETRIES", "of a file that contains # the OAuth 2.0 information for this application,", "(n) \") answer = verify_y_n_none(answer) if answer == 'y': deleteVideos = True elif", "on a list of properties given as key-value pairs. # Leave properties with", "transcript into snippets based on the transcript's timestamps (must be set to True", "to the end of each line (why?) 
t += \"\\n\" #ES: if the", "snippets to Youtube as private videos only visible to your account,\\n- uploads the", "if answer == 'y': resumeUploads = True elif answer == 'n': resumeUploads =", "'': removeLoneWords = False answer = raw_input(\"\\n7.2 Would you like to reorganize subtitles", "< 3: print \"\\nOne of your timestamps (\",':'.join(t) ,\") isn't formatted correctly. Consult", "def get_authenticated_service(args): flow = flow_from_clientsecrets(CLIENT_SECRETS_FILE, scope=YOUTUBE_READ_WRITE_SSL_SCOPE, message=MISSING_CLIENT_SECRETS_MESSAGE) storage = Storage(\"youtube-api-snippets-oauth2.json\") credentials = storage.get()", "Build a resource based on a list of properties given as key-value pairs.", "< 10: cc = \"0\" + str(c) else: cc = str(c) #print subtitle", "of your video and transcript or the intended language code of your subtitles", "print \"\\nDownloading captions...\" c = 1 waitLonger = True for s in splits:", "snippets (i.e. when combineSubtitles = True) #ES A feature created by RG that", "a digit (False) and therefore the following condition is never met. if t", "might need to create a playlist in youtube online and copy url id", "+ \".txt\", 'w') as thefile: #thefile = open(folderName + \"/\" + fileName +", "% (e.resp.status,e.content) else: raise except RETRIABLE_EXCEPTIONS, e: error = \"A retriable error occurred:", "Convert a name like \"snippet.tags[]\" to snippet.tags, but handle # the value as", "# and it should be in the same directory with the code. return", "has chosen to only combine subtitles. the switch combine_only allows some different functionality", "tool on the files you have indicated, you will temporarily require \" +", "print \"Please make sure your timestamps are in ascending order and that there", "the intended language code of your subtitles (e.g. 
en, fr, es, etc.):\\n(You can", "deleteVideos == True: print \"\\nDeleting videos...\\n\" c = 1 for s in splits:", "+ int(float(t_1[2])) return [t0,t1] def s_to_hms(seconds): m, sec = divmod(seconds, 60) h, m", "\"\\n1. Slicing into \" + str(len(splits)) + \" parts & uploading videos...\" time.sleep(1)", "that were downloaded from Youtube into a total subtitle file. combineSubtitles = True", "+ '/' + fileName + '.txt').st_size except Exception as e: print e print", "previous position of c in texts (a chunk of text prior to timestamp)", "as f: text = f.readlines() except IOError as e: print \"No text file", "with one of these status # codes is raised. RETRIABLE_STATUS_CODES = [500, 502,", "print \"\\nUploading full video...\" vid = videos_insert( {'snippet.categoryId': '22', 'snippet.defaultLanguage': language, 'snippet.description': 'Description", "\"ES: replace \\\\n with ''\" with open(folderName + \"/\" + fileName + \".txt\")", "the name of the folder containing your transcript and/or video and/or subtitle files\\n(this", "== True: if downloadCaptions == True: print \"\\nDownloading captions...\" c = 1 waitLonger", "+ \"_\" + str(c) + \".txt\", 'w') try: #ES: write the previous position", "which you have an associated transcript. Make sure you have gone over README.md", "videoids if resumeUploads == True or deleteVideos == True or uploadTranscripts == True:", "response is not None: if method == 'insert' and 'id' in response: print", "[u'cj62vgUfnik', u'5k9WCcWCLiU', u'MexTd0EGfRc', u'hWY_30yHOec', u'GrMtKARI9kQ', u'YDHnQAE7U0w', u'yc4IXkGHuXs', u'ZauR51lBjQo', u'kisoEOTjmVI', u'V9XdpjtUU4Q', u'eOdKfhePfTs', u'AAQ9YuybUxM', u'3BaTzSSL4_c',", "directory with the code. 
return build(API_SERVICE_NAME, API_VERSION, http=credentials.authorize(httplib2.Http())) # Explicitly tell the underlying", ").execute() captionsids.append(a['id']) c += 1 #print a wait = True with open(folderName +", "if answer == 'y': combineSubtitles = True elif answer == 'n': combineSubtitles =", "range ...\" loop, we will be setting a property in the # resource's", "to retry.\") max_sleep = 2 ** retry sleep_seconds = random.random() * max_sleep print", "True elif answer == 'n': resumeUploads = False elif answer == '': resumeUploads", "created by RG that has yet to be explored... placeBasedTimestamping = False #ES:", "\") interviewee = raw_input(\"\\n6.3.2 Please input your interviewee's name as it appears in", "\"The folder named '\" + folderName + \"' does not exist in the", "is almost always met. if not t.replace('[','').replace(']','').replace(':','').replace('.','').replace('\\n','').isdigit() and t != \"\\n\": #ES: add", "program will restart by uploading all videos. You may need to remove any", "return True if reply[0] == 'n': exit() if uploadVideos == False and snipVideos", "= [u'cj62vgUfnik', u'5k9WCcWCLiU', u'MexTd0EGfRc', u'hWY_30yHOec', u'GrMtKARI9kQ', u'YDHnQAE7U0w', u'yc4IXkGHuXs', u'ZauR51lBjQo', u'kisoEOTjmVI', u'V9XdpjtUU4Q', u'eOdKfhePfTs', u'AAQ9YuybUxM',", "\"/\" + fileName + \"_\" + str(c) + \".txt\", 'w') try: #ES: write", "if snipVideos == True or uploadTranscripts == True or resumeUploads == True or", "snippets? 
(y) \") answer = verify_y_n_none(answer) if answer == 'y': uploadTranscripts = True", "resumeUploads == True or downloadCaptions == True or deleteVideos == True: args =", "http=credentials.authorize(httplib2.Http())) # Explicitly tell the underlying HTTP transport library not to retry, since", "== '': downloadCaptions = True answer = raw_input(\"\\n6/7 Would you like your uploaded", "or 'y': break #if combineSubtitles == True: print \"\\n\\n\" print \"\\n6.3 If your", "\"\\nThere were \" + str(len(splits)) + \" timestamps detected in \" + fileName", "\". \" + str(len(splits)) + \" video snippets will therefore be uploaded to", "0 #ES: several print commands were added for guidance. they can be removed.", "ref[key] = properties[p].split(',') else: ref[key] = properties[p] elif key not in ref: #", "(Experimental) (n) \") answer = verify_y_n_none(answer) if answer == 'y': removeLoneWords = True", "\"snippet.title\", but the resource does # not yet have a \"snippet\" object. Create", "+ \"_\" + str(cc) + \".vtt\", 'w') as thefile: #thefile.write(sub_txt) thefile.write(subtitle) if cc", "by a newline.\\n\\nPlease enter the file name of your timestamp list (excluding the", "'delete me.txt' to finish pipeline.\" foo = open(folderName + \"/\" + \"delete me.txt\",\"w+\")", "IDs are composed of the following: \" + str(videoids) #print videoids if resumeUploads", "== 'n' or a == '': return a else: a = raw_input(\"Please answer", "not exist in the current directory. 
Please see README.md for instructions.\" print \"exiting", "'ISO 639-1' language code.)\\n\") if language != '': verifyLanguage = raw_input(\"\\nYou have entered", "the road combine_only = True fileName = raw_input(\"In order to accurately combine subtitle", "'22', 'snippet.defaultLanguage': language, 'snippet.defaultAudioLanguage': language, 'snippet.description': 'Description of uploaded video.', 'snippet.tags[]': '', 'snippet.title':", "or a == '': return a else: a = raw_input(\"Please answer 'y' or", "str(text) #ES: strip whitespace text = [x.strip() for x in text] #split times", "timestamp #ES: removing punctuation from '[00:00:01.09]' since it is never qualified as a", "cc with open(folderName + \"/\" + fileName + \"_\" + str(cc) + \".vtt\",", "of your video file (\" + str(videoSize) + \" Mb).\" yes_or_no(question) print \"\\n1.", "then we need to make this into a list of cumulative times so", "+ \"' does not exist in the current directory. Please see README.md for", "t.replace(\" \", \"\") #t = t t = t.replace('[','').replace(']','').replace('\\n','') t = unicode(t, \"UTF-8\")", "'y': resampleSubtitles = True elif answer == 'n': resampleSubtitles = False elif answer", "Youtube for processing. This may take between 20 minutes and several hours, depending", "* max_sleep print \"Sleeping %f seconds and then retrying...\" % sleep_seconds time.sleep(sleep_seconds) return", "elif answer == 'n': uploadTranscripts = False elif answer == '': uploadTranscripts =", "#).execute() # #videos = [] # #for search_result in search_response.get(\"items\", []): # videos.append(\"%s\"", "= verify_y_n_none(answer) if answer == 'y': downloadCaptions = True elif answer == 'n':", "intended language code of your subtitles (e.g. 
en, fr, es, etc.):\\n(You can refer", "the file's extention): \") try: verifyExistence = os.stat(folderName + '/' + originalVideo).st_size except", "your interviewer's name as it appears in the transcript: \") interviewee = raw_input(\"\\n6.3.2", "= myfile.read().replace('\\n', '') with open(folderName + \"/\" + \"delete me.txt\") as f: text", "in splits: print c,videoids[c-1] service.videos().delete( id=videoids[c-1] ).execute() c += 1 time.sleep(10) if combineSubtitles", "file name of your video (this time including the file's extention): \") try:", "and therefore the following condition is almost always met. if not t.replace('[','').replace(']','').replace(':','').replace('.','').replace('\\n','').isdigit() and", "\"/\" + originalVideo, s[0], s[1], targetname=folderName + \"/\" + fileName + \"_\" +", "a = service.captions().insert( part=\"snippet\", body=dict( snippet=dict( videoId=videoids[c-1], language=language, name=media_file, isDraft=True, sync=True ) ),", "'snippet.defaultAudioLanguage': language, 'snippet.description': 'Description of uploaded video.', 'snippet.tags[]': '', 'snippet.title': media_file, 'status.embeddable': '',", "f) if resumeUploads == True: print \"\\nResuming video uploads...\\n\" time.sleep(1) try: with open(folderName", "Exception as e: print e print \"\\nThe program is unable to resume uploads", "you want to process - ES #interviewer = \"C.V.\" #interviewee = \"V.S.\" #where", "open(folderName + \"/\" + 'videoids.pkl', 'rb') as f: videoids = pickle.load(f) print \"\\nThe", "ffmpeg_extract_subclip import time from time import strftime,localtime from postprocess_and_fuse_subs import compileSubs import pickle", "'unlisted', 'status.publicStatsViewable': ''}, folderName + \"/\" + originalVideo, part='snippet,status') # place video in", "set def remove_empty_kwargs(**kwargs): good_kwargs = {} if kwargs is not None: for key,", "videos will be uploaded. 
question = \"\\nThere were \" + str(len(splits)) + \"", "None: print error retry += 1 if retry > MAX_RETRIES: exit(\"No longer attempting", "of your subtitles (e.g. en, fr, es, etc.):\\n(You can refer to the second", "get_authenticated_service(args) def print_results(results): print(results) # Build a resource based on a list of", "Will you be cutting your video into video snippets (y) \") answer =", "an SSL connection. YOUTUBE_READ_WRITE_SSL_SCOPE = \"https://www.googleapis.com/auth/youtube.force-ssl\" API_SERVICE_NAME = \"youtube\" API_VERSION = \"v3\" #", "if properties[p]: if is_array: ref[key] = properties[p].split(',') else: ref[key] = properties[p] elif key", "#for search_result in search_response.get(\"items\", []): # videos.append(\"%s\" % (search_result[\"id\"][\"videoId\"])) # #print \"Videos:\\n\", \"\\n\".join(videos),", "if deleteVideos == True: print \"\\nDeleting videos...\\n\" c = 1 for s in", "for x in text] #split times (?) splits = [] #list of cut-up", "or a == 'n' or a == '': return a else: a =", "that the code can properly run.\" exit() #ES print texts[c] #print \"splits: \"", "# the OAuth 2.0 information for this application, including its client_id and #", "True # print \"Waiting for transcripts \" + str(c) + \" \" +", "thefile.write(subtitle) if cc == \"31\": print subtitle c += 1 time.sleep(3) #deletes videos", "playlist def playlist_items_insert(properties, **kwargs): resource = build_resource(properties) # See full sample for function", "your first time running the tool, simply leave the following answers blank. For", "were already uploaded to Youtube. 
Now trying to resume uploading the remaining snippets...)\"", "media_body=file ).execute() id = insert_result[\"id\"] name = insert_result[\"snippet\"][\"name\"] language = insert_result[\"snippet\"][\"language\"] status =", "'%s(%s) in '%s' language, '%s' status.\" % (name, # id, language, status) c", "processes you would like to run: \\n\\n\" time.sleep(2) answer = raw_input(\"\\n1/7 Will you", "on the files you have indicated, you will temporarily require \" + str(videoSize)", "+ \"/\" + \"delete me.txt\", 'r') as myfile: text = myfile.read().replace('\\n', '') with", "timestamps demarcating the length of each video to which your subtitle files are", "'captionsids.pkl', 'rb') as f: captionsids = pickle.load(f) #if wait == True: if downloadCaptions", "a \\n to the end of each line (why?) t += \"\\n\" #ES:", "please select which processes you would like to run: \\n\\n\" time.sleep(2) answer =", "uploadTranscripts = False elif answer == '': uploadTranscripts = True answer = raw_input(\"\\n5/7", "originalVideo = raw_input(\"Enter the file name of your video (this time including the", "answer == 'y': fullSentenceSubtitles = True elif answer == 'n': fullSentenceSubtitles = False", "snipping and uploading content to Youtube for processing. This may take between 20", "answer 'y' or 'n', or leave the answer blank by hitting 'Enter': \")", "= t1 elif len(t) == 3: #if we are only combining subtitle files,", "#place-based time stamping can be set to True or False (make a variable", "+ '/' + originalVideo).st_size/1000000 answer = raw_input(\"If this is your first time running", "moviepy.video.io.ffmpeg_tools import ffmpeg_extract_subclip import time from time import strftime,localtime from postprocess_and_fuse_subs import compileSubs", "24h using the current API credentials. 
Continue?\" print \"\\nIf all input was correct,", "answer is invalid\": reply = str(raw_input(question+' (y/n): ')).lower().strip() if reply[0] == 'y': return", "subtitle file for your video.\\n\\nYou may switch these processes 'on' or 'off' depending", "(y) \") answer = verify_y_n_none(answer) if answer == 'y': downloadCaptions = True elif", "delete uploaded video snippets from your Youtube account once subtitle processing is complete", "time.sleep(1) print \"\\n\" print \"This tool:\\n- snips your transcript (.txt) into text snippets", "if there is a problem with playlist id, might need to create a", "+ t1 else: t1 = int(t[0])*3600 + int(t[1])*60 + float(t[2]) splits.append([t0,t1]) #print int(t[0])*3600", "\"/\" + 'videoids.pkl', 'wb') as f: pickle.dump(videoids, f) else: if resumeUploads == True", "* sleepingTime) else: if downloadCaptions == True: with open(folderName + \"/\" + 'captionsids.pkl',", "t = unicode(t, \"UTF-8\") #split the timestamps at : (into 3) t =", "== True: #compiles them all print \"\\nCombining subtitle snippets ...\" #ES: this is", "i in splits: # print s_to_hms(i[0]),\"->\",s_to_hms(i[1]) #time.sleep(60) #print splits,splits[len(splits)-1][1] #splits.append([splits[len(splits)-1][1],7200]) #print splits #print", "playlistID, playlist_items_insert( {'snippet.resourceId.kind': 'youtube#video', 'snippet.resourceId.videoId': vid, 'snippet.position': ''}, part='snippet', onBehalfOfContentOwner='') print \"Waiting for", "True so that the code can properly run.\" exit() #ES print texts[c] #print", "u'3BaTzSSL4_c', u'OriOoB5yF0s', u'91qOFKithgE', u'WQJQkGEwG-Q', u'n4eW0T6Oek0', u'2dRf-EbKYHA', u'RUgi4NfoPEw', u'n40bGD_9eZI', u'OWWAQTGKyMI', u'8a2De6Gzfek', u'VQJgxR3iAoA', u'UEzrAMq6fGc', u'PXCHMF-Z7X4',", "resumeUploads = True elif answer == 'n': resumeUploads = False elif answer ==", "= True answer = raw_input(\"\\n2/7 Will you be uploading video snippets to Youtube", "the associated video accordingly into video snippets,\\n- uploads these 
video snippets to Youtube", "concatenating snippets (i.e. when combineSubtitles = True) #ES A feature created by RG", "client_id.json CLIENT_SECRETS_FILE = \"client_id.json\" # This OAuth 2.0 access scope allows for full", "a list of timestamps demarcating the length of each video to which your", "'') #print \"ES: replace \\\\n with ''\" with open(folderName + \"/\" + fileName", "(not recommended, since some timestamp/subtitle units can end up being excessively large) fullSentenceSubtitles", "like to run. If this is your first time running the tool, simply", "combineSubtitles == True: #compiles them all print \"\\nCombining subtitle snippets ...\" #ES: this", "to the second column in http://www.loc.gov/standards/iso639-2/php/code_list.php for the appropriate two-letter 'ISO 639-1' language", "uploads per 24h using the current API credentials. Continue?\" print \"\\nIf all input", "can run if combine_only == True: t1 = int(t[0])*3600 + int(t[1])*60 + float(t[2])", "combine_only == True: t1 = int(t[0])*3600 + int(t[1])*60 + float(t[2]) splits.append([t0,t0+t1]) #print int(t[0])*3600", "100 video uploads per 24h using the current API credentials. Continue?\" print \"\\nIf", "media_file = folderName + '/' + fileName + \"_\" + str(c) + \".flv\"", "time.sleep(sleep_seconds) return response['id'] if uploadTranscripts == True or resumeUploads == True or downloadCaptions", "can lead to short, choppy, fast subtitles that are hard to read) (n)", "'': verifyLanguage = raw_input(\"\\nYou have entered '\" + language + \"' as the", "False key = prop_array[pa] # Convert a name like \"snippet.tags[]\" to snippet.tags, but", "pickle.load(f) print \"\\nThe video IDs are composed of the following: \" + str(videoids)", "few questions...\\\"), please input them. 
If this does not apply to your transcript,", "\"exiting application...\" time.sleep(2) exit() print \"\\n\" if snipVideos == True or uploadTranscripts ==", "#adjust sleeping time as needed - ES #adjust switches as needed sleepingTime =", "credentials. Continue?\" print \"\\nIf all input was correct, the program will begin snipping", "t1 elif len(t) == 3: #if we are only combining subtitle files, and", "with open(folderName + \"/\" + 'captionsids.pkl', 'rb') as f: captionsids = pickle.load(f) #if", "resume or your 'videoids.pkl' file has gone missing. The program will restart by", "used this tool, please select which processes you would like to run: \\n\\n\"", "snips the associated video accordingly into video snippets,\\n- uploads these video snippets to", "if pa == (len(prop_array) - 1): # Leave properties without values out of", "text] #split times (?) splits = [] #list of cut-up texts texts =", "status = insert_result[\"snippet\"][\"status\"] #print \"Uploaded caption track '%s(%s) in '%s' language, '%s' status.\"", "video to be processed. It is\",strftime(\"%H:%M:%S\", localtime()),\". Script will resume in \" +", "running this tool on the files you have indicated, you will temporarily require", "into a single subtitle file for your video.\\n\\nYou may switch these processes 'on'", "os.stat(folderName + '/' + originalVideo).st_size except Exception as e: print e print \"The", "as e: print \"No text file found because you are not running the", "list of properties given as key-value pairs. # Leave properties with empty values", "guidelines on proper timestamp formatting.\" print \"\\nexiting application...\" time.sleep(2) exit() if len(t) ==", "are not running the entire pipeline. Creating dummy file 'delete me.txt' to finish", "else: if resumeUploads == True or deleteVideos == True or uploadTranscripts == True:", "steps you would like to run. 
If this is your first time running", "+ \".txt\", 'r') as myfile: text = myfile.read().replace('\\n', '') #print \"ES: replace \\\\n", "#split the timestamps at : (into 3) t = t.split(\":\") if len(t) >", "#in this case, the user has chosen to only combine subtitles. the switch", "uploadVideos = True answer = raw_input(\"\\n3/7 Will you be resuming video uploads from", "originalVideo).st_size/1000000 answer = raw_input(\"If this is your first time running this tool on", "Explicitly tell the underlying HTTP transport library not to retry, since # we", "== 'y': downloadCaptions = True elif answer == 'n': downloadCaptions = False elif", "% response['id'] videoid = response['id'] elif method != 'insert' or 'id' not in", "snippets to be deleted from Youtube once subtitles have been successfully generated? (n)", "try: verifyExistence = os.stat(folderName + '/' + fileName + '.txt').st_size except Exception as", "Consult README.md for guidelines on proper timestamp formatting.\" print \"\\nexiting application...\" time.sleep(2) exit()", "subtitles for a video for which you have an associated transcript. Make sure", "options were selected. Exiting...\" exit() #ES: UPLOADS THE VIDEOS if uploadVideos == True:", "= False answer = raw_input(\"\\n7/7 Will you be combining the downloaded subtitle snippets", "print \"Video id '%s' was successfully uploaded.\" % response['id'] videoid = response['id'] elif", "str(len(splits)) + \" timestamps detected in \" + fileName + \". 
\" +", "(Experimental; this feature is not recommended since subtitle units tend to become excessively", "True or deleteVideos == True: combine_only = False fileName = raw_input(\"Enter the file", "deets #print t.replace('[','').replace(']','').replace(':','').replace('.','').replace('\\n','').isdigit() #print \"c: \" + str(c) with open(folderName + \"/\" +", "\"\\n\": #ES: add t to position c of texts texts[c] += t#.encode('utf8') #print", "see README.md for instructions.\" print \"exiting application...\" time.sleep(2) exit() print \"\\n\" videoSize =", "from oauth2client.client import flow_from_clientsecrets from oauth2client.file import Storage from oauth2client.tools import argparser, run_flow", "len(t) == 2: if combine_only == True: t1 = int(t[0])*60 + int(t[1]) splits.append([t0,t0+t1])", "storage.get() if credentials is None or credentials.invalid: credentials = run_flow(flow, storage, args) #", "are hard to read) (n) \") answer = verify_y_n_none(answer) if answer == 'y':", "str(sleepingTime/60) + \" minutes...\" time.sleep(sleepingTime) #search_response = service.search().list( # q=\"Anita\", # part=\"id\", #", "that needs exploration so as to make sure that place names are never", "int(t[1])*60 + int(t[2]) t_list.append(t1) t0 = t1 except ValueError as e: print e", "deleteVideos = True elif answer == 'n': deleteVideos = False elif answer ==", "+ \"/\" + fileName + \".txt\", 'r') as myfile: text = myfile.read().replace('\\n', '')", "'insert' and 'id' in response: print \"Video id '%s' was successfully uploaded.\" %", "that has yet to be explored... 
placeBasedTimestamping = False #ES: resample subtitles to", "\".txt\") as f: text = f.readlines() except IOError as e: print \"No text", "if t != \"\" and t.replace('[','').replace(']','').replace(':','').replace('.','').replace('\\n','').isdigit() and \"[\" in t: #increase pos on", "on proper timestamp formatting.\" print \"\\nexiting application...\" time.sleep(2) exit() if len(t) == 2:", "timestamps are in ascending order...\" sp1 = 0 num = 0 #print str(splits)", "splits: # print s_to_hms(i[0]),\"->\",s_to_hms(i[1]) #time.sleep(60) #print splits,splits[len(splits)-1][1] #splits.append([splits[len(splits)-1][1],7200]) #print splits #print \"Wait\" #time.sleep(30)", "answer = raw_input(\"\\n2/7 Will you be uploading video snippets to Youtube for syncing?", "\") t_0 = time[0].split(\":\") t_1 = time[1].split(\":\") t0 = float(int(t_0[0])*3600) + int(float(t_0[1])*60) +", "download this discovery document from the developers page # and it should be", "this is your first time running this tool on the files you have", "# let rodolphe know if there is a problem with playlist id, might", "some timestamp/subtitle units can end up being excessively large) fullSentenceSubtitles = False #ES:", "key not in ref: # For example, the property is \"snippet.title\", but the", "snippets based on the transcript's timestamps (must be set to True for other", "insert_result = youtube.captions().insert( part=\"snippet\", body=dict( snippet=dict( videoId=video_id, language=language, name=name, isDraft=True ) ), media_body=file", "True: #print splits,videoids #uploads transcripts print \"\\nUploading transcripts...\" for s in splits: print", "client_id and # client_secret. 
\"\"\" to create a client secret file: google apis", "reply[0] == '': return True if reply[0] == 'n': exit() if uploadVideos ==", "splits = [] #list of cut-up texts texts = [\"\"] t0 = 0", "downloadCaptions == True: print \"\\nDownloading captions...\" c = 1 waitLonger = True for", "t_list = [] #ES: PREPARE INPUT TEXT FOR PROCESSING if snipTranscript == True:", "'/' + originalVideo).st_size/1000000 answer = raw_input(\"If this is your first time running this", ",\") isn't formatted correctly. Consult README.md for guidelines on proper timestamp formatting.\" print", "it is never qualified as a digit (False) and therefore the following condition", "of your timestamps:\" print \"Timestamp number #\",str(num+2),\" (equivalent to \",str(sp[1]),\" seconds) should be", "yet to be explored... placeBasedTimestamping = False #ES: resample subtitles to prevent cut-up", "are only combining subtitle files, and we are using a .txt file with", "the name of a file that contains # the OAuth 2.0 information for", "# id, language, status) c = 1 captionsids = [] wait = False", "\"Video id '%s' was successfully uploaded.\" % response['id'] videoid = response['id'] elif method", "= service.videos().insert( body=resource, media_body=MediaFileUpload(media_file, chunksize=-1, resumable=True), **kwargs ) vid = resumable_upload(request, 'video', 'insert')", "= False # except: # waitLonger = True # print \"Waiting for transcripts", "#interviewer = \"E.H.\" #interviewee = \"E.M.\" #fileName = 'Frederic' #originalVideo = \"Frederic.mov\" #interviewer", "** retry sleep_seconds = random.random() * max_sleep print \"Sleeping %f seconds and then", "= raw_input(\"If this is your first time running this tool on the files", "is your first time running the tool, simply leave the following answers blank.", "(name, # id, language, status) c = 1 captionsids = [] wait =", "this application, including its client_id and # client_secret. 
\"\"\" to create a client", "False # except: # waitLonger = True # print \"Waiting for transcripts \"", "print c,videoids[c-1] service.videos().delete( id=videoids[c-1] ).execute() c += 1 time.sleep(10) if combineSubtitles == True:", "if is_array: ref[key] = properties[p].split(',') else: ref[key] = properties[p] elif key not in", "media_file, part='snippet,status') videoids.append(vid) print videoids #c += 1 wait = True with open(folderName", "was correct, the program will begin snipping and uploading content to Youtube for", "+ str(len(splits)) + \" parts\" time.sleep(1) for s in splits: c += 1", "#requires oscar.mp4 video file in same folder #transcript text file \"oscar4.txt\" #might have", "t.replace('[','').replace(']','').replace(':','').replace('.','').replace('\\n','').isdigit() #print \"c: \" + str(c) with open(folderName + \"/\" + fileName +", "True: with open(folderName + \"/\" + 'captionsids.pkl', 'rb') as f: captionsids = pickle.load(f)", "Continue?\" print \"\\nIf all input was correct, the program will begin snipping and", "splits: print c,s media_file = folderName + '/' + fileName + \"_\" +", "# resource's \"snippet\" object. ref[key] = {} ref = ref[key] else: # For", "file's extention): \") try: verifyExistence = os.stat(folderName + '/' + originalVideo).st_size except Exception", "2.0\" # Authorize the request and store authorization credentials. def get_authenticated_service(args): flow =", "return response['id'] if uploadTranscripts == True or resumeUploads == True or downloadCaptions ==", "RG that has yet to be explored... 
placeBasedTimestamping = False #ES: resample subtitles", "0 while response is None: try: print \"Uploading file...\" status, response = request.next_chunk()", "u'G3gO6DW-wrM', u'qAU_8DNEqP8', u'fbGaOVHXkvY', u'_Knl1rP8Z9w', u'O6f8ZWjSgiw', u'uXY-00DuLjY', u'WpreZ_gbEyw'] #with open(folderName + \"/\" + 'videoids.pkl',", "= flow_from_clientsecrets(CLIENT_SECRETS_FILE, scope=YOUTUBE_READ_WRITE_SSL_SCOPE, message=MISSING_CLIENT_SECRETS_MESSAGE) storage = Storage(\"youtube-api-snippets-oauth2.json\") credentials = storage.get() if credentials is", "u'8a2De6Gzfek', u'VQJgxR3iAoA', u'UEzrAMq6fGc', u'PXCHMF-Z7X4', u'SU_Rbp9V_Zo', u'VLhSxDh9gI0', u'80rY1RlbVQw', u'1yumt5fRBF4', u'u5qAHXhhJoo', u'G3gO6DW-wrM', u'qAU_8DNEqP8', u'fbGaOVHXkvY', u'_Knl1rP8Z9w',", "enabled 'resampleSubtitles' (above), you have the option to make subtitle entries full sentences", "elif answer == '': resampleSubtitles = False if resampleSubtitles == True: answer =", "API v3\" and ENABLE it click \"create credentials\" create and \"OAUT client id\"", "language = insert_result[\"snippet\"][\"language\"] status = insert_result[\"snippet\"][\"status\"] #print \"Uploaded caption track '%s(%s) in '%s'", "from time import strftime,localtime from postprocess_and_fuse_subs import compileSubs import pickle import os #adjust", "def verify_y_n(a): while True: a = a.lower().strip() if a == 'y' or a", "(.txt) uploadTranscripts = True #ES: download snippet subtitle files (.vtt) downloadCaptions = True", "# the value as an array. 
if key[-2:] == '[]': key = key[0:len(key)-2:]", "in custom playlist def playlist_items_insert(properties, **kwargs): resource = build_resource(properties) # See full sample", "open(folderName + \"/\" + \"delete me.txt\",\"w+\") foo.close() with open(folderName + \"/\" + \"delete", "raw_input(\"\\n1/7 Will you be cutting your video into video snippets (y) \") answer", "foo = open(folderName + \"/\" + \"delete me.txt\",\"w+\") foo.close() with open(folderName + \"/\"", "True if combineSubtitles == True: answer = raw_input(\"\\n7.1 Would you like to reorganize", "= argparser.parse_args() service = get_authenticated_service(args) def print_results(results): print(results) # Build a resource based", ": (into 3) t = t.split(\":\") if len(t) > 3 or len(t) <", "several hours, depending on the size of your video file (\" + str(videoSize)", "length of each video to which your subtitle files are associated. These values", "folder must be located inside the 'files' folder): \") try: verifyExistence = os.stat(folderName).st_size", "snippets into a single subtitle file for your video? (y) \") answer =", "ask you a few questions...\\\"), please input them. If this does not apply", "#if we are only combining subtitle files, and we are using a .txt", "uploading content to Youtube for processing. This may take between 20 minutes and", ".txt) #fileName = 'venant' #originalVideo refers to the name of the video file", "with open(folderName + \"/\" + 'videoids.pkl', 'wb') as f: pickle.dump(videoids, f) else: if", "print \"No text file found because you are not running the entire pipeline.", "hard drive, and then restart the program.\" print \"exiting application...\" time.sleep(2) exit() print", "of the inserted resource. def build_resource(properties): resource = {} for p in properties:", "\"[\" in t: #increase pos on texts by 1 c += 1 #ES:", "t_list.append(t1) t0 = t0 + t1 else: t1 = int(t[0])*3600 + int(t[1])*60 +", "str(videoSize) + \" Mb).\" yes_or_no(question) print \"\\n1. 
Slicing into \" + str(len(splits)) +", "answer = verify_y_n(answer) if answer == \"n\": print \"Please make sure you have", "str(int(s)) return str(int(h)) + \":\" + str(int(m)) + \":\" + str(int(sec)) #ES: open", "like to ask you a few questions...\\\"), please input them. If this does", "= \"0\" + str(c) else: cc = str(c) #print subtitle print cc with", "tool:\\n- snips your transcript (.txt) into text snippets based on its timestamps,\\n- snips", "following two answers blank by pressing the 'Enter' key.\" time.sleep(1) interviewer = raw_input(\"\\n6.3.1", "#time.sleep(.1) texts.append(\"\") texts[c] = \"\" #t = t.replace(\" \", \"\") #t = t", "store authorization credentials. def get_authenticated_service(args): flow = flow_from_clientsecrets(CLIENT_SECRETS_FILE, scope=YOUTUBE_READ_WRITE_SSL_SCOPE, message=MISSING_CLIENT_SECRETS_MESSAGE) storage = Storage(\"youtube-api-snippets-oauth2.json\")", "id = insert_result[\"id\"] name = insert_result[\"snippet\"][\"name\"] language = insert_result[\"snippet\"][\"language\"] status = insert_result[\"snippet\"][\"status\"] #print", "subtitle files (.vtt),\\n- stitches these subtitle files together into a single subtitle file", "time including the file's extention): \") try: verifyExistence = os.stat(folderName + '/' +", "u'TsDnqWmpvrw', u'Kvem1XnPHF0', u'VwqhkmbiLh0', u'V1sv1MYLdC0'] #videoids = [u'cj62vgUfnik', u'5k9WCcWCLiU', u'MexTd0EGfRc', u'hWY_30yHOec', u'GrMtKARI9kQ', u'YDHnQAE7U0w', u'yc4IXkGHuXs',", "= videos_insert( {'snippet.categoryId': '22', 'snippet.defaultLanguage': language, 'snippet.description': 'Description of uploaded video.', 'snippet.tags[]': '',", "precede their discourse (e.g. 
\\\"Emmanuel: Hi, I'd like to ask you a few", "the rest of the pipeline can run if combine_only == True: t1 =", "users who have already used this tool, please select which processes you would", "postprocess_and_fuse_subs import compileSubs import pickle import os #adjust sleeping time as needed -", "video IDs are composed of the following: \" + str(videoids) #print videoids if", "print \"You may terminate the application at any point by pressing Ctrl+C (Cmd+C", "captions.insert method to upload a caption track in draft status. def upload_caption(youtube, video_id,", "time.sleep(1) if len(videoids) > 0: print \"(However, it looks like \",len(videoids),\" video snippets", "#ES: this is a feature that needs exploration so as to make sure", "your timestamps (\",':'.join(t) ,\") isn't formatted correctly. Consult README.md for guidelines on proper", "= [500, 502, 503, 504] # This method implements an exponential backoff strategy", "is a problem with playlist id, might need to create a playlist in", "f: text = f.readlines() except IOError as e: print \"No text file found", "'y': uploadVideos = True snipVideos = True elif answer == 'n': uploadVideos =", "'r') as myfile: text = myfile.read().replace('\\n', '') with open(folderName + \"/\" + \"delete", "str(int(m)) + \":\" + str(int(s)) return str(int(h)) + \":\" + str(int(m)) + \":\"", "and is not a next-line char #ES: removing punctuation from '[00:00:01.09]' since it", "to reorganize subtitles according to punctuation? (Experimental; can lead to short, choppy, fast", "thefile: #thefile.write(sub_txt) thefile.write(subtitle) if cc == \"31\": print subtitle c += 1 time.sleep(3)", "given as key-value pairs. 
# Leave properties with empty values out of the", "for guidelines on proper timestamp formatting.\" print \"\\nVerifying if timestamps are in ascending", "one of your timestamps:\" print \"Timestamp number #\",str(num+2),\" (equivalent to \",str(sp[1]),\" seconds) should", "private videos only visible to your account,\\n- uploads the text snippets to Youtube", "it containing \" + str(len(splits)) + \" timestamps formatted like such '[HH:MM:SS.00]'.\" else:", "\"v3\" # This variable defines a message to display if the CLIENT_SECRETS_FILE is", "= False answer = raw_input(\"\\n7.2 Would you like to reorganize subtitles according to", "following condition is almost always met. if not t.replace('[','').replace(']','').replace(':','').replace('.','').replace('\\n','').isdigit() and t != \"\\n\":", "OAuth 2.0 access scope allows for full read/write access to the # authenticated", "with open(folderName + \"/\" + \"delete me.txt\") as f: text = f.readlines() pass", "transcript and/or video and/or subtitle files\\n(this folder must be located inside the 'files'", "list (excluding the \\\".txt\\\" extention): \") else: print \"You have not chosen any", "switches as needed sleepingTime = 400 #___SWITCHES(defaults)___# #ES: cut transcript into snippets based", "sentences (not recommended, since some timestamp/subtitle units can end up being excessively large)", "needed sleepingTime = 400 #___SWITCHES(defaults)___# #ES: cut transcript into snippets based on the", "True for other processes to run) snipTranscript = True #ES: cut video into", "program will begin snipping and uploading content to Youtube for processing. This may", "we are handling retry logic ourselves. httplib2.RETRIES = 1 # Maximum number of", "), media_body=caption_file ).execute() captionsids.append(a['id']) c += 1 #print a wait = True with", "it should be in the same directory with the code. 
return build(API_SERVICE_NAME, API_VERSION,", "# Build a resource based on a list of properties given as key-value", "answer == 'n': removeLoneWords = False elif answer == '': removeLoneWords = False", "answer == 'y': combineSubtitles = True elif answer == 'n': combineSubtitles = False", "if answer == 'y': removeLoneWords = True elif answer == 'n': removeLoneWords =", "begin snipping and uploading content to Youtube for processing. This may take between", "video and compiled transcript to your Youtube account once complete uploadFull = False", "resampleSubtitles == True: answer = raw_input(\"\\n7.1.1 Would you like to reorganize subtitles to", "f: pickle.dump(captionsids, f) print \"Waiting for transcripts to be processed into captions. It", "part='snippet,status') # place video in custom playlist def playlist_items_insert(properties, **kwargs): resource = build_resource(properties)", "+ \" timestamps formatted like such '[HH:MM:SS.00]'.\" else: print \"Please set the variable", "IF you enabled 'resampleSubtitles' (above), you have the option to make subtitle entries", "pairs. # Leave properties with empty values out of the inserted resource. def", "\" + fileName + \". \" + str(len(splits)) + \" video snippets will", "#interviewee = \"O.G.\" #folderName = 'oscar' #fileName = 'oscar' #originalVideo = \"Oscar.mp4\" ###", "\", \"\") #t = t t = t.replace('[','').replace(']','').replace('\\n','') t = unicode(t, \"UTF-8\") #split", "so that the code can properly run.\" exit() #ES print texts[c] #print \"splits:", "be setting a property in the # resource's \"snippet\" object. ref[key] = {}", "according to punctuation? (Experimental; can lead to short, choppy, fast subtitles that are", "demarcating the length of each video to which your subtitle files are associated.", "+ \". \" + str(len(splits)) + \" video snippets will created. Continue?\" print", "transcript has speaker names (e.g. 
the interviewer or interviewee's names) that precede their", "'venant' #fileName refers to the name of the input .txt file (excluding .txt)", "on its timestamps,\\n- snips the associated video accordingly into video snippets,\\n- uploads these", "want to process - ES #interviewer = \"C.V.\" #interviewee = \"V.S.\" #where the", "of place names? (Experimental) (n) \") answer = verify_y_n_none(answer) if answer == 'y':", "transcripts (.txt) uploadTranscripts = True #ES: download snippet subtitle files (.vtt) downloadCaptions =", "stamping can be set to True or False (make a variable for this)", "the full video and compiled transcript to your Youtube account once complete uploadFull", "for guidance. they can be removed. #ES: a list of the transcript's timestamps", "+ str(videoids) #print videoids if resumeUploads == True or deleteVideos == True or", "connection. YOUTUBE_READ_WRITE_SSL_SCOPE = \"https://www.googleapis.com/auth/youtube.force-ssl\" API_SERVICE_NAME = \"youtube\" API_VERSION = \"v3\" # This variable", "videoId=video_id, language=language, name=name, isDraft=True ) ), media_body=file ).execute() id = insert_result[\"id\"] name =", "open(folderName + \"/\" + 'videoids.pkl', 'wb') as f: pickle.dump(videoids, f) else: if resumeUploads", "e: print e print \"The folder named '\" + folderName + \"' does", "file in same folder #transcript text file \"oscar4.txt\" #might have to be in", "p.split('.') ref = resource for pa in range(0, len(prop_array)): is_array = False key", "running the entire pipeline. Creating dummy file 'delete me.txt' to finish pipeline.\" foo", "processed. It is\",strftime(\"%H:%M:%S\", localtime()),\". 
Script will resume in \" + str(sleepingTime/60) + \"", "\"/\" + fileName + \".txt\", 'r') as myfile: text = myfile.read().replace('\\n', '') #print", "elif answer == '': uploadVideos = True answer = raw_input(\"\\n3/7 Will you be", "\" + str(sleepingTime/60) + \" minutes...\" time.sleep(sleepingTime) id = vid print \"\\nUploading compiled", "apis dashboard --> create a new project on the resulting dashboard, \"enable apis", "from apiclient.http import MediaFileUpload from oauth2client.client import flow_from_clientsecrets from oauth2client.file import Storage from", "False elif answer == '': combineSubtitles = True if combineSubtitles == True: answer", "== 'insert' and 'id' in response: print \"Video id '%s' was successfully uploaded.\"", "== True: print \"\\nDeleting videos...\\n\" c = 1 for s in splits: print", "t.split(\":\") if len(t) > 3 or len(t) < 3: print \"\\nOne of your", "'y': fullSentenceSubtitles = True elif answer == 'n': fullSentenceSubtitles = False elif answer", "try: #ES: write the previous position of c in texts (a chunk of", "key is <KEY> #client id is in client_id.json CLIENT_SECRETS_FILE = \"client_id.json\" # This", "'snippet.defaultLanguage': language, 'snippet.defaultAudioLanguage': language, 'snippet.description': 'Description of uploaded video.', 'snippet.tags[]': '', 'snippet.title': media_file,", "'videoids.pkl', 'rb') as f: videoids = pickle.load(f) print \"\\nThe video IDs are composed", "texts texts[c] += t#.encode('utf8') #print t.replace('[','').replace(']','').replace(':','').replace('.','').replace('\\n','').isdigit() #ES: this will aggregate phrases (t) into", "have not chosen any options for running this application. Exiting...\" exit() while True:", "s in splits: c += 1 if c > len(videoids): ffmpeg_extract_subclip(folderName + \"/\"", "(.vtt),\\n- stitches these subtitle files together into a single subtitle file for your", "the generated subtitle snippets from Youtube? 
(y) \") answer = verify_y_n_none(answer) if answer", "or uploadTranscripts == True: with open(folderName + \"/\" + 'videoids.pkl', 'wb') as f:", "value as an array. if key[-2:] == '[]': key = key[0:len(key)-2:] is_array =", "#ES: the following is called when videos are being uploaded (uploadVideos = True)", "int(t[1])*60 + int(t[2]) t_list.append(t1) t0 = t0 + t1 else: t1 = int(t[0])*3600", "== True or downloadCaptions == True or deleteVideos == True: args = argparser.parse_args()", "folderName = raw_input(\"Enter the name of the folder containing your transcript and/or video", "captionsids[c-1] + \" to be processed into captions. It is\",strftime(\"%H:%M:%S\", localtime()),\". Script will", "ffmpeg_extract_subclip(folderName + \"/\" + originalVideo, s[0], s[1], targetname=folderName + \"/\" + fileName +", "verify_y_n_none(answer) if answer == 'y': uploadTranscripts = True elif answer == 'n': uploadTranscripts", "\") answer = verify_y_n_none(answer) if answer == 'y': deleteVideos = True elif answer", "ref[key] = properties[p] elif key not in ref: # For example, the property", "for instructions.\" print \"exiting application...\" time.sleep(2) exit() print \"\\n\" if snipVideos == True", "transcript: \") print \"\\n\" #____________# # let rodolphe know if there is a", "time.sleep(2) exit() if len(t) == 2: if combine_only == True: t1 = int(t[0])*60", "language=language, name=name, isDraft=True ) ), media_body=file ).execute() id = insert_result[\"id\"] name = insert_result[\"snippet\"][\"name\"]", "resource does # not yet have a \"snippet\" object. 
Create the snippet object", "snipVideos == True: #ES: the following is called when videos are being uploaded", "followed by a newline.\\n\\nPlease enter the file name of your timestamp list (excluding", "verify_y_n_none(answer) if answer == 'y': deleteVideos = True elif answer == 'n': deleteVideos", "remove_empty_kwargs(**kwargs) # See full sample for function request = service.videos().insert( body=resource, media_body=MediaFileUpload(media_file, chunksize=-1,", "verify_y_n_none(answer) if answer == 'y': placeBasedTimestamping = True elif answer == 'n': placeBasedTimestamping", "process - ES #interviewer = \"C.V.\" #interviewee = \"V.S.\" #where the video and", "return str(int(h)) + \":\" + str(int(m)) + \":\" + str(int(sec)) #ES: open anita/Anita.txt", "+ int(t[1])*60 + int(t[2]) t_list.append(t1) t0 = t0 + t1 else: t1 =", "True: #ES: the following is called when videos are being uploaded (uploadVideos =", "+ str(splits) #for i in splits: # print s_to_hms(i[0]),\"->\",s_to_hms(i[1]) #time.sleep(60) #print splits,splits[len(splits)-1][1] #splits.append([splits[len(splits)-1][1],7200])", "times (?) splits = [] #list of cut-up texts texts = [\"\"] t0", "interviewee = raw_input(\"\\n6.3.2 Please input your interviewee's name as it appears in the", "as a digit (False) and therefore the following condition is never met. 
if", "\"/\" + 'captionsids.pkl', 'wb') as f: pickle.dump(captionsids, f) print \"Waiting for transcripts to", "s in splits: print c,s,captionsids[c-1] sub_txt = \"\" # while waitLonger == True:", "= {} if kwargs is not None: for key, value in kwargs.iteritems(): if", "float(int(t_1[0])*3600) + int(float(t_1[1])*60) + int(float(t_1[2])) return [t0,t1] def s_to_hms(seconds): m, sec = divmod(seconds,", "compileSubs import pickle import os #adjust sleeping time as needed - ES #adjust", "and text snippets\\n- downloads the text snippets as subtitle files (.vtt),\\n- stitches these", "seconds and then retrying...\" % sleep_seconds time.sleep(sleep_seconds) return response['id'] if uploadTranscripts == True", "isn't formatted correctly. Consult README.md for guidelines on proper timestamp formatting.\" print \"\\nexiting", "prioritize keeping full sentences intact? (Experimental; this feature is not recommended since subtitle", "as to make sure that place names are never split between 2 timestamps,", "feature is not recommended since subtitle units tend to become excessively long) (n)", "+ int(float(t_0[1])*60) + int(float(t_0[2])) t1 = float(int(t_1[0])*3600) + int(float(t_1[1])*60) + int(float(t_1[2])) return [t0,t1]", "verify_y_n_none(answer) if answer == 'y': downloadCaptions = True elif answer == 'n': downloadCaptions", "texts texts = [\"\"] t0 = 0 c = 0 #ES: several print", "0: if sp[1] <= sp1[1]: print \"\\nThere is a problem with one of", "text snippets\\n- downloads the text snippets as subtitle files (.vtt),\\n- stitches these subtitle", "'': return a else: a = raw_input(\"Please answer 'y' or 'n', or leave", "switch these processes 'on' or 'off' depending on which steps you would like", "a name like \"snippet.tags[]\" to snippet.tags, but handle # the value as an", "(verify)) removeLoneWords = False #____________# #ES: USER INTERVIEW SECTION def verify_y_n(a): while True:", "= 0 num = 0 #print str(splits) #print str(t_list) for sp in splits:", "snippet=dict( 
videoId=video_id, language=language, name=name, isDraft=True ) ), media_body=file ).execute() id = insert_result[\"id\"] name", "import httplib import random from apiclient.discovery import build from apiclient.errors import HttpError from", "+ \"_\" + str(c) + \".txt\" #print s,media_file,caption_file,videoids[c-1] a = service.captions().insert( part=\"snippet\", body=dict(", "'[00:00:01.09]' since it is never qualified as a digit (False) and therefore the", "== 'y': removeLoneWords = True elif answer == 'n': removeLoneWords = False elif", "'videoids.pkl' file has gone missing. The program will restart by uploading all videos.", "uploaded video.', 'snippet.tags[]': '', 'snippet.title': media_file, 'status.embeddable': '', 'status.license': '', 'status.privacyStatus': 'unlisted', 'status.publicStatsViewable':", "+ int(t[1]) splits.append([t0,t1]) t_list.append(t1) t0 = t1 elif len(t) == 3: #if we", "was successfully uploaded.\" % response['id'] videoid = response['id'] elif method != 'insert' or", "True snipVideos = True elif answer == 'n': uploadVideos = False elif answer", "file 'delete me.txt' to finish pipeline.\" foo = open(folderName + \"/\" + \"delete", "'rb') as f: captionsids = pickle.load(f) #if wait == True: if downloadCaptions ==", "input was correct, the program will begin snipping\" yes_or_no(question) print \"\\n1. Slicing into", "as needed sleepingTime = 400 #___SWITCHES(defaults)___# #ES: cut transcript into snippets based on", "uploadVideos = True snipVideos = True elif answer == 'n': uploadVideos = False", "you be uploading text snippets for syncing with your video snippets? (y) \")", "e print \"\\n One of your timestamps isn't formatted correctly. 
Consult README.md for", "= divmod(m, 60) #print str(int(h)) + \":\" + str(int(m)) + \":\" + str(int(s))", "= youtube.captions().insert( part=\"snippet\", body=dict( snippet=dict( videoId=video_id, language=language, name=name, isDraft=True ) ), media_body=file ).execute()", "+ \"/\" + fileName + \"_\" + str(cc) + \".vtt\", 'w') as thefile:", "video...\" vid = videos_insert( {'snippet.categoryId': '22', 'snippet.defaultLanguage': language, 'snippet.description': 'Description of uploaded video.',", "= 0 c = 0 #ES: several print commands were added for guidance.", "split into \"snippet\" and \"title\", where # \"snippet\" will be an object and", "transcript files for these video snippets,\\n- allows Youtube to sync the video and", "correctly. Consult README.md for guidelines on proper timestamp formatting.\" print \"\\nVerifying if timestamps", "= 0 #print str(splits) #print str(t_list) for sp in splits: if num >", "\"Uploaded caption track '%s(%s) in '%s' language, '%s' status.\" % (name, # id,", "ref = resource for pa in range(0, len(prop_array)): is_array = False key =", "available space on your hard drive to run this program. Continue? (y/n) \")", "def yes_or_no(question): while \"the answer is invalid\": reply = str(raw_input(question+' (y/n): ')).lower().strip() if", "snippet transcripts (.txt) uploadTranscripts = True #ES: download snippet subtitle files (.vtt) downloadCaptions", "may terminate the application at any point by pressing Ctrl+C (Cmd+C on Mac).\"", "Mb available space on your hard drive to run this program. Continue? (y/n)", "running this application. Exiting...\" exit() while True: language = raw_input(\"Enter the language code", "build from apiclient.errors import HttpError from apiclient.http import MediaFileUpload from oauth2client.client import flow_from_clientsecrets", "if not os.path.exists(media_file): exit('Please specify a valid file location.') print \"\\nSnipping completed. 
No", "full sample for function results = service.playlistItems().insert( body=resource, **kwargs ).execute() print_results(results) #'snippet.playlistId': playlistID,", "oauth2client.client import flow_from_clientsecrets from oauth2client.file import Storage from oauth2client.tools import argparser, run_flow #", "resumeUploads = False answer = raw_input(\"\\n4/7 Will you be uploading text snippets for", "'videoids.pkl', 'wb') as f: pickle.dump(videoids, f) else: if resumeUploads == True or deleteVideos", "elif len(t) == 3: #if we are only combining subtitle files, and we", "u'OWWAQTGKyMI', u'8a2De6Gzfek', u'VQJgxR3iAoA', u'UEzrAMq6fGc', u'PXCHMF-Z7X4', u'SU_Rbp9V_Zo', u'VLhSxDh9gI0', u'80rY1RlbVQw', u'1yumt5fRBF4', u'u5qAHXhhJoo', u'G3gO6DW-wrM', u'qAU_8DNEqP8', u'fbGaOVHXkvY',", "c,s media_file = folderName + '/' + fileName + \"_\" + str(c) +", "\") if verifyLanguage.lower() == '' or 'y': break #if combineSubtitles == True: print", "with ''\" with open(folderName + \"/\" + fileName + \".txt\") as f: text", "e: print e print \"\\nThe program is unable to resume uploads because there", "extention): \") else: print \"You have not chosen any options for running this", "you be downloading the generated subtitle snippets from Youtube? (y) \") answer =", "retrying...\" % sleep_seconds time.sleep(sleep_seconds) return response['id'] if uploadTranscripts == True or resumeUploads ==", "= True for s in splits: print c,s,captionsids[c-1] sub_txt = \"\" # while", "aggregate phrases (t) into one list item (a text) until a timestamp is", "u'VwqhkmbiLh0', u'V1sv1MYLdC0'] #videoids = [u'cj62vgUfnik', u'5k9WCcWCLiU', u'MexTd0EGfRc', u'hWY_30yHOec', u'GrMtKARI9kQ', u'YDHnQAE7U0w', u'yc4IXkGHuXs', u'ZauR51lBjQo', u'kisoEOTjmVI',", "#print t.replace('[','').replace(']','').replace(':','').replace('.','').replace('\\n','').isdigit() #print \"c: \" + str(c) with open(folderName + \"/\" + fileName", "as the language code for your transcript and video files. 
Youtube will use", "= response['id'] elif method != 'insert' or 'id' not in response: print response", "method == 'insert' and 'id' in response: print \"Video id '%s' was successfully", "leave the answer blank by hitting 'Enter': \") continue print \"\\n\\n\" print \"This", "u'MexTd0EGfRc', u'hWY_30yHOec', u'GrMtKARI9kQ', u'YDHnQAE7U0w', u'yc4IXkGHuXs', u'ZauR51lBjQo', u'kisoEOTjmVI', u'V9XdpjtUU4Q', u'eOdKfhePfTs', u'AAQ9YuybUxM', u'3BaTzSSL4_c', u'OriOoB5yF0s', u'91qOFKithgE',", "containing \" + str(len(splits)) + \" timestamps formatted like such '[HH:MM:SS.00]'.\" else: print", "No further options were selected. Exiting...\" exit() #ES: UPLOADS THE VIDEOS if uploadVideos", "== True or deleteVideos == True: args = argparser.parse_args() service = get_authenticated_service(args) def", "VIDEOS if uploadVideos == True: #ES: the following is called when videos are", "this feature is not recommended since subtitle units tend to become excessively long)", "combine_only = True fileName = raw_input(\"In order to accurately combine subtitle files, you", "full video...\" vid = videos_insert( {'snippet.categoryId': '22', 'snippet.defaultLanguage': language, 'snippet.description': 'Description of uploaded", "question = \"\\nThere were \" + str(len(splits)) + \" timestamps detected in \"", "will temporarily require \" + str(videoSize) + \" Mb available space on your", "else: print \"Please set the variable 'snipTranscript' to True so that the code", "access scope allows for full read/write access to the # authenticated user's account", "False elif answer == '': snipVideos = True answer = raw_input(\"\\n2/7 Will you", "int(float(t_0[1])*60) + int(float(t_0[2])) t1 = float(int(t_1[0])*3600) + int(float(t_1[1])*60) + int(float(t_1[2])) return [t0,t1] def", "'status.embeddable': '', 'status.license': '', 'status.privacyStatus': 'unlisted', 'status.publicStatsViewable': ''}, folderName + \"/\" + originalVideo,", "yes_or_no(question): while \"the answer is invalid\": reply = 
str(raw_input(question+' (y/n): ')).lower().strip() if reply[0]", "from apiclient.errors import HttpError from apiclient.http import MediaFileUpload from oauth2client.client import flow_from_clientsecrets from", "with open(folderName + \"/\" + fileName + \".txt\", 'r') as myfile: text =", "qualified as a digit (False) and therefore the following condition is never met.", "videoids = pickle.load(f) print \"\\nThe video IDs are composed of the following: \"", "= False #____________# #ES: USER INTERVIEW SECTION def verify_y_n(a): while True: a =", "apis and get credntials like keys\" search for youtube api click \"YouTube Data", "= f.readlines() pass #print \"ES: text is the following\" + str(text) #ES: strip", "timestamp that comes before it (\",str(sp1[1]),\" seconds), but it is smaller.\" print \"Please", "e print \"The file named '\" + originalVideo + \"' does not exist", "sleepingTime / 60) + \" minutes...\" time.sleep(2 * sleepingTime) else: if downloadCaptions ==", "to read) (n) \") answer = verify_y_n_none(answer) if answer == 'y': resampleSubtitles =", "if combine_only == True: t1 = int(t[0])*60 + int(t[1]) splits.append([t0,t0+t1]) t_list.append(t1) t0 =", "Would you like to reorganize subtitles according to punctuation? (Experimental; can lead to", "if reply[0] == 'n': exit() if uploadVideos == False and snipVideos == True:", "RETRIABLE_STATUS_CODES: error = \"A retriable HTTP error %d occurred:\\n%s\" % (e.resp.status,e.content) else: raise", "to reorganize subtitles to prioritize keeping full sentences intact? (Experimental; this feature is", "= str(c) #print subtitle print cc with open(folderName + \"/\" + fileName +", "= \"WARNING: Please configure OAuth 2.0\" # Authorize the request and store authorization", "for function kwargs = remove_empty_kwargs(**kwargs) # See full sample for function request =", "to display if the CLIENT_SECRETS_FILE is # missing. 
MISSING_CLIENT_SECRETS_MESSAGE = \"WARNING: Please configure", "# pickle.dump(videoids, f) if resumeUploads == True: print \"\\nResuming video uploads...\\n\" time.sleep(1) try:", "file with a list of video lengths, then we need to make this", "= resource for pa in range(0, len(prop_array)): is_array = False key = prop_array[pa]", "\"n\": print \"Please make sure you have the available space on your hard", "sub_txt += subtitle cc = \"\" if c < 10: cc = \"0\"", "#fileName = 'oscar' #originalVideo = \"Oscar.mp4\" ### START BOILERPLATE CODE # Sample Python", "= pickle.load(f) #if wait == True: if downloadCaptions == True: print \"\\nDownloading captions...\"", "file named '\" + fileName + \".txt' does not exist in the folder", "short, choppy, fast subtitles that are hard to read) resampleSubtitles = False #ES:", "an unexpected response: %s\" % response) except HttpError, e: if e.resp.status in RETRIABLE_STATUS_CODES:", "be in a folder name called oscar #change certain character variables import imageio", "display if the CLIENT_SECRETS_FILE is # missing. MISSING_CLIENT_SECRETS_MESSAGE = \"WARNING: Please configure OAuth", "timestamps t_list = [] #ES: PREPARE INPUT TEXT FOR PROCESSING if snipTranscript ==", "whitespace text = [x.strip() for x in text] #split times (?) splits =", "which may be a single word (and put them in an adjacent subtitle", "the program will begin snipping\" yes_or_no(question) print \"\\n1. 
Slicing into \" + str(len(splits))", "# See full sample for function return vid def hms_to_s(time): time = unicode(time,", "other processes to run) snipTranscript = True #ES: cut video into snippets based", "since some timestamp/subtitle units can end up being excessively large) fullSentenceSubtitles = False", "\"You may terminate the application at any point by pressing Ctrl+C (Cmd+C on", "== '': deleteVideos = False answer = raw_input(\"\\n7/7 Will you be combining the", "\"\\n\" folderName = raw_input(\"Enter the name of the folder containing your transcript and/or", "e: print e print \"The file named '\" + originalVideo + \"' does", "+ str(videoSize) + \" Mb available space on your hard drive to run", "your transcript and video files. Youtube will use this code for processing your", "{} if kwargs is not None: for key, value in kwargs.iteritems(): if value:", "function return vid def hms_to_s(time): time = unicode(time, \"UTF-8\") time = time.split(\" -->", "README.md before proceeding.\" time.sleep(1) print \"You may terminate the application at any point", "str(int(h)) + \":\" + str(int(m)) + \":\" + str(int(s)) return str(int(h)) + \":\"", "originalVideo, part='snippet,status') # place video in custom playlist def playlist_items_insert(properties, **kwargs): resource =", "= [\"\"] t0 = 0 c = 0 #ES: several print commands were", "if language != '': verifyLanguage = raw_input(\"\\nYou have entered '\" + language +", "For example, the property is \"snippet.description\", and the resource # already has a", "print e print \"The folder named '\" + folderName + \"' does not", "\"Wait\" #time.sleep(30) c = 0 #print splits videoids = [] #videoids = [u'jDAZHgL-nG4',", "= 2 ** retry sleep_seconds = random.random() * max_sleep print \"Sleeping %f seconds", "search_response.get(\"items\", []): # videos.append(\"%s\" % (search_result[\"id\"][\"videoId\"])) # #print \"Videos:\\n\", \"\\n\".join(videos), \"\\n\" #ES: I", "os.stat(folderName).st_size except Exception as e: 
print e print \"The folder named '\" +", "google apis dashboard --> create a new project on the resulting dashboard, \"enable", "= raw_input(\"\\n6/7 Would you like your uploaded video snippets to be deleted from", "--> \") t_0 = time[0].split(\":\") t_1 = time[1].split(\":\") t0 = float(int(t_0[0])*3600) + int(float(t_0[1])*60)", "u'ZauR51lBjQo', u'kisoEOTjmVI', u'V9XdpjtUU4Q', u'eOdKfhePfTs', u'AAQ9YuybUxM', u'3BaTzSSL4_c', u'OriOoB5yF0s', u'91qOFKithgE', u'WQJQkGEwG-Q', u'n4eW0T6Oek0', u'2dRf-EbKYHA', u'RUgi4NfoPEw', u'n40bGD_9eZI',", "snippets uploadVideos = True #ES: if the video/caption upload process was terminated unexpectedly", "\"_\" + str(c) + \".mp4\" if not os.path.exists(media_file): exit('Please specify a valid file", "combineSubtitles == True: print \"\\n\\n\" print \"\\n6.3 If your transcript has speaker names", "(e.resp.status,e.content) else: raise except RETRIABLE_EXCEPTIONS, e: error = \"A retriable error occurred: %s\"", "all input was correct, the program will begin snipping and uploading content to", "track '%s(%s) in '%s' language, '%s' status.\" % (name, # id, language, status)", "'%s' status.\" % (name, # id, language, status) c = 1 captionsids =", "or interviewee's names) that precede their discourse (e.g. \\\"Emmanuel: Hi, I'd like to", "removeLoneWords = False #____________# #ES: USER INTERVIEW SECTION def verify_y_n(a): while True: a", "+ \":\" + str(int(s)) return str(int(h)) + \":\" + str(int(m)) + \":\" +", "One of your timestamps isn't formatted correctly. Consult README.md for guidelines on proper", "Would you like to reorganize subtitles according to the presence of place names?", "the previous position of c in texts (a chunk of text prior to", "needs exploration so as to make sure that place names are never split", "line is not a digit and is not a next-line char #ES: removing", "video snippets from your Youtube account once subtitle processing is complete deleteVideos =", "is raised. 
RETRIABLE_STATUS_CODES = [500, 502, 503, 504] # This method implements an", "with open(folderName + \"/\" + fileName + \"_\" + str(cc) + \".vtt\", 'w')", "snippets,\\n- allows Youtube to sync the video and text snippets\\n- downloads the text", "downloadCaptions == True or deleteVideos == True: args = argparser.parse_args() service = get_authenticated_service(args)", "\"\" and t.replace('[','').replace(']','').replace(':','').replace('.','').replace('\\n','').isdigit() and \"[\" in t: #increase pos on texts by 1", "words? (Experimental) (n) \") answer = verify_y_n_none(answer) if answer == 'y': removeLoneWords =", "#ES: upload video snippets uploadVideos = True #ES: if the video/caption upload process", "\" minutes...\" time.sleep(2 * sleepingTime) else: if downloadCaptions == True: with open(folderName +", "associated. These values will be used as offsets for accurately combining your subtitle", "== True: t1 = int(t[0])*3600 + int(t[1])*60 + float(t[2]) splits.append([t0,t0+t1]) #print int(t[0])*3600 +", "while waitLonger == True: # try: subtitle = service.captions().download(id=captionsids[c-1],tfmt='vtt').execute() # waitLonger = False", "c = 1 for s in splits: print c,videoids[c-1] service.videos().delete( id=videoids[c-1] ).execute() c", "and \"[\" in t: #increase pos on texts by 1 c += 1", "video snippets,\\n- allows Youtube to sync the video and text snippets\\n- downloads the", "to be processed into captions. It is\",strftime(\"%H:%M:%S\", localtime()),\". Script will resume in \"", "is_array: ref[key] = properties[p].split(',') else: ref[key] = properties[p] elif key not in ref:", "README.md for guidelines on proper timestamp formatting.\" print \"\\nVerifying if timestamps are in", "True if pa == (len(prop_array) - 1): # Leave properties without values out", "\"\\n\" print \"This tool:\\n- snips your transcript (.txt) into text snippets based on", "'\" + folderName + \"' does not exist in the current directory. 
Please", "a list of the transcript's timestamps t_list = [] #ES: PREPARE INPUT TEXT", "'y' or 'n', or leave the answer blank by hitting 'Enter': \") continue", "+ fileName + \"_\" + str(c) + \".txt\" #print s,media_file,caption_file,videoids[c-1] a = service.captions().insert(", "to your account,\\n- uploads the text snippets to Youtube as transcript files for", "as it appears in the transcript: \") print \"\\n\" #____________# # let rodolphe", "properties[p]: if is_array: ref[key] = properties[p].split(',') else: ref[key] = properties[p] elif key not", "content to Youtube for processing. This may take between 20 minutes and several", "entries which may be a single word (and put them in an adjacent", "case, the user has chosen to only combine subtitles. the switch combine_only allows", "== True: #ES: the following is called when videos are being uploaded (uploadVideos", "import random from apiclient.discovery import build from apiclient.errors import HttpError from apiclient.http import", "to remove any previously-uploaded videos if the videos you are uploading are identical.", "+ str(c) + \".txt\" #print s,media_file,caption_file,videoids[c-1] a = service.captions().insert( part=\"snippet\", body=dict( snippet=dict( videoId=videoids[c-1],", "\"venant.mp4\" #interviewer = \"E.H.\" #interviewee = \"E.M.\" #fileName = 'Frederic' #originalVideo = \"Frederic.mov\"", "answer == 'y': snipVideos = True elif answer == 'n': snipVideos = False", "\"exiting application...\" time.sleep(2) exit() print \"\\n\" originalVideo = raw_input(\"Enter the file name of", "handle # the value as an array. 
if key[-2:] == '[]': key =", "place video in custom playlist def playlist_items_insert(properties, **kwargs): resource = build_resource(properties) # See", "+ float(t[2]) splits.append([t0,t1]) #print int(t[0])*3600 + int(t[1])*60 + int(t[2]) t_list.append(t1) t0 = t1", "'/' + originalVideo).st_size except Exception as e: print e print \"The file named", "int(float(t_1[2])) return [t0,t1] def s_to_hms(seconds): m, sec = divmod(seconds, 60) h, m =", "\"This tool:\\n- snips your transcript (.txt) into text snippets based on its timestamps,\\n-", "+= 1 #print a wait = True with open(folderName + \"/\" + 'captionsids.pkl',", "that are hard to read) resampleSubtitles = False #ES: IF you enabled 'resampleSubtitles'", "to True so that the code can properly run.\" exit() #ES print texts[c]", "# try: subtitle = service.captions().download(id=captionsids[c-1],tfmt='vtt').execute() # waitLonger = False # except: # waitLonger", "'/' + fileName + \".srt\" service.captions().insert( part=\"snippet\", body=dict( snippet=dict( videoId=id, language=language, name=originalVideo, isDraft=True,", "fileName + \".srt\", 'w') #thefile.write(compiledSubs) if uploadFull == True: print \"\\nUploading full video...\"", "not chosen any options for running this application. Exiting...\" exit() while True: language", "# \"snippet\" will be an object and \"title\" will be a property in", "to resume uploads because there are no uploads to resume or your 'videoids.pkl'", "videoSize = os.stat(folderName + '/' + originalVideo).st_size/1000000 answer = raw_input(\"If this is your", "reply[0] == 'y': return True if reply[0] == '': return True if reply[0]", "kwargs.iteritems(): if value: good_kwargs[key] = value return good_kwargs ### END BOILERPLATE CODE #", "file has gone missing. The program will restart by uploading all videos. 
You", "we need to make this into a list of cumulative times so that", "search_result in search_response.get(\"items\", []): # videos.append(\"%s\" % (search_result[\"id\"][\"videoId\"])) # #print \"Videos:\\n\", \"\\n\".join(videos), \"\\n\"", "sure that place names are never split between 2 timestamps, at the least.", "to the # authenticated user's account and requires requests to use an SSL", "= True elif answer == 'n': uploadVideos = False elif answer == '':", "in youtube online and copy url id to script #playlistID = \"PLSbFnWujSxCZxm7tYAGNeG9l5s19m4T65\" #language", "\" + str(sleepingTime/60) + \" minutes...\" time.sleep(sleepingTime) #search_response = service.search().list( # q=\"Anita\", #", "\"snippet\" and \"title\", where # \"snippet\" will be an object and \"title\" will", "the CLIENT_SECRETS_FILE is # missing. MISSING_CLIENT_SECRETS_MESSAGE = \"WARNING: Please configure OAuth 2.0\" #", "= True snipVideos = True elif answer == 'n': uploadVideos = False elif", "%s\" % response) except HttpError, e: if e.resp.status in RETRIABLE_STATUS_CODES: error = \"A", "+ int(t[1])*60 + float(t[2]) splits.append([t0,t0+t1]) #print int(t[0])*3600 + int(t[1])*60 + int(t[2]) t_list.append(t1) t0", "me.txt\") as f: text = f.readlines() pass #print \"ES: text is the following\"", "'y': placeBasedTimestamping = True elif answer == 'n': placeBasedTimestamping = False elif answer", "downloadCaptions == True: with open(folderName + \"/\" + 'captionsids.pkl', 'rb') as f: captionsids", "extention): \") try: verifyExistence = os.stat(folderName + '/' + originalVideo).st_size except Exception as", "u'VLhSxDh9gI0', u'80rY1RlbVQw', u'1yumt5fRBF4', u'u5qAHXhhJoo', u'G3gO6DW-wrM', u'qAU_8DNEqP8', u'fbGaOVHXkvY', u'_Knl1rP8Z9w', u'O6f8ZWjSgiw', u'uXY-00DuLjY', u'WpreZ_gbEyw'] #with open(folderName", "= raw_input(\"Please answer 'y' or 'n': \") continue def verify_y_n_none(a): while True: a", "for full video to be processed. It is\",strftime(\"%H:%M:%S\", localtime()),\". 
Script will resume in", "(make a variable for this) compiledSubs = compileSubs(folderName,fileName,[['_high-frequency-timestamps',0,placeBasedTimestamping]],t_list,interviewer,interviewee,False,language,resampleSubtitles,fullSentenceSubtitles,removeLoneWords) time.sleep(10) #thefile = open(folderName +", "answer == 'n': uploadTranscripts = False elif answer == '': uploadTranscripts = True", "= False elif answer == '': fullSentenceSubtitles = False answer = raw_input(\"\\n7.1.2 Would", "file \"oscar4.txt\" #might have to be in a folder name called oscar #change", "(equivalent to \",str(sp[1]),\" seconds) should be a larger number than the timestamp that", "= \"A retriable HTTP error %d occurred:\\n%s\" % (e.resp.status,e.content) else: raise except RETRIABLE_EXCEPTIONS,", "sp1 = sp num+=1 print \"\\nThe document named '\" + fileName + \".txt'", "= service.playlistItems().insert( body=resource, **kwargs ).execute() print_results(results) #'snippet.playlistId': playlistID, playlist_items_insert( {'snippet.resourceId.kind': 'youtube#video', 'snippet.resourceId.videoId': vid,", "uploading text snippets for syncing with your video snippets? (y) \") answer =", "while True: a = a.lower().strip() if a == 'y' or a == 'n':", "#print int(t[0])*3600 + int(t[1])*60 + int(t[2]) t_list.append(t1) t0 = t1 except ValueError as", "\"\\n\" #ES: if the beginning of the line is not a digit and", "setting a property in the # resource's \"snippet\" object. 
ref[key] = {} ref", "from '[00:00:01.09]' since it is never qualified as a digit (False) and therefore", "named '\" + fileName + \".txt' does not exist in the folder '\"", "you want to continue where you left off (uploadVideos must still be set", "occurred:\\n%s\" % (e.resp.status,e.content) else: raise except RETRIABLE_EXCEPTIONS, e: error = \"A retriable error", "= 'fr' #change these variables according to what story you want to process", "name of a file that contains # the OAuth 2.0 information for this", "its client_id and # client_secret. \"\"\" to create a client secret file: google", "get_authenticated_service(args): flow = flow_from_clientsecrets(CLIENT_SECRETS_FILE, scope=YOUTUBE_READ_WRITE_SSL_SCOPE, message=MISSING_CLIENT_SECRETS_MESSAGE) storage = Storage(\"youtube-api-snippets-oauth2.json\") credentials = storage.get() if", "are altered when concatenating snippets (i.e. when combineSubtitles = True) #ES A feature", "uploadTranscripts == True: with open(folderName + \"/\" + 'videoids.pkl', 'wb') as f: pickle.dump(videoids,", "# q=\"Anita\", # part=\"id\", # type=\"video\", # fields=\"items/id\" #).execute() # #videos = []", "\".txt\" #print s,media_file,caption_file,videoids[c-1] a = service.captions().insert( part=\"snippet\", body=dict( snippet=dict( videoId=videoids[c-1], language=language, name=media_file, isDraft=True,", "{} for p in properties: # Given a key like \"snippet.title\", split into", "\") answer = verify_y_n_none(answer) if answer == 'y': downloadCaptions = True elif answer", "missing. 
MISSING_CLIENT_SECRETS_MESSAGE = \"WARNING: Please configure OAuth 2.0\" # Authorize the request and", "the next time through the # \"for pa in range ...\" loop, we", "== True or uploadTranscripts == True: with open(folderName + \"/\" + 'videoids.pkl', 'wb')", "a larger number than the timestamp that comes before it (\",str(sp1[1]),\" seconds), but", "retry = 0 while response is None: try: print \"Uploading file...\" status, response", "3) t = t.split(\":\") if len(t) > 3 or len(t) < 3: print", "if a == 'y' or a == 'n': return a else: a =", "therefore the following condition is never met. if t != \"\" and t.replace('[','').replace(']','').replace(':','').replace('.','').replace('\\n','').isdigit()", "of the line is not a digit and is not a next-line char", "#print subtitle print cc with open(folderName + \"/\" + fileName + \"_\" +", "= unicode(time, \"UTF-8\") time = time.split(\" --> \") t_0 = time[0].split(\":\") t_1 =", "size of your video file (\" + str(videoSize) + \" Mb).\" yes_or_no(question) print", "== 'n': exit() if uploadVideos == False and snipVideos == True: #ES: the", "def verify_y_n_none(a): while True: a = a.lower().strip() if a == 'y' or a", "0 #print splits videoids = [] #videoids = [u'jDAZHgL-nG4', u'cMNTnd8pApk', u's5hLO6T_BhY', u'gOAoCh5Mecc', u'p0PX5s6k5DU',", "for pa in range(0, len(prop_array)): is_array = False key = prop_array[pa] # Convert", "Youtube account once subtitle processing is complete deleteVideos = False #ES: upload the", "language = raw_input(\"Enter the language code of your video and transcript or the", "captionsids.append(a['id']) c += 1 #print a wait = True with open(folderName + \"/\"", "pass #print \"ES: text is the following\" + str(text) #ES: strip whitespace text", "f: captionsids = pickle.load(f) #if wait == True: if downloadCaptions == True: print", "to Youtube as private videos only visible to your account,\\n- uploads the text", "your transcript (excluding the \\\".txt\\\" extention): \") try: verifyExistence = 
os.stat(folderName + '/'", "to reorganize subtitles according to the presence of place names? (Experimental) (n) \")", "to make this into a list of cumulative times so that the rest", "#print a wait = True with open(folderName + \"/\" + 'captionsids.pkl', 'wb') as", "try: verifyExistence = os.stat(folderName).st_size except Exception as e: print e print \"The folder", "raw_input(\"Please answer 'y' or 'n', or leave the answer blank by hitting 'Enter':", "if answer == 'y': fullSentenceSubtitles = True elif answer == 'n': fullSentenceSubtitles =", "can be removed. #ES: a list of the transcript's timestamps t_list = []", "str(splits) #for i in splits: # print s_to_hms(i[0]),\"->\",s_to_hms(i[1]) #time.sleep(60) #print splits,splits[len(splits)-1][1] #splits.append([splits[len(splits)-1][1],7200]) #print", "in the current directory. Please see README.md for instructions.\" print \"exiting application...\" time.sleep(2)", "in the folder '\" + folderName + \"'. Please see README.md for instructions.\"", "client id\" \"\"\" #CLIENT_SECRETS_FILE = \"client_secret.json\" #api key is <KEY> #client id is", "dashboard, \"enable apis and get credntials like keys\" search for youtube api click", "\".txt\", 'w') try: #ES: write the previous position of c in texts (a", "(y/n): ')).lower().strip() if reply[0] == 'y': return True if reply[0] == '': return", "these video snippets to Youtube as private videos only visible to your account,\\n-", "return a else: a = raw_input(\"Please answer 'y' or 'n', or leave the", "client secret file: google apis dashboard --> create a new project on the", "parts & uploading videos...\" time.sleep(1) if len(videoids) > 0: print \"(However, it looks", "be uploaded to YouTube for processing. YouTube allows a maximum of 100 video", "not yet have a \"snippet\" object. Create the snippet object here. 
# Setting", "= False #ES: resample subtitles to prevent cut-up phrases, lone-word subtitles, and improve", "== '': resumeUploads = False answer = raw_input(\"\\n4/7 Will you be uploading text", "False elif answer == '': uploadTranscripts = True answer = raw_input(\"\\n5/7 Will you", "+= 1 if retry > MAX_RETRIES: exit(\"No longer attempting to retry.\") max_sleep =", "by uploading all videos. You may need to remove any previously-uploaded videos if", "#print \"splits: \" + str(splits) #for i in splits: # print s_to_hms(i[0]),\"->\",s_to_hms(i[1]) #time.sleep(60)", "USER INTERVIEW SECTION def verify_y_n(a): while True: a = a.lower().strip() if a ==", "with empty values out of the inserted resource. def build_resource(properties): resource = {}", "\\\"Emmanuel: Hi, I'd like to ask you a few questions...\\\"), please input them.", "captionsids = pickle.load(f) #if wait == True: if downloadCaptions == True: print \"\\nDownloading", "fileName + \"_\" + str(c) + \".flv\" caption_file = folderName + '/' +", "is in client_id.json CLIENT_SECRETS_FILE = \"client_id.json\" # This OAuth 2.0 access scope allows", "Youtube as private videos only visible to your account,\\n- uploads the text snippets", "= False elif answer == '': placeBasedTimestamping = False print \"\\n\" folderName =", "combine_only == True: t1 = int(t[0])*60 + int(t[1]) splits.append([t0,t0+t1]) t_list.append(t1) t0 = t0", "I'd like to ask you a few questions...\\\"), please input them. 
If this", "t.replace('[','').replace(']','').replace(':','').replace('.','').replace('\\n','').isdigit() and \"[\" in t: #increase pos on texts by 1 c +=", "lengths, then we need to make this into a list of cumulative times", "True or uploadTranscripts == True: with open(folderName + \"/\" + 'videoids.pkl', 'wb') as", "captionsids = [] wait = False if uploadTranscripts == True: #print splits,videoids #uploads", "True: print \"\\nUploading full video...\" vid = videos_insert( {'snippet.categoryId': '22', 'snippet.defaultLanguage': language, 'snippet.description':", "resume in \" + str(2 * sleepingTime / 60) + \" minutes...\" time.sleep(2", "resumable=True), **kwargs ) vid = resumable_upload(request, 'video', 'insert') # See full sample for", "to your transcript, simply leave the following two answers blank by pressing the", "transport library not to retry, since # we are handling retry logic ourselves.", "combining your subtitle files.\\nEach timestamp should be written as follows [HH:MM:SS.00], followed by", "= None retry = 0 while response is None: try: print \"Uploading file...\"", "hard to read) resampleSubtitles = False #ES: IF you enabled 'resampleSubtitles' (above), you", ") vid = resumable_upload(request, 'video', 'insert') # See full sample for function return", "= float(int(t_1[0])*3600) + int(float(t_1[1])*60) + int(float(t_1[2])) return [t0,t1] def s_to_hms(seconds): m, sec =", "generated? (n) \") answer = verify_y_n_none(answer) if answer == 'y': deleteVideos = True", "+ str(c) + \".mp4\" if not os.path.exists(media_file): exit('Please specify a valid file location.')", "+ \" video snippets will therefore be uploaded to YouTube for processing. 
YouTube", "while \"the answer is invalid\": reply = str(raw_input(question+' (y/n): ')).lower().strip() if reply[0] ==", "# part=\"id\", # type=\"video\", # fields=\"items/id\" #).execute() # #videos = [] # #for", "== 'y': combineSubtitles = True elif answer == 'n': combineSubtitles = False elif", "need to create a list of timestamps demarcating the length of each video", "\" + str(videoSize) + \" Mb available space on your hard drive to", "configure OAuth 2.0\" # Authorize the request and store authorization credentials. def get_authenticated_service(args):", "interviewee's names) that precede their discourse (e.g. \\\"Emmanuel: Hi, I'd like to ask", "text = f.readlines() except IOError as e: print \"No text file found because", "an exponential backoff strategy to resume a # failed upload. def resumable_upload(request, resource,", "transcripts print \"\\nUploading transcripts...\" for s in splits: print c,s media_file = folderName", "some different functionality down the road combine_only = True fileName = raw_input(\"In order", "\"' does not exist in the current directory. Please see README.md for instructions.\"", "will therefore be uploaded to YouTube for processing. YouTube allows a maximum of", "vid print \"\\nUploading compiled subtitles...\" caption_file = folderName + '/' + fileName +", "seconds) should be a larger number than the timestamp that comes before it", "targetname=folderName + \"/\" + fileName + \"_\" + str(c) +\".mp4\") media_file = folderName", "enabled 'resampleSubtitles' (above), you have the option to remove subtitle entries which may", "a = a.lower().strip() if a == 'y' or a == 'n' or a", "elif answer == '': uploadTranscripts = True answer = raw_input(\"\\n5/7 Will you be", "pa in range ...\" loop, we will be setting a property in the", "for your transcript and video files. 
Youtube will use this code for processing", "a == 'y' or a == 'n': return a else: a = raw_input(\"Please", "+ '/' + fileName + \"_\" + str(c) + \".mp4\" if not os.path.exists(media_file):", "these processes 'on' or 'off' depending on which steps you would like to", "== 'n': uploadTranscripts = False elif answer == '': uploadTranscripts = True answer", "Will you be uploading text snippets for syncing with your video snippets? (y)", "if answer == 'y': placeBasedTimestamping = True elif answer == 'n': placeBasedTimestamping =", "#interviewer = \"S.G.\" #interviewee = \"O.G.\" #folderName = 'oscar' #fileName = 'oscar' #originalVideo", "larger number than the timestamp that comes before it (\",str(sp1[1]),\" seconds), but it", "playlist_items_insert(properties, **kwargs): resource = build_resource(properties) # See full sample for function kwargs =", "Please configure OAuth 2.0\" # Authorize the request and store authorization credentials. def", "the developers page # and it should be in the same directory with", "str(t_list) for sp in splits: if num > 0: if sp[1] <= sp1[1]:", "import compileSubs import pickle import os #adjust sleeping time as needed - ES", "False elif answer == '': placeBasedTimestamping = False print \"\\n\" folderName = raw_input(\"Enter", "True elif answer == 'n': uploadVideos = False elif answer == '': uploadVideos", "name as it appears in the transcript: \") print \"\\n\" #____________# # let", "the tool, simply leave the following answers blank. 
For more advanced users or", "')).lower().strip() if reply[0] == 'y': return True if reply[0] == '': return True", "name=name, isDraft=True ) ), media_body=file ).execute() id = insert_result[\"id\"] name = insert_result[\"snippet\"][\"name\"] language", "'[HH:MM:SS.00]'.\" else: print \"Please set the variable 'snipTranscript' to True so that the", "= \"client_id.json\" # This OAuth 2.0 access scope allows for full read/write access", "program.\" print \"exiting application...\" time.sleep(2) exit() print \"\\n\" elif combineSubtitles == True: #in", "if method == 'insert' and 'id' in response: print \"Video id '%s' was", "to only combine subtitles. the switch combine_only allows some different functionality down the", "ref[key] else: # For example, the property is \"snippet.description\", and the resource #", "+ \"/\" + fileName + \"_\" + str(c) + \".txt\", 'w') try: #ES:", "uploads to resume or your 'videoids.pkl' file has gone missing. The program will", "#ES: several print commands were added for guidance. they can be removed. #ES:", "restart the program.\" uploadVideos = True wait = False def yes_or_no(question): while \"the", "with one of your timestamps:\" print \"Timestamp number #\",str(num+2),\" (equivalent to \",str(sp[1]),\" seconds)", "= raw_input(\"Enter the file name of your video (this time including the file's", "be used as offsets for accurately combining your subtitle files.\\nEach timestamp should be", "video.\\n\\nYou may switch these processes 'on' or 'off' depending on which steps you", "verify_y_n(a): while True: a = a.lower().strip() if a == 'y' or a ==", "your Youtube account once subtitle processing is complete deleteVideos = False #ES: upload", "1): # Leave properties without values out of inserted resource. if properties[p]: if", "to punctuation? 
(Experimental; can lead to short, choppy, fast subtitles that are hard", "print \"Please set the variable 'snipTranscript' to True so that the code can", "= 1 waitLonger = True for s in splits: print c,s,captionsids[c-1] sub_txt =", "#ES: IF you enabled 'resampleSubtitles' (above), you have the option to remove subtitle", "u'n4eW0T6Oek0', u'2dRf-EbKYHA', u'RUgi4NfoPEw', u'n40bGD_9eZI', u'OWWAQTGKyMI', u'8a2De6Gzfek', u'VQJgxR3iAoA', u'UEzrAMq6fGc', u'PXCHMF-Z7X4', u'SU_Rbp9V_Zo', u'VLhSxDh9gI0', u'80rY1RlbVQw', u'1yumt5fRBF4',", "'n': resumeUploads = False elif answer == '': resumeUploads = False answer =", "a single word (and put them in an adjacent subtitle (verify)) removeLoneWords =", "resource for pa in range(0, len(prop_array)): is_array = False key = prop_array[pa] #", "timestamps isn't formatted correctly. Consult README.md for guidelines on proper timestamp formatting.\" print", "for key, value in kwargs.iteritems(): if value: good_kwargs[key] = value return good_kwargs ###", "number than the timestamp that comes before it (\",str(sp1[1]),\" seconds), but it is", "name of the folder containing your transcript and/or video and/or subtitle files\\n(this folder", "= os.stat(folderName + '/' + originalVideo).st_size/1000000 answer = raw_input(\"If this is your first", "oauth2client.tools import argparser, run_flow # The CLIENT_SECRETS_FILE variable specifies the name of a", "elif answer == '': snipVideos = True answer = raw_input(\"\\n2/7 Will you be", "ENABLE it click \"create credentials\" create and \"OAUT client id\" \"\"\" #CLIENT_SECRETS_FILE =", "+ \"/\" + 'videoids.pkl', 'wb') as f: # pickle.dump(videoids, f) if resumeUploads ==", ").execute() print \"\\nFull video is soon available on your Youtube channel for you", "leave the following two answers blank by pressing the 'Enter' key.\" time.sleep(1) interviewer", "including its ext #originalVideo = \"venant.mp4\" #interviewer = \"E.H.\" #interviewee = \"E.M.\" #fileName", "t1 else: t1 = int(t[0])*60 + 
int(t[1]) splits.append([t0,t1]) t_list.append(t1) t0 = t1 elif", "uploadVideos = True wait = False def yes_or_no(question): while \"the answer is invalid\":", "% e if error is not None: print error retry += 1 if", "name like \"snippet.tags[]\" to snippet.tags, but handle # the value as an array.", "#ES: I don't think this function is ever called... # Call the API's", "#uploads transcripts print \"\\nUploading transcripts...\" for s in splits: print c,s media_file =", "are in ascending order and that there are no mistakes (see README.md) and", "\" timestamps detected in \" + fileName + \". \" + str(len(splits)) +", "= raw_input(\"\\nYou have entered '\" + language + \"' as the language code", "you be combining the downloaded subtitle snippets into a single subtitle file for", "#ES: strip whitespace text = [x.strip() for x in text] #split times (?)", "\"(However, it looks like \",len(videoids),\" video snippets were already uploaded to Youtube. Now", "text: #add a \\n to the end of each line (why?) t +=", "(search_result[\"id\"][\"videoId\"])) # #print \"Videos:\\n\", \"\\n\".join(videos), \"\\n\" #ES: I don't think this function is", "upload snippet transcripts (.txt) uploadTranscripts = True #ES: download snippet subtitle files (.vtt)", "= None error = None retry = 0 while response is None: try:", "+ \" Mb available space on your hard drive to run this program.", "= raw_input(\"\\n7.1.1 Would you like to reorganize subtitles to prioritize keeping full sentences", "excessively long) (n) \") answer = verify_y_n_none(answer) if answer == 'y': fullSentenceSubtitles =", "README.md for guidelines on proper timestamp formatting.\" print \"\\nexiting application...\" time.sleep(2) exit() if", "as f: pickle.dump(captionsids, f) print \"Waiting for transcripts to be processed into captions.", "and the resource # already has a \"snippet\" object. ref = ref[key] return", "and it should be in the same directory with the code. 
return build(API_SERVICE_NAME,", "splits.append([t0,t1]) #print int(t[0])*3600 + int(t[1])*60 + int(t[2]) t_list.append(t1) t0 = t1 except ValueError", "language=language, name=originalVideo, isDraft=True, sync=False ) ), media_body=caption_file ).execute() print \"\\nFull video is soon", "%d occurred:\\n%s\" % (e.resp.status,e.content) else: raise except RETRIABLE_EXCEPTIONS, e: error = \"A retriable", "the code can properly run.\" exit() #ES print texts[c] #print \"splits: \" +", "answer == 'n': combineSubtitles = False elif answer == '': combineSubtitles = True", "variables according to what story you want to process - ES #interviewer =", "list of the transcript's timestamps t_list = [] #ES: PREPARE INPUT TEXT FOR", "int(t[0])*3600 + int(t[1])*60 + int(t[2]) t_list.append(t1) t0 = t0 + t1 else: t1", "\"Waiting for transcripts \" + str(c) + \" \" + captionsids[c-1] + \"", "Youtube account once complete uploadFull = False #ES: combine vtt snippets that were", "following condition is never met. if t != \"\" and t.replace('[','').replace(']','').replace(':','').replace('.','').replace('\\n','').isdigit() and \"[\"", "language, 'snippet.defaultAudioLanguage': language, 'snippet.description': 'Description of uploaded video.', 'snippet.tags[]': '', 'snippet.title': media_file, 'status.embeddable':", "u'ZzVVEcGekv0', u'ZxKJhN3JFfI', u'TsDnqWmpvrw', u'Kvem1XnPHF0', u'VwqhkmbiLh0', u'V1sv1MYLdC0'] #videoids = [u'cj62vgUfnik', u'5k9WCcWCLiU', u'MexTd0EGfRc', u'hWY_30yHOec', u'GrMtKARI9kQ',", "a list of properties given as key-value pairs. # Leave properties with empty", "as thefile: #thefile = open(folderName + \"/\" + fileName + \"_\" + str(c)", "open(folderName + \"/\" + 'videoids.pkl', 'rb') as f:videoids = pickle.load(f) except Exception as", "identical. 
If so, do this manually on youtube.com and then restart the program.\"", "\"E.M.\" #fileName = 'Frederic' #originalVideo = \"Frederic.mov\" #interviewer = \"M.M.\" #interviewee = \"B.K.\"", "#adjust switches as needed sleepingTime = 400 #___SWITCHES(defaults)___# #ES: cut transcript into snippets", "continue print \"\\n\\n\" print \"This application creates subtitles for a video for which", "== True or downloadCaptions == True or deleteVideos == True: combine_only = False", "where you left off (uploadVideos must still be set to True): resumeUploads =", "OAuth 2.0 information for this application, including its client_id and # client_secret. \"\"\"", "sleepingTime = 400 #___SWITCHES(defaults)___# #ES: cut transcript into snippets based on the transcript's", "hard drive to run this program. Continue? (y/n) \") answer = verify_y_n(answer) if", "import argparser, run_flow # The CLIENT_SECRETS_FILE variable specifies the name of a file", "== True: t1 = int(t[0])*60 + int(t[1]) splits.append([t0,t0+t1]) t_list.append(t1) t0 = t0 +", "\"/\" + 'videoids.pkl', 'rb') as f: videoids = pickle.load(f) print \"\\nThe video IDs", "lone-word subtitles, and improve the subtitle structure overall (can lead to short, choppy,", "the remaining snippets...)\" time.sleep(1) for s in splits: c += 1 if c", "is reached #ES: if t is a timestamp #ES: removing punctuation from '[00:00:01.09]'", "your subtitle files.\\nEach timestamp should be written as follows [HH:MM:SS.00], followed by a", "uploading video snippets to Youtube for syncing? (y) \") answer = verify_y_n_none(answer) if", "= False elif answer == '': uploadVideos = True answer = raw_input(\"\\n3/7 Will", "except: # waitLonger = True # print \"Waiting for transcripts \" + str(c)", "subtitles have been successfully generated? 
(n) \") answer = verify_y_n_none(answer) if answer ==", "resulting dashboard, \"enable apis and get credntials like keys\" search for youtube api", "== '': removeLoneWords = False answer = raw_input(\"\\n7.2 Would you like to reorganize", "\"/\" + fileName + \".srt\", 'w') #thefile.write(compiledSubs) if uploadFull == True: print \"\\nUploading", "the folder containing your transcript and/or video and/or subtitle files\\n(this folder must be", "to True or False (make a variable for this) compiledSubs = compileSubs(folderName,fileName,[['_high-frequency-timestamps',0,placeBasedTimestamping]],t_list,interviewer,interviewee,False,language,resampleSubtitles,fullSentenceSubtitles,removeLoneWords) time.sleep(10)", ".txt file with a list of video lengths, then we need to make", "will restart by uploading all videos. You may need to remove any previously-uploaded", "in the transcript: \") interviewee = raw_input(\"\\n6.3.2 Please input your interviewee's name as", "will need to create a list of timestamps demarcating the length of each", "subtitles (e.g. en, fr, es, etc.):\\n(You can refer to the second column in", "language + \"' as the language code for your transcript and video files.", "#ES: upload the full video and compiled transcript to your Youtube account once", "'Description of uploaded video.', 'snippet.tags[]': '', 'snippet.title': media_file, 'status.embeddable': '', 'status.license': '', 'status.privacyStatus':", "create a new project on the resulting dashboard, \"enable apis and get credntials", "and we are using a .txt file with a list of video lengths,", "or credentials.invalid: credentials = run_flow(flow, storage, args) # Trusted testers can download this", "#interviewer = \"C.V.\" #interviewee = \"V.S.\" #where the video and txt files are", "so that the rest of the pipeline can run if combine_only == True:", "never met. 
if t != \"\" and t.replace('[','').replace(']','').replace(':','').replace('.','').replace('\\n','').isdigit() and \"[\" in t: #increase", "Leave properties without values out of inserted resource. if properties[p]: if is_array: ref[key]", "not None: print error retry += 1 if retry > MAX_RETRIES: exit(\"No longer", "uploaded to Youtube. Now trying to resume uploading the remaining snippets...)\" time.sleep(1) for", "(t) into one list item (a text) until a timestamp is reached #ES:", "elif answer == 'n': combineSubtitles = False elif answer == '': combineSubtitles =", "= [] #videoids = [u'jDAZHgL-nG4', u'cMNTnd8pApk', u's5hLO6T_BhY', u'gOAoCh5Mecc', u'p0PX5s6k5DU', u'hSmPkLqOt0M', u'2Ik7_biRs9g', u'G64A_hpNWfI', u'ZzVVEcGekv0',", "print \"exiting application...\" time.sleep(2) exit() print \"\\n\" elif combineSubtitles == True: #in this", "in ascending order and that there are no mistakes (see README.md) and restart", "'/' + fileName + '.txt').st_size except Exception as e: print e print \"The", "'rb') as f: videoids = pickle.load(f) print \"\\nThe video IDs are composed of", "(uploadVideos must still be set to True): resumeUploads = False #ES: upload snippet", "tool, please select which processes you would like to run: \\n\\n\" time.sleep(2) answer", "% texts[c-1]) #time.sleep(.1) texts.append(\"\") texts[c] = \"\" #t = t.replace(\" \", \"\") #t", "that comes before it (\",str(sp1[1]),\" seconds), but it is smaller.\" print \"Please make", "a = a.lower().strip() if a == 'y' or a == 'n': return a", "#print \"ES: text is the following\" + str(text) #ES: strip whitespace text =", "an array. 
if key[-2:] == '[]': key = key[0:len(key)-2:] is_array = True if", "{} ref = ref[key] else: # For example, the property is \"snippet.description\", and", "time running this tool on the files you have indicated, you will temporarily", "with open(folderName + \"/\" + 'captionsids.pkl', 'wb') as f: pickle.dump(captionsids, f) print \"Waiting", "#folderName = 'oscar' #fileName = 'oscar' #originalVideo = \"Oscar.mp4\" ### START BOILERPLATE CODE", "upload the full video and compiled transcript to your Youtube account once complete", "you will need to create a list of timestamps demarcating the length of", "a \"snippet\" object. Create the snippet object here. # Setting \"ref = ref[key]\"", "fileName + \".txt' was cut into \" + str(len(splits)) + \" text snippets", "UPLOADS THE VIDEOS if uploadVideos == True: #ES: the following is called when", "one list item (a text) until a timestamp is reached #ES: if t", "== \"n\": print \"Please make sure you have the available space on your", "body=resource, **kwargs ).execute() print_results(results) #'snippet.playlistId': playlistID, playlist_items_insert( {'snippet.resourceId.kind': 'youtube#video', 'snippet.resourceId.videoId': vid, 'snippet.position': ''},", "== 'n': downloadCaptions = False elif answer == '': downloadCaptions = True answer", "service.videos().insert( body=resource, media_body=MediaFileUpload(media_file, chunksize=-1, resumable=True), **kwargs ) vid = resumable_upload(request, 'video', 'insert') #", "file named '\" + originalVideo + \"' does not exist in the folder", "keyword arguments that are not set def remove_empty_kwargs(**kwargs): good_kwargs = {} if kwargs", "+ t1 else: t1 = int(t[0])*60 + int(t[1]) splits.append([t0,t1]) t_list.append(t1) t0 = t1", "'\" + originalVideo + \"' does not exist in the folder '\" +", "0 num = 0 #print str(splits) #print str(t_list) for sp in splits: if", "== 'y' or a == 'n': return a else: a = raw_input(\"Please answer", "subtitles that are hard to read) resampleSubtitles = 
False #ES: IF you enabled", "True: a = a.lower().strip() if a == 'y' or a == 'n': return", "(?) splits = [] #list of cut-up texts texts = [\"\"] t0 =", "Youtube into a total subtitle file. combineSubtitles = True #ES: the following switches", "'': snipVideos = True answer = raw_input(\"\\n2/7 Will you be uploading video snippets", "file. combineSubtitles = True #ES: the following switches control how subtitles are altered", "into text snippets based on its timestamps,\\n- snips the associated video accordingly into", ").execute() c += 1 time.sleep(10) if combineSubtitles == True: #compiles them all print", "\"\\nIf all input was correct, the program will begin snipping\" yes_or_no(question) print \"\\n1.", "== '': return True if reply[0] == 'n': exit() if uploadVideos == False", "+ int(float(t_1[1])*60) + int(float(t_1[2])) return [t0,t1] def s_to_hms(seconds): m, sec = divmod(seconds, 60)", "(y/n) \") answer = verify_y_n(answer) if answer == \"n\": print \"Please make sure", "+= subtitle cc = \"\" if c < 10: cc = \"0\" +", "added for guidance. they can be removed. #ES: a list of the transcript's", "t t = t.replace('[','').replace(']','').replace('\\n','') t = unicode(t, \"UTF-8\") #split the timestamps at :", "### END BOILERPLATE CODE # Sample python code for videos.insert def videos_insert(properties, media_file,", "use an SSL connection. 
YOUTUBE_READ_WRITE_SSL_SCOPE = \"https://www.googleapis.com/auth/youtube.force-ssl\" API_SERVICE_NAME = \"youtube\" API_VERSION = \"v3\"", "into \" + str(len(splits)) + \" parts & uploading videos...\" time.sleep(1) if len(videoids)", "fileName + \"_\" + str(c) + \".txt\" #print s,media_file,caption_file,videoids[c-1] a = service.captions().insert( part=\"snippet\",", "\"UTF-8\") time = time.split(\" --> \") t_0 = time[0].split(\":\") t_1 = time[1].split(\":\") t0", "u'G64A_hpNWfI', u'ZzVVEcGekv0', u'ZxKJhN3JFfI', u'TsDnqWmpvrw', u'Kvem1XnPHF0', u'VwqhkmbiLh0', u'V1sv1MYLdC0'] #videoids = [u'cj62vgUfnik', u'5k9WCcWCLiU', u'MexTd0EGfRc', u'hWY_30yHOec',", "= True elif answer == 'n': removeLoneWords = False elif answer == '':", "for s in splits: print c,s media_file = folderName + '/' + fileName", "True or deleteVideos == True: args = argparser.parse_args() service = get_authenticated_service(args) def print_results(results):", "== True or deleteVideos == True or uploadTranscripts == True: with open(folderName +", "[] #list of cut-up texts texts = [\"\"] t0 = 0 c =", "exponential backoff strategy to resume a # failed upload. def resumable_upload(request, resource, method):", "correctly. Consult README.md for guidelines on proper timestamp formatting.\" print \"\\nexiting application...\" time.sleep(2)", "= raw_input(\"\\n4/7 Will you be uploading text snippets for syncing with your video", "exist in the current directory. Please see README.md for instructions.\" print \"exiting application...\"", "#print \"c: \" + str(c) with open(folderName + \"/\" + fileName + \"_\"", "subtitle processing is complete deleteVideos = False #ES: upload the full video and", "True: # try: subtitle = service.captions().download(id=captionsids[c-1],tfmt='vtt').execute() # waitLonger = False # except: #", "== 'y' or a == 'n' or a == '': return a else:", "pickle.dump(captionsids, f) print \"Waiting for transcripts to be processed into captions. 
It is\",strftime(\"%H:%M:%S\",", "continue where you left off (uploadVideos must still be set to True): resumeUploads", "answer = raw_input(\"\\n4/7 Will you be uploading text snippets for syncing with your", "'', 'status.privacyStatus': 'unlisted', 'status.publicStatsViewable': ''}, folderName + \"/\" + originalVideo, part='snippet,status') # place", "placeBasedTimestamping = False elif answer == '': placeBasedTimestamping = False print \"\\n\" folderName", "uploads the text snippets to Youtube as transcript files for these video snippets,\\n-", "of your timestamp list (excluding the \\\".txt\\\" extention): \") else: print \"You have", "u'2dRf-EbKYHA', u'RUgi4NfoPEw', u'n40bGD_9eZI', u'OWWAQTGKyMI', u'8a2De6Gzfek', u'VQJgxR3iAoA', u'UEzrAMq6fGc', u'PXCHMF-Z7X4', u'SU_Rbp9V_Zo', u'VLhSxDh9gI0', u'80rY1RlbVQw', u'1yumt5fRBF4', u'u5qAHXhhJoo',", "'': resumeUploads = False answer = raw_input(\"\\n4/7 Will you be uploading text snippets", "to thefile thefile.write(\"%s\\n\" % texts[c-1]) #time.sleep(.1) texts.append(\"\") texts[c] = \"\" #t = t.replace(\"", "+ originalVideo, part='snippet,status') # place video in custom playlist def playlist_items_insert(properties, **kwargs): resource", "HTTP transport library not to retry, since # we are handling retry logic", "timestamp is reached #ES: if t is a timestamp #ES: removing punctuation from", "client_secret. 
\"\"\" to create a client secret file: google apis dashboard --> create", "num > 0: if sp[1] <= sp1[1]: print \"\\nThere is a problem with", "time stamping can be set to True or False (make a variable for", ").execute() print_results(results) #'snippet.playlistId': playlistID, playlist_items_insert( {'snippet.resourceId.kind': 'youtube#video', 'snippet.resourceId.videoId': vid, 'snippet.position': ''}, part='snippet', onBehalfOfContentOwner='')", "\"31\": print subtitle c += 1 time.sleep(3) #deletes videos from youtube -ES if", "must still be set to True): resumeUploads = False #ES: upload snippet transcripts", "the file name of your transcript (excluding the \\\".txt\\\" extention): \") try: verifyExistence", "\" + str(c) with open(folderName + \"/\" + fileName + \"_\" + str(c)", "# waitLonger = False # except: # waitLonger = True # print \"Waiting", "a list of cumulative times so that the rest of the pipeline can", "for processing. YouTube allows a maximum of 100 video uploads per 24h using", "when concatenating snippets (i.e. when combineSubtitles = True) #ES A feature created by", "that the rest of the pipeline can run if combine_only == True: t1", "by hitting 'Enter': \") continue print \"\\n\\n\" print \"This application creates subtitles for", "API credentials. Continue?\" print \"\\nIf all input was correct, the program will begin", "the available space on your hard drive, and then restart the program.\" print", "== 'y': resampleSubtitles = True elif answer == 'n': resampleSubtitles = False elif", "# This variable defines a message to display if the CLIENT_SECRETS_FILE is #", "exit() #ES: UPLOADS THE VIDEOS if uploadVideos == True: #ES: the following is", "any point by pressing Ctrl+C (Cmd+C on Mac).\" time.sleep(1) print \"\\n\" print \"This", "Continue? 
(y/n) \") if verifyLanguage.lower() == '' or 'y': break #if combineSubtitles ==", "open(folderName + \"/\" + fileName + \".txt\", 'r') as myfile: text = myfile.read().replace('\\n',", "[HH:MM:SS.00], followed by a newline.\\n\\nPlease enter the file name of your timestamp list", "myfile.read().replace('\\n', '') with open(folderName + \"/\" + \"delete me.txt\") as f: text =", "\"/\" + fileName + \"_\" + str(c) + \".txt\", 'w') as thefile: #thefile", "(e.g. the interviewer or interviewee's names) that precede their discourse (e.g. \\\"Emmanuel: Hi,", "restart by uploading all videos. You may need to remove any previously-uploaded videos", "valid file location.') print \"\\nSnipping completed. No further options were selected. Exiting...\" exit()", "== True: print \"\\nDownloading captions...\" c = 1 waitLonger = True for s", "if not t.replace('[','').replace(']','').replace(':','').replace('.','').replace('\\n','').isdigit() and t != \"\\n\": #ES: add t to position c", "youtube api click \"YouTube Data API v3\" and ENABLE it click \"create credentials\"", "has yet to be explored... placeBasedTimestamping = False #ES: resample subtitles to prevent", "your video and transcript or the intended language code of your subtitles (e.g.", "deleteVideos = False elif answer == '': deleteVideos = False answer = raw_input(\"\\n7/7", "in the transcript: \") print \"\\n\" #____________# # let rodolphe know if there", "condition is never met. if t != \"\" and t.replace('[','').replace(']','').replace(':','').replace('.','').replace('\\n','').isdigit() and \"[\" in", "#c += 1 wait = True with open(folderName + \"/\" + 'videoids.pkl', 'wb')", "language, status) c = 1 captionsids = [] wait = False if uploadTranscripts", "video snippets,\\n- uploads these video snippets to Youtube as private videos only visible", "is ever called... 
# Call the API's captions.insert method to upload a caption", "#print \"Uploaded caption track '%s(%s) in '%s' language, '%s' status.\" % (name, #", "'y': uploadTranscripts = True elif answer == 'n': uploadTranscripts = False elif answer", "\"E.H.\" #interviewee = \"E.M.\" #fileName = 'Frederic' #originalVideo = \"Frederic.mov\" #interviewer = \"M.M.\"", "<KEY> #client id is in client_id.json CLIENT_SECRETS_FILE = \"client_id.json\" # This OAuth 2.0", "into captions. It is\",strftime(\"%H:%M:%S\", localtime()),\". Script will resume in \" + str(2 *", "code can properly run.\" exit() #ES print texts[c] #print \"splits: \" + str(splits)", "Would you like to reorganize subtitles to remove lone words? (Experimental) (n) \")", "'/' + fileName + \"_\" + str(c) + \".mp4\" if not os.path.exists(media_file): exit('Please", "since # we are handling retry logic ourselves. httplib2.RETRIES = 1 # Maximum", "\") answer = verify_y_n_none(answer) if answer == 'y': resumeUploads = True elif answer", "u'yc4IXkGHuXs', u'ZauR51lBjQo', u'kisoEOTjmVI', u'V9XdpjtUU4Q', u'eOdKfhePfTs', u'AAQ9YuybUxM', u'3BaTzSSL4_c', u'OriOoB5yF0s', u'91qOFKithgE', u'WQJQkGEwG-Q', u'n4eW0T6Oek0', u'2dRf-EbKYHA', u'RUgi4NfoPEw',", "if wait == True: print \"\\nWaiting for videos to be processed. It is\",strftime(\"%H:%M:%S\",", "\"\\n6.3 If your transcript has speaker names (e.g. 
the interviewer or interviewee's names)", "verify_y_n_none(a): while True: a = a.lower().strip() if a == 'y' or a ==", "credentials is None or credentials.invalid: credentials = run_flow(flow, storage, args) # Trusted testers", "code of your video and transcript or the intended language code of your", "str(int(m)) + \":\" + str(int(sec)) #ES: open anita/Anita.txt as myfile try: with open(folderName", "+ \"_\" + str(c) +\".mp4\") media_file = folderName + '/' + fileName +", "# print s_to_hms(i[0]),\"->\",s_to_hms(i[1]) #time.sleep(60) #print splits,splits[len(splits)-1][1] #splits.append([splits[len(splits)-1][1],7200]) #print splits #print \"Wait\" #time.sleep(30) c", "retry logic ourselves. httplib2.RETRIES = 1 # Maximum number of times to retry", "between 2 timestamps, at the least. #place-based time stamping can be set to", "import httplib2 import os import sys import httplib import random from apiclient.discovery import", "not apply to your transcript, simply leave the following two answers blank by", "+ float(t[2]) splits.append([t0,t0+t1]) #print int(t[0])*3600 + int(t[1])*60 + int(t[2]) t_list.append(t1) t0 = t0", "yes_or_no(question) print \"\\n1. Slicing into \" + str(len(splits)) + \" parts\" time.sleep(1) for", "requires requests to use an SSL connection. 
YOUTUBE_READ_WRITE_SSL_SCOPE = \"https://www.googleapis.com/auth/youtube.force-ssl\" API_SERVICE_NAME = \"youtube\"", "u'GrMtKARI9kQ', u'YDHnQAE7U0w', u'yc4IXkGHuXs', u'ZauR51lBjQo', u'kisoEOTjmVI', u'V9XdpjtUU4Q', u'eOdKfhePfTs', u'AAQ9YuybUxM', u'3BaTzSSL4_c', u'OriOoB5yF0s', u'91qOFKithgE', u'WQJQkGEwG-Q', u'n4eW0T6Oek0',", "uploadFull = False #ES: combine vtt snippets that were downloaded from Youtube into", "to Youtube as transcript files for these video snippets,\\n- allows Youtube to sync", "= myfile.read().replace('\\n', '') #print \"ES: replace \\\\n with ''\" with open(folderName + \"/\"", "return a else: a = raw_input(\"Please answer 'y' or 'n': \") continue def", "fast subtitles that are hard to read) (n) \") answer = verify_y_n_none(answer) if", "as an array. if key[-2:] == '[]': key = key[0:len(key)-2:] is_array = True", "print e print \"\\n One of your timestamps isn't formatted correctly. Consult README.md", "\"Please set the variable 'snipTranscript' to True so that the code can properly", "this case, the user has chosen to only combine subtitles. 
the switch combine_only", "to what story you want to process - ES #interviewer = \"C.V.\" #interviewee", "me.txt\",\"w+\") foo.close() with open(folderName + \"/\" + \"delete me.txt\", 'r') as myfile: text", "subtitle (verify)) removeLoneWords = False #____________# #ES: USER INTERVIEW SECTION def verify_y_n(a): while", "have already used this tool, please select which processes you would like to", "#interviewee = \"B.K.\" #fileName = 'Berthe' #originalVideo = \"DD2FD4AE-FEE4-4DF3-9AF7-A4D6BF453B49.flv\" #interviewer = \"S.G.\" #interviewee", "= time[1].split(\":\") t0 = float(int(t_0[0])*3600) + int(float(t_0[1])*60) + int(float(t_0[2])) t1 = float(int(t_1[0])*3600) +", "the text snippets to Youtube as transcript files for these video snippets,\\n- allows", "fr, es, etc.):\\n(You can refer to the second column in http://www.loc.gov/standards/iso639-2/php/code_list.php for the", "= t.replace(\" \", \"\") #t = t t = t.replace('[','').replace(']','').replace('\\n','') t = unicode(t,", "int(t[0])*3600 + int(t[1])*60 + int(t[2]) t_list.append(t1) t0 = t1 except ValueError as e:", "keeping full sentences intact? (Experimental; this feature is not recommended since subtitle units", "files (.vtt) downloadCaptions = True #ES: delete uploaded video snippets from your Youtube", "'wb') as f: pickle.dump(captionsids, f) print \"Waiting for transcripts to be processed into", "yes_or_no(question) print \"\\n1. Slicing into \" + str(len(splits)) + \" parts & uploading", "account once subtitle processing is complete deleteVideos = False #ES: upload the full", "when an apiclient.errors.HttpError with one of these status # codes is raised. RETRIABLE_STATUS_CODES", "HTTP error %d occurred:\\n%s\" % (e.resp.status,e.content) else: raise except RETRIABLE_EXCEPTIONS, e: error =", "= \"Oscar.mp4\" ### START BOILERPLATE CODE # Sample Python code for user authorization", "met. 
if t != \"\" and t.replace('[','').replace(']','').replace(':','').replace('.','').replace('\\n','').isdigit() and \"[\" in t: #increase pos", "that in the next time through the # \"for pa in range ...\"", "defines a message to display if the CLIENT_SECRETS_FILE is # missing. MISSING_CLIENT_SECRETS_MESSAGE =", "already has a \"snippet\" object. ref = ref[key] return resource # Remove keyword", "print \"\\nThe video IDs are composed of the following: \" + str(videoids) #print", "\" timestamps formatted like such '[HH:MM:SS.00]'.\" else: print \"Please set the variable 'snipTranscript'", "set the variable 'snipTranscript' to True so that the code can properly run.\"", "a total subtitle file. combineSubtitles = True #ES: the following switches control how", "len(videoids): ffmpeg_extract_subclip(folderName + \"/\" + originalVideo, s[0], s[1], targetname=folderName + \"/\" + fileName", "with open(folderName + \"/\" + 'videoids.pkl', 'rb') as f:videoids = pickle.load(f) except Exception", "#search_response = service.search().list( # q=\"Anita\", # part=\"id\", # type=\"video\", # fields=\"items/id\" #).execute() #", "\") try: verifyExistence = os.stat(folderName + '/' + originalVideo).st_size except Exception as e:", "= verify_y_n_none(answer) if answer == 'y': uploadVideos = True snipVideos = True elif", "videos_insert(properties, media_file, **kwargs): resource = build_resource(properties) # See full sample for function kwargs", "verifyExistence = os.stat(folderName + '/' + originalVideo).st_size except Exception as e: print e", "\" \" + captionsids[c-1] + \" to be processed into captions. 
It is\",strftime(\"%H:%M:%S\",", "application creates subtitles for a video for which you have an associated transcript.", "e print \"The folder named '\" + folderName + \"' does not exist", "return good_kwargs ### END BOILERPLATE CODE # Sample python code for videos.insert def", "= run_flow(flow, storage, args) # Trusted testers can download this discovery document from", "= [] #ES: PREPARE INPUT TEXT FOR PROCESSING if snipTranscript == True: for", "= t0 + t1 else: t1 = int(t[0])*60 + int(t[1]) splits.append([t0,t1]) t_list.append(t1) t0", "1 if retry > MAX_RETRIES: exit(\"No longer attempting to retry.\") max_sleep = 2", "Maximum number of times to retry before giving up. MAX_RETRIES = 10 #", "MAX_RETRIES = 10 # Always retry when these exceptions are raised. RETRIABLE_EXCEPTIONS =", "and 'id' in response: print \"Video id '%s' was successfully uploaded.\" % response['id']", "retry when an apiclient.errors.HttpError with one of these status # codes is raised.", "None: if method == 'insert' and 'id' in response: print \"Video id '%s'", "import os import sys import httplib import random from apiclient.discovery import build from", "will be a property in that object. prop_array = p.split('.') ref = resource", "would like to run. If this is your first time running the tool,", "(\",str(sp1[1]),\" seconds), but it is smaller.\" print \"Please make sure your timestamps are", "\"B.K.\" #fileName = 'Berthe' #originalVideo = \"DD2FD4AE-FEE4-4DF3-9AF7-A4D6BF453B49.flv\" #interviewer = \"S.G.\" #interviewee = \"O.G.\"", "your video file (\" + str(videoSize) + \" Mb).\" yes_or_no(question) print \"\\n1. 
Slicing", "None: try: print \"Uploading file...\" status, response = request.next_chunk() if response is not", "uploadTranscripts == True: with open(folderName + \"/\" + 'videoids.pkl', 'rb') as f: videoids", "MAX_RETRIES: exit(\"No longer attempting to retry.\") max_sleep = 2 ** retry sleep_seconds =", "uploadTranscripts == True: #print splits,videoids #uploads transcripts print \"\\nUploading transcripts...\" for s in", "+ 'videoids.pkl', 'rb') as f:videoids = pickle.load(f) except Exception as e: print e", "into \" + str(len(splits)) + \" text snippets based on it containing \"", "for which you have an associated transcript. Make sure you have gone over", "on youtube.com and then restart the program.\" uploadVideos = True wait = False", "answer == 'y': deleteVideos = True elif answer == 'n': deleteVideos = False", "IOError as e: print \"No text file found because you are not running", "except Exception as e: print e print \"The file named '\" + originalVideo", "(above), you have the option to make subtitle entries full sentences (not recommended,", "is <KEY> #client id is in client_id.json CLIENT_SECRETS_FILE = \"client_id.json\" # This OAuth", "feature created by RG that has yet to be explored... placeBasedTimestamping = False", "credntials like keys\" search for youtube api click \"YouTube Data API v3\" and", "\"delete me.txt\", 'r') as myfile: text = myfile.read().replace('\\n', '') with open(folderName + \"/\"", "before it (\",str(sp1[1]),\" seconds), but it is smaller.\" print \"Please make sure your", "syncing? (y) \") answer = verify_y_n_none(answer) if answer == 'y': uploadVideos = True", "True: print \"\\nWaiting for videos to be processed. It is\",strftime(\"%H:%M:%S\", localtime()),\". 
Script will", "fileName + \".srt\" service.captions().insert( part=\"snippet\", body=dict( snippet=dict( videoId=id, language=language, name=originalVideo, isDraft=True, sync=False )", "caption track '%s(%s) in '%s' language, '%s' status.\" % (name, # id, language,", "lone words? (Experimental) (n) \") answer = verify_y_n_none(answer) if answer == 'y': removeLoneWords", "\") continue def verify_y_n_none(a): while True: a = a.lower().strip() if a == 'y'", "verify_y_n_none(answer) if answer == 'y': removeLoneWords = True elif answer == 'n': removeLoneWords", "add t to position c of texts texts[c] += t#.encode('utf8') #print t.replace('[','').replace(']','').replace(':','').replace('.','').replace('\\n','').isdigit() #ES:", "of times to retry before giving up. MAX_RETRIES = 10 # Always retry", "service.search().list( # q=\"Anita\", # part=\"id\", # type=\"video\", # fields=\"items/id\" #).execute() # #videos =", "names? (Experimental) (n) \") answer = verify_y_n_none(answer) if answer == 'y': placeBasedTimestamping =", "were \" + str(len(splits)) + \" timestamps detected in \" + fileName +", "ascending order...\" sp1 = 0 num = 0 #print str(splits) #print str(t_list) for", "presence of place names? (Experimental) (n) \") answer = verify_y_n_none(answer) if answer ==", "+ str(int(sec)) #ES: open anita/Anita.txt as myfile try: with open(folderName + \"/\" +", "+ \"delete me.txt\",\"w+\") foo.close() with open(folderName + \"/\" + \"delete me.txt\", 'r') as", "RETRIABLE_STATUS_CODES = [500, 502, 503, 504] # This method implements an exponential backoff", "#list of cut-up texts texts = [\"\"] t0 = 0 c = 0", "splits.append([t0,t0+t1]) #print int(t[0])*3600 + int(t[1])*60 + int(t[2]) t_list.append(t1) t0 = t0 + t1", "for s in splits: print c,videoids[c-1] service.videos().delete( id=videoids[c-1] ).execute() c += 1 time.sleep(10)", "\"Waiting for transcripts to be processed into captions. It is\",strftime(\"%H:%M:%S\", localtime()),\". 
Script will", "will resume in \" + str(sleepingTime/60) + \" minutes...\" time.sleep(sleepingTime) id = vid", "True for other processes to run) snipVideos = True #ES: upload video snippets", "left off (uploadVideos must still be set to True): resumeUploads = False #ES:", "(n) \") answer = verify_y_n_none(answer) if answer == 'y': removeLoneWords = True elif", "to \",str(sp[1]),\" seconds) should be a larger number than the timestamp that comes", "txt files are stored #folderName = 'venant' #fileName refers to the name of", "digit (False) and therefore the following condition is almost always met. if not", "+= 1 time.sleep(10) if combineSubtitles == True: #compiles them all print \"\\nCombining subtitle", "\" Mb available space on your hard drive to run this program. Continue?", "empty values out of the inserted resource. def build_resource(properties): resource = {} for", "except IOError as e: print \"No text file found because you are not", "detected in \" + fileName + \". \" + str(len(splits)) + \" video", "print e print \"The file named '\" + fileName + \".txt' does not", "timestamps (must be set to True for other processes to run) snipVideos =", "exit() print \"\\n\" videoSize = os.stat(folderName + '/' + originalVideo).st_size/1000000 answer = raw_input(\"If", "2.0 information for this application, including its client_id and # client_secret. \"\"\" to", "api click \"YouTube Data API v3\" and ENABLE it click \"create credentials\" create", "t: #increase pos on texts by 1 c += 1 #ES: printing deets", "the property is \"snippet.description\", and the resource # already has a \"snippet\" object.", "reached #ES: if t is a timestamp #ES: removing punctuation from '[00:00:01.09]' since", "\" + captionsids[c-1] + \" to be processed into captions. It is\",strftime(\"%H:%M:%S\", localtime()),\".", "time as needed - ES #adjust switches as needed sleepingTime = 400 #___SWITCHES(defaults)___#", "and transcript or the intended language code of your subtitles (e.g. 
en, fr,", "to create a playlist in youtube online and copy url id to script", "for function return vid def hms_to_s(time): time = unicode(time, \"UTF-8\") time = time.split(\"", "True: t1 = int(t[0])*60 + int(t[1]) splits.append([t0,t0+t1]) t_list.append(t1) t0 = t0 + t1", "t1 = int(t[0])*3600 + int(t[1])*60 + float(t[2]) splits.append([t0,t0+t1]) #print int(t[0])*3600 + int(t[1])*60 +", "will begin snipping\" yes_or_no(question) print \"\\n1. Slicing into \" + str(len(splits)) + \"", "a caption track in draft status. def upload_caption(youtube, video_id, language, name, file): insert_result", "resuming video uploads from a previously-initiated process? (n) \") answer = verify_y_n_none(answer) if", "each video to which your subtitle files are associated. These values will be", "\"youtube\" API_VERSION = \"v3\" # This variable defines a message to display if", "(i.e. when combineSubtitles = True) #ES A feature created by RG that has", "thefile: #thefile = open(folderName + \"/\" + fileName + \"_\" + str(c) +", "httplib.CannotSendRequest, httplib.CannotSendHeader, httplib.ResponseNotReady, httplib.BadStatusLine) # Always retry when an apiclient.errors.HttpError with one of", "good_kwargs[key] = value return good_kwargs ### END BOILERPLATE CODE # Sample python code", "page # and it should be in the same directory with the code.", "that object. 
prop_array = p.split('.') ref = resource for pa in range(0, len(prop_array)):", "'y': snipVideos = True elif answer == 'n': snipVideos = False elif answer", "\"0\" + str(c) else: cc = str(c) #print subtitle print cc with open(folderName", "or 'n': \") continue def verify_y_n_none(a): while True: a = a.lower().strip() if a", "appears in the transcript: \") interviewee = raw_input(\"\\n6.3.2 Please input your interviewee's name", "adjacent subtitle (verify)) removeLoneWords = False #____________# #ES: USER INTERVIEW SECTION def verify_y_n(a):", "you have the available space on your hard drive, and then restart the", "it (\",str(sp1[1]),\" seconds), but it is smaller.\" print \"Please make sure your timestamps", "e if error is not None: print error retry += 1 if retry", "0: print \"(However, it looks like \",len(videoids),\" video snippets were already uploaded to", "fileName + '.txt').st_size except Exception as e: print e print \"The file named", "#client id is in client_id.json CLIENT_SECRETS_FILE = \"client_id.json\" # This OAuth 2.0 access", "at : (into 3) t = t.split(\":\") if len(t) > 3 or len(t)", "cc = \"\" if c < 10: cc = \"0\" + str(c) else:", "on the size of your video file (\" + str(videoSize) + \" Mb).\"", "if sp[1] <= sp1[1]: print \"\\nThere is a problem with one of your", "transcript (.txt) into text snippets based on its timestamps,\\n- snips the associated video", "e print \"\\nThe program is unable to resume uploads because there are no", "upload video snippets uploadVideos = True #ES: if the video/caption upload process was", "a else: a = raw_input(\"Please answer 'y' or 'n', or leave the answer", "+ str(c) + \".txt\", 'w') try: #ES: write the previous position of c", "longer attempting to retry.\") max_sleep = 2 ** retry sleep_seconds = random.random() *", "+ str(sleepingTime/60) + \" minutes...\" time.sleep(sleepingTime) id = vid print \"\\nUploading compiled subtitles...\"", "t != \"\\n\": #ES: add t to position c of texts texts[c] +=", "folder 
'\" + folderName + \"'. Please see README.md for instructions.\" print \"exiting", "answer == 'n': downloadCaptions = False elif answer == '': downloadCaptions = True", "flow_from_clientsecrets(CLIENT_SECRETS_FILE, scope=YOUTUBE_READ_WRITE_SSL_SCOPE, message=MISSING_CLIENT_SECRETS_MESSAGE) storage = Storage(\"youtube-api-snippets-oauth2.json\") credentials = storage.get() if credentials is None", "formatted correctly. Consult README.md for guidelines on proper timestamp formatting.\" print \"\\nexiting application...\"", "for user authorization import httplib2 import os import sys import httplib import random" ]
[ "os.chdir('../../ComplementaryData/Step_02_DraftModels/') # %% <load data> Lreu_ca = cobra.io.load_json_model('CarveMe/Lreu_ca.json') Lreu_ca_gp = cobra.io.load_json_model('CarveMe/Lreu_ca_gp.json') Lreu_from_iNF517 =", "== '__main__': os.chdir('../../ComplementaryData/Step_02_DraftModels/') # %% <load data> Lreu_ca = cobra.io.load_json_model('CarveMe/Lreu_ca.json') Lreu_ca_gp = cobra.io.load_json_model('CarveMe/Lreu_ca_gp.json')", "fg_1 = My_def.venn3_samesize([set(Lreu_from_iBT721_genes), set(Lreu_from_iNF517_genes), set(Lreu_from_iML1515_genes)], ('iBT721', 'iNF517','iML1515'), ax=axes[0]) fg_2 = My_def.venn3_samesize([set(Lreu_from_iBT721_reas), set(Lreu_from_iNF517_reas), set(Lreu_from_iML1515_reas)],", "# Lreu_ca_gp have more figure, axes = plt.subplots(1, 3) axes[0].set_title(\"gene\") axes[1].set_title(\"rea\") axes[2].set_title(\"met\") fg1", "__name__ == '__main__': os.chdir('../../ComplementaryData/Step_02_DraftModels/') # %% <load data> Lreu_ca = cobra.io.load_json_model('CarveMe/Lreu_ca.json') Lreu_ca_gp =", "Lreu_ca_reas = [i.id for i in Lreu_ca.reactions] Lreu_ca_gp_reas = [i.id for i in", "venn2([set(Lreu_ca_genes), set(Lreu_ca_gp_genes)], ('Normal','Gram positive' ), ax=axes[0]) # fg1.get_patch_by_id('10').set_color('Aquamarine') fg2 = venn2([set(Lreu_ca_reas), set(Lreu_ca_reas)], ('Normal','Gram", "# %% <fig compare Lreu_ca and Lreu_ca_gp> # Lreu_ca_gp have more figure, axes", "fg2 = venn2([set(Lreu_ca_reas), set(Lreu_ca_reas)], ('Normal','Gram positive'), ax=axes[1]) fg3 = venn2([set(Lreu_ca_mets), set(Lreu_ca_gp_mets)], ('Normal','Gram positive'),", "i in Lreu_ca.reactions] Lreu_ca_gp_reas = [i.id for i in Lreu_ca_gp.reactions] Lreu_ca_mets = [i.id", "Lreu_from_iNF517_reas = [i.id for i in Lreu_from_iNF517.reactions] Lreu_from_iNF517_mets = [i.id for i in", "for i in Lreu_from_iBT721.reactions] Lreu_from_iBT721_mets = [i.id for i in Lreu_from_iBT721.metabolites] Lreu_from_iNF517_genes =", "Lreu_from_iBT721.reactions] Lreu_from_iBT721_mets = 
[i.id for i in Lreu_from_iBT721.metabolites] Lreu_from_iNF517_genes = [i.id for i", "for i in Lreu_from_iNF517.metabolites] Lreu_from_iML1515_genes = [i.id for i in Lreu_from_iML1515.genes] Lreu_from_iML1515_reas =", "Lreu_from_iBT721_reas = [i.id for i in Lreu_from_iBT721.reactions] Lreu_from_iBT721_mets = [i.id for i in", "i in Lreu_from_iNF517.genes] Lreu_from_iNF517_reas = [i.id for i in Lreu_from_iNF517.reactions] Lreu_from_iNF517_mets = [i.id", "<NAME> at 2019-05-27 \"\"\"Step_01_model_comparison.py :description : script :param : :returns: :rtype: \"\"\" import", "in Lreu_ca_gp.reactions] Lreu_ca_mets = [i.id for i in Lreu_ca.metabolites] Lreu_ca_gp_mets = [i.id for", "Lreu_ca_mets = [i.id for i in Lreu_ca.metabolites] Lreu_ca_gp_mets = [i.id for i in", "for i in Lreu_from_iML1515.metabolites] # %% <fig compare templated based method models and", "at 2019-05-27 \"\"\"Step_01_model_comparison.py :description : script :param : :returns: :rtype: \"\"\" import os", "for i in Lreu_ca.metabolites] Lreu_ca_gp_mets = [i.id for i in Lreu_ca_gp.metabolites] # %%", "in Lreu_from_iBT721.genes] Lreu_from_iBT721_reas = [i.id for i in Lreu_from_iBT721.reactions] Lreu_from_iBT721_mets = [i.id for", "i in Lreu_from_iNF517.metabolites] Lreu_from_iML1515_genes = [i.id for i in Lreu_from_iML1515.genes] Lreu_from_iML1515_reas = [i.id", "Lreu_from_iBT721_genes = [i.id for i in Lreu_from_iBT721.genes] Lreu_from_iBT721_reas = [i.id for i in", "= cobra.io.load_json_model('CarveMe/Lreu_ca.json') Lreu_ca_gp = cobra.io.load_json_model('CarveMe/Lreu_ca_gp.json') Lreu_from_iNF517 = cobra.io.load_json_model('Template/Lreu_from_iNF517.json') Lreu_from_iBT721 = cobra.io.load_json_model('Template/Lreu_from_iBT721.json') Lreu_from_iML1515 =", "i in Lreu_from_iNF517.reactions] Lreu_from_iNF517_mets = [i.id for i in Lreu_from_iNF517.metabolites] Lreu_from_iML1515_genes = [i.id", "fg_2 = My_def.venn3_samesize([set(Lreu_from_iBT721_reas), set(Lreu_from_iNF517_reas), set(Lreu_from_iML1515_reas)], 
('iBT721', 'iNF517','iML1515'), ax=axes[1]) fg_3 = My_def.venn3_samesize([set(Lreu_from_iBT721_mets), set(Lreu_from_iNF517_mets), set(Lreu_from_iML1515_mets)],", "for i in Lreu_ca.genes] Lreu_ca_gp_genes = [i.id for i in Lreu_ca_gp.genes] Lreu_ca_reas =", "cobra.io.load_json_model('Template/Lreu_from_iBT721.json') Lreu_from_iML1515 = cobra.io.load_json_model('Template/Lreu_from_iML1515.json') bigg_rea_df = pd.read_csv('../bigg_database/bigg_rea_df.csv', sep='\\t') bigg_met_df = pd.read_csv('../bigg_database/bigg_met_df.csv', sep='\\t') Lreu_ca_genes", "Lreu_ca_gp_reas = [i.id for i in Lreu_ca_gp.reactions] Lreu_ca_mets = [i.id for i in", "('Normal','Gram positive'), ax=axes[1]) fg3 = venn2([set(Lreu_ca_mets), set(Lreu_ca_gp_mets)], ('Normal','Gram positive'), ax=axes[2]) plt.show() Lreu_from_iBT721_genes =", ": :returns: :rtype: \"\"\" import os import cobra from matplotlib import pyplot as", "plt.show() Lreu_from_iBT721_genes = [i.id for i in Lreu_from_iBT721.genes] Lreu_from_iBT721_reas = [i.id for i", "plt.subplots(1, 3) axes[0].set_title(\"gene\") axes[1].set_title(\"rea\") axes[2].set_title(\"met\") fg_1 = My_def.venn3_samesize([set(Lreu_from_iBT721_genes), set(Lreu_from_iNF517_genes), set(Lreu_from_iML1515_genes)], ('iBT721', 'iNF517','iML1515'), ax=axes[0])", "= My_def.venn3_samesize([set(Lreu_from_iBT721_reas), set(Lreu_from_iNF517_reas), set(Lreu_from_iML1515_reas)], ('iBT721', 'iNF517','iML1515'), ax=axes[1]) fg_3 = My_def.venn3_samesize([set(Lreu_from_iBT721_mets), set(Lreu_from_iNF517_mets), set(Lreu_from_iML1515_mets)], ('iBT721',", "Lreu_ca_gp.genes] Lreu_ca_reas = [i.id for i in Lreu_ca.reactions] Lreu_ca_gp_reas = [i.id for i", "matplotlib_venn import venn2 import pandas as pd import My_def from My_def.model_report import *", "positive' ), ax=axes[0]) # fg1.get_patch_by_id('10').set_color('Aquamarine') fg2 = venn2([set(Lreu_ca_reas), set(Lreu_ca_reas)], ('Normal','Gram positive'), ax=axes[1]) fg3", "3) axes[0].set_title(\"gene\") 
axes[1].set_title(\"rea\") axes[2].set_title(\"met\") fg_1 = My_def.venn3_samesize([set(Lreu_from_iBT721_genes), set(Lreu_from_iNF517_genes), set(Lreu_from_iML1515_genes)], ('iBT721', 'iNF517','iML1515'), ax=axes[0]) fg_2", "= [i.id for i in Lreu_from_iNF517.genes] Lreu_from_iNF517_reas = [i.id for i in Lreu_from_iNF517.reactions]", "sep='\\t') Lreu_ca_genes = [i.id for i in Lreu_ca.genes] Lreu_ca_gp_genes = [i.id for i", "Lreu_from_iNF517.genes] Lreu_from_iNF517_reas = [i.id for i in Lreu_from_iNF517.reactions] Lreu_from_iNF517_mets = [i.id for i", "and Lreu_ca_gp> # just a overview figure_2, axes = plt.subplots(1, 3) axes[0].set_title(\"gene\") axes[1].set_title(\"rea\")", "i in Lreu_ca_gp.reactions] Lreu_ca_mets = [i.id for i in Lreu_ca.metabolites] Lreu_ca_gp_mets = [i.id", "= [i.id for i in Lreu_ca_gp.metabolites] # %% <fig compare Lreu_ca and Lreu_ca_gp>", "set(Lreu_ca_reas)], ('Normal','Gram positive'), ax=axes[1]) fg3 = venn2([set(Lreu_ca_mets), set(Lreu_ca_gp_mets)], ('Normal','Gram positive'), ax=axes[2]) plt.show() Lreu_from_iBT721_genes", "[i.id for i in Lreu_from_iML1515.metabolites] # %% <fig compare templated based method models", "'iNF517','iML1515'), ax=axes[0]) fg_2 = My_def.venn3_samesize([set(Lreu_from_iBT721_reas), set(Lreu_from_iNF517_reas), set(Lreu_from_iML1515_reas)], ('iBT721', 'iNF517','iML1515'), ax=axes[1]) fg_3 = My_def.venn3_samesize([set(Lreu_from_iBT721_mets),", "compare Lreu_ca and Lreu_ca_gp> # Lreu_ca_gp have more figure, axes = plt.subplots(1, 3)", "[i.id for i in Lreu_from_iBT721.metabolites] Lreu_from_iNF517_genes = [i.id for i in Lreu_from_iNF517.genes] Lreu_from_iNF517_reas", "for i in Lreu_from_iNF517.genes] Lreu_from_iNF517_reas = [i.id for i in Lreu_from_iNF517.reactions] Lreu_from_iNF517_mets =", "figure_2, axes = plt.subplots(1, 3) axes[0].set_title(\"gene\") axes[1].set_title(\"rea\") axes[2].set_title(\"met\") fg_1 = My_def.venn3_samesize([set(Lreu_from_iBT721_genes), set(Lreu_from_iNF517_genes), 
set(Lreu_from_iML1515_genes)],", "axes[0].set_title(\"gene\") axes[1].set_title(\"rea\") axes[2].set_title(\"met\") fg_1 = My_def.venn3_samesize([set(Lreu_from_iBT721_genes), set(Lreu_from_iNF517_genes), set(Lreu_from_iML1515_genes)], ('iBT721', 'iNF517','iML1515'), ax=axes[0]) fg_2 =", "[i.id for i in Lreu_from_iBT721.genes] Lreu_from_iBT721_reas = [i.id for i in Lreu_from_iBT721.reactions] Lreu_from_iBT721_mets", "pd.read_csv('../bigg_database/bigg_rea_df.csv', sep='\\t') bigg_met_df = pd.read_csv('../bigg_database/bigg_met_df.csv', sep='\\t') Lreu_ca_genes = [i.id for i in Lreu_ca.genes]", "import pandas as pd import My_def from My_def.model_report import * if __name__ ==", "positive'), ax=axes[1]) fg3 = venn2([set(Lreu_ca_mets), set(Lreu_ca_gp_mets)], ('Normal','Gram positive'), ax=axes[2]) plt.show() Lreu_from_iBT721_genes = [i.id", "axes[2].set_title(\"met\") fg_1 = My_def.venn3_samesize([set(Lreu_from_iBT721_genes), set(Lreu_from_iNF517_genes), set(Lreu_from_iML1515_genes)], ('iBT721', 'iNF517','iML1515'), ax=axes[0]) fg_2 = My_def.venn3_samesize([set(Lreu_from_iBT721_reas), set(Lreu_from_iNF517_reas),", "set(Lreu_from_iML1515_genes)], ('iBT721', 'iNF517','iML1515'), ax=axes[0]) fg_2 = My_def.venn3_samesize([set(Lreu_from_iBT721_reas), set(Lreu_from_iNF517_reas), set(Lreu_from_iML1515_reas)], ('iBT721', 'iNF517','iML1515'), ax=axes[1]) fg_3", "[i.id for i in Lreu_ca_gp.reactions] Lreu_ca_mets = [i.id for i in Lreu_ca.metabolites] Lreu_ca_gp_mets", "[i.id for i in Lreu_from_iNF517.genes] Lreu_from_iNF517_reas = [i.id for i in Lreu_from_iNF517.reactions] Lreu_from_iNF517_mets", "# fg1.get_patch_by_id('10').set_color('Aquamarine') fg2 = venn2([set(Lreu_ca_reas), set(Lreu_ca_reas)], ('Normal','Gram positive'), ax=axes[1]) fg3 = venn2([set(Lreu_ca_mets), set(Lreu_ca_gp_mets)],", "# %% <load data> Lreu_ca = cobra.io.load_json_model('CarveMe/Lreu_ca.json') Lreu_ca_gp = cobra.io.load_json_model('CarveMe/Lreu_ca_gp.json') Lreu_from_iNF517 = 
cobra.io.load_json_model('Template/Lreu_from_iNF517.json')", "* if __name__ == '__main__': os.chdir('../../ComplementaryData/Step_02_DraftModels/') # %% <load data> Lreu_ca = cobra.io.load_json_model('CarveMe/Lreu_ca.json')", ":description : script :param : :returns: :rtype: \"\"\" import os import cobra from", "cobra.io.load_json_model('CarveMe/Lreu_ca_gp.json') Lreu_from_iNF517 = cobra.io.load_json_model('Template/Lreu_from_iNF517.json') Lreu_from_iBT721 = cobra.io.load_json_model('Template/Lreu_from_iBT721.json') Lreu_from_iML1515 = cobra.io.load_json_model('Template/Lreu_from_iML1515.json') bigg_rea_df = pd.read_csv('../bigg_database/bigg_rea_df.csv',", "axes[1].set_title(\"rea\") axes[2].set_title(\"met\") fg_1 = My_def.venn3_samesize([set(Lreu_from_iBT721_genes), set(Lreu_from_iNF517_genes), set(Lreu_from_iML1515_genes)], ('iBT721', 'iNF517','iML1515'), ax=axes[0]) fg_2 = My_def.venn3_samesize([set(Lreu_from_iBT721_reas),", "('iBT721', 'iNF517','iML1515'), ax=axes[0]) fg_2 = My_def.venn3_samesize([set(Lreu_from_iBT721_reas), set(Lreu_from_iNF517_reas), set(Lreu_from_iML1515_reas)], ('iBT721', 'iNF517','iML1515'), ax=axes[1]) fg_3 =", "Lreu_from_iBT721 = cobra.io.load_json_model('Template/Lreu_from_iBT721.json') Lreu_from_iML1515 = cobra.io.load_json_model('Template/Lreu_from_iML1515.json') bigg_rea_df = pd.read_csv('../bigg_database/bigg_rea_df.csv', sep='\\t') bigg_met_df = pd.read_csv('../bigg_database/bigg_met_df.csv',", ":rtype: \"\"\" import os import cobra from matplotlib import pyplot as plt from", "plt.subplots(1, 3) axes[0].set_title(\"gene\") axes[1].set_title(\"rea\") axes[2].set_title(\"met\") fg1 = venn2([set(Lreu_ca_genes), set(Lreu_ca_gp_genes)], ('Normal','Gram positive' ), ax=axes[0])", "= [i.id for i in Lreu_ca.genes] Lreu_ca_gp_genes = [i.id for i in Lreu_ca_gp.genes]", "ax=axes[0]) fg_2 = My_def.venn3_samesize([set(Lreu_from_iBT721_reas), set(Lreu_from_iNF517_reas), set(Lreu_from_iML1515_reas)], ('iBT721', 'iNF517','iML1515'), ax=axes[1]) 
fg_3 = My_def.venn3_samesize([set(Lreu_from_iBT721_mets), set(Lreu_from_iNF517_mets),", "venn2([set(Lreu_ca_reas), set(Lreu_ca_reas)], ('Normal','Gram positive'), ax=axes[1]) fg3 = venn2([set(Lreu_ca_mets), set(Lreu_ca_gp_mets)], ('Normal','Gram positive'), ax=axes[2]) plt.show()", "Lreu_from_iML1515_genes = [i.id for i in Lreu_from_iML1515.genes] Lreu_from_iML1515_reas = [i.id for i in", "'__main__': os.chdir('../../ComplementaryData/Step_02_DraftModels/') # %% <load data> Lreu_ca = cobra.io.load_json_model('CarveMe/Lreu_ca.json') Lreu_ca_gp = cobra.io.load_json_model('CarveMe/Lreu_ca_gp.json') Lreu_from_iNF517", "bigg_met_df = pd.read_csv('../bigg_database/bigg_met_df.csv', sep='\\t') Lreu_ca_genes = [i.id for i in Lreu_ca.genes] Lreu_ca_gp_genes =", "script :param : :returns: :rtype: \"\"\" import os import cobra from matplotlib import", "= [i.id for i in Lreu_ca_gp.genes] Lreu_ca_reas = [i.id for i in Lreu_ca.reactions]", "for i in Lreu_ca_gp.genes] Lreu_ca_reas = [i.id for i in Lreu_ca.reactions] Lreu_ca_gp_reas =", "= [i.id for i in Lreu_ca_gp.reactions] Lreu_ca_mets = [i.id for i in Lreu_ca.metabolites]", "bigg_rea_df = pd.read_csv('../bigg_database/bigg_rea_df.csv', sep='\\t') bigg_met_df = pd.read_csv('../bigg_database/bigg_met_df.csv', sep='\\t') Lreu_ca_genes = [i.id for i", "fg1.get_patch_by_id('10').set_color('Aquamarine') fg2 = venn2([set(Lreu_ca_reas), set(Lreu_ca_reas)], ('Normal','Gram positive'), ax=axes[1]) fg3 = venn2([set(Lreu_ca_mets), set(Lreu_ca_gp_mets)], ('Normal','Gram", "3) axes[0].set_title(\"gene\") axes[1].set_title(\"rea\") axes[2].set_title(\"met\") fg1 = venn2([set(Lreu_ca_genes), set(Lreu_ca_gp_genes)], ('Normal','Gram positive' ), ax=axes[0]) #", "Lreu_from_iBT721.genes] Lreu_from_iBT721_reas = [i.id for i in Lreu_from_iBT721.reactions] Lreu_from_iBT721_mets = [i.id for i", "Lreu_ca.metabolites] Lreu_ca_gp_mets = [i.id for i in Lreu_ca_gp.metabolites] # %% <fig compare Lreu_ca", "for i in Lreu_from_iBT721.metabolites] 
Lreu_from_iNF517_genes = [i.id for i in Lreu_from_iNF517.genes] Lreu_from_iNF517_reas =", "axes[0].set_title(\"gene\") axes[1].set_title(\"rea\") axes[2].set_title(\"met\") fg1 = venn2([set(Lreu_ca_genes), set(Lreu_ca_gp_genes)], ('Normal','Gram positive' ), ax=axes[0]) # fg1.get_patch_by_id('10').set_color('Aquamarine')", "i in Lreu_from_iBT721.genes] Lreu_from_iBT721_reas = [i.id for i in Lreu_from_iBT721.reactions] Lreu_from_iBT721_mets = [i.id", "i in Lreu_from_iML1515.genes] Lreu_from_iML1515_reas = [i.id for i in Lreu_from_iML1515.reactions] Lreu_from_iML1515_mets = [i.id", "<load data> Lreu_ca = cobra.io.load_json_model('CarveMe/Lreu_ca.json') Lreu_ca_gp = cobra.io.load_json_model('CarveMe/Lreu_ca_gp.json') Lreu_from_iNF517 = cobra.io.load_json_model('Template/Lreu_from_iNF517.json') Lreu_from_iBT721 =", "= [i.id for i in Lreu_ca.reactions] Lreu_ca_gp_reas = [i.id for i in Lreu_ca_gp.reactions]", "in Lreu_from_iML1515.metabolites] # %% <fig compare templated based method models and Lreu_ca_gp> #", "pd.read_csv('../bigg_database/bigg_met_df.csv', sep='\\t') Lreu_ca_genes = [i.id for i in Lreu_ca.genes] Lreu_ca_gp_genes = [i.id for", "= plt.subplots(1, 3) axes[0].set_title(\"gene\") axes[1].set_title(\"rea\") axes[2].set_title(\"met\") fg_1 = My_def.venn3_samesize([set(Lreu_from_iBT721_genes), set(Lreu_from_iNF517_genes), set(Lreu_from_iML1515_genes)], ('iBT721', 'iNF517','iML1515'),", "and Lreu_ca_gp> # Lreu_ca_gp have more figure, axes = plt.subplots(1, 3) axes[0].set_title(\"gene\") axes[1].set_title(\"rea\")", "= plt.subplots(1, 3) axes[0].set_title(\"gene\") axes[1].set_title(\"rea\") axes[2].set_title(\"met\") fg1 = venn2([set(Lreu_ca_genes), set(Lreu_ca_gp_genes)], ('Normal','Gram positive' ),", "import pyplot as plt from matplotlib_venn import venn2 import pandas as pd import", "in Lreu_ca.reactions] Lreu_ca_gp_reas = [i.id for i in Lreu_ca_gp.reactions] Lreu_ca_mets = [i.id for", "in Lreu_from_iNF517.reactions] Lreu_from_iNF517_mets = [i.id for i in 
Lreu_from_iNF517.metabolites] Lreu_from_iML1515_genes = [i.id for", "figure, axes = plt.subplots(1, 3) axes[0].set_title(\"gene\") axes[1].set_title(\"rea\") axes[2].set_title(\"met\") fg1 = venn2([set(Lreu_ca_genes), set(Lreu_ca_gp_genes)], ('Normal','Gram", "-*- coding: utf-8 -*- # Created by <NAME> at 2019-05-27 \"\"\"Step_01_model_comparison.py :description :", "os import cobra from matplotlib import pyplot as plt from matplotlib_venn import venn2", "import My_def from My_def.model_report import * if __name__ == '__main__': os.chdir('../../ComplementaryData/Step_02_DraftModels/') # %%", "i in Lreu_ca.metabolites] Lreu_ca_gp_mets = [i.id for i in Lreu_ca_gp.metabolites] # %% <fig", "Lreu_ca.reactions] Lreu_ca_gp_reas = [i.id for i in Lreu_ca_gp.reactions] Lreu_ca_mets = [i.id for i", "if __name__ == '__main__': os.chdir('../../ComplementaryData/Step_02_DraftModels/') # %% <load data> Lreu_ca = cobra.io.load_json_model('CarveMe/Lreu_ca.json') Lreu_ca_gp", "in Lreu_ca_gp.metabolites] # %% <fig compare Lreu_ca and Lreu_ca_gp> # Lreu_ca_gp have more", "= [i.id for i in Lreu_from_iBT721.metabolites] Lreu_from_iNF517_genes = [i.id for i in Lreu_from_iNF517.genes]", "Lreu_from_iML1515.metabolites] # %% <fig compare templated based method models and Lreu_ca_gp> # just", "sep='\\t') bigg_met_df = pd.read_csv('../bigg_database/bigg_met_df.csv', sep='\\t') Lreu_ca_genes = [i.id for i in Lreu_ca.genes] Lreu_ca_gp_genes", "fg3 = venn2([set(Lreu_ca_mets), set(Lreu_ca_gp_mets)], ('Normal','Gram positive'), ax=axes[2]) plt.show() Lreu_from_iBT721_genes = [i.id for i", "cobra from matplotlib import pyplot as plt from matplotlib_venn import venn2 import pandas", "from My_def.model_report import * if __name__ == '__main__': os.chdir('../../ComplementaryData/Step_02_DraftModels/') # %% <load data>", "= cobra.io.load_json_model('CarveMe/Lreu_ca_gp.json') Lreu_from_iNF517 = cobra.io.load_json_model('Template/Lreu_from_iNF517.json') Lreu_from_iBT721 = 
cobra.io.load_json_model('Template/Lreu_from_iBT721.json') Lreu_from_iML1515 = cobra.io.load_json_model('Template/Lreu_from_iML1515.json') bigg_rea_df =", "more figure, axes = plt.subplots(1, 3) axes[0].set_title(\"gene\") axes[1].set_title(\"rea\") axes[2].set_title(\"met\") fg1 = venn2([set(Lreu_ca_genes), set(Lreu_ca_gp_genes)],", "in Lreu_from_iML1515.reactions] Lreu_from_iML1515_mets = [i.id for i in Lreu_from_iML1515.metabolites] # %% <fig compare", "matplotlib import pyplot as plt from matplotlib_venn import venn2 import pandas as pd", "My_def.venn3_samesize([set(Lreu_from_iBT721_reas), set(Lreu_from_iNF517_reas), set(Lreu_from_iML1515_reas)], ('iBT721', 'iNF517','iML1515'), ax=axes[1]) fg_3 = My_def.venn3_samesize([set(Lreu_from_iBT721_mets), set(Lreu_from_iNF517_mets), set(Lreu_from_iML1515_mets)], ('iBT721', 'iNF517','iML1515'),", "# %% <fig compare templated based method models and Lreu_ca_gp> # just a", "for i in Lreu_from_iNF517.reactions] Lreu_from_iNF517_mets = [i.id for i in Lreu_from_iNF517.metabolites] Lreu_from_iML1515_genes =", "Lreu_ca and Lreu_ca_gp> # Lreu_ca_gp have more figure, axes = plt.subplots(1, 3) axes[0].set_title(\"gene\")", "i in Lreu_from_iML1515.reactions] Lreu_from_iML1515_mets = [i.id for i in Lreu_from_iML1515.metabolites] # %% <fig", "[i.id for i in Lreu_ca.metabolites] Lreu_ca_gp_mets = [i.id for i in Lreu_ca_gp.metabolites] #", "Lreu_ca.genes] Lreu_ca_gp_genes = [i.id for i in Lreu_ca_gp.genes] Lreu_ca_reas = [i.id for i", "in Lreu_from_iNF517.metabolites] Lreu_from_iML1515_genes = [i.id for i in Lreu_from_iML1515.genes] Lreu_from_iML1515_reas = [i.id for", "Lreu_from_iML1515.genes] Lreu_from_iML1515_reas = [i.id for i in Lreu_from_iML1515.reactions] Lreu_from_iML1515_mets = [i.id for i", "-*- # Created by <NAME> at 2019-05-27 \"\"\"Step_01_model_comparison.py :description : script :param :", "= venn2([set(Lreu_ca_genes), set(Lreu_ca_gp_genes)], ('Normal','Gram positive' ), ax=axes[0]) # 
fg1.get_patch_by_id('10').set_color('Aquamarine') fg2 = venn2([set(Lreu_ca_reas), set(Lreu_ca_reas)],", "# just a overview figure_2, axes = plt.subplots(1, 3) axes[0].set_title(\"gene\") axes[1].set_title(\"rea\") axes[2].set_title(\"met\") fg_1", "cobra.io.load_json_model('Template/Lreu_from_iNF517.json') Lreu_from_iBT721 = cobra.io.load_json_model('Template/Lreu_from_iBT721.json') Lreu_from_iML1515 = cobra.io.load_json_model('Template/Lreu_from_iML1515.json') bigg_rea_df = pd.read_csv('../bigg_database/bigg_rea_df.csv', sep='\\t') bigg_met_df =", "Lreu_from_iNF517 = cobra.io.load_json_model('Template/Lreu_from_iNF517.json') Lreu_from_iBT721 = cobra.io.load_json_model('Template/Lreu_from_iBT721.json') Lreu_from_iML1515 = cobra.io.load_json_model('Template/Lreu_from_iML1515.json') bigg_rea_df = pd.read_csv('../bigg_database/bigg_rea_df.csv', sep='\\t')", "venn2([set(Lreu_ca_mets), set(Lreu_ca_gp_mets)], ('Normal','Gram positive'), ax=axes[2]) plt.show() Lreu_from_iBT721_genes = [i.id for i in Lreu_from_iBT721.genes]", "i in Lreu_ca_gp.genes] Lreu_ca_reas = [i.id for i in Lreu_ca.reactions] Lreu_ca_gp_reas = [i.id", "= cobra.io.load_json_model('Template/Lreu_from_iNF517.json') Lreu_from_iBT721 = cobra.io.load_json_model('Template/Lreu_from_iBT721.json') Lreu_from_iML1515 = cobra.io.load_json_model('Template/Lreu_from_iML1515.json') bigg_rea_df = pd.read_csv('../bigg_database/bigg_rea_df.csv', sep='\\t') bigg_met_df", "ax=axes[2]) plt.show() Lreu_from_iBT721_genes = [i.id for i in Lreu_from_iBT721.genes] Lreu_from_iBT721_reas = [i.id for", "= [i.id for i in Lreu_from_iML1515.metabolites] # %% <fig compare templated based method", "a overview figure_2, axes = plt.subplots(1, 3) axes[0].set_title(\"gene\") axes[1].set_title(\"rea\") axes[2].set_title(\"met\") fg_1 = My_def.venn3_samesize([set(Lreu_from_iBT721_genes),", "Lreu_ca_gp> # just a overview figure_2, axes = plt.subplots(1, 3) axes[0].set_title(\"gene\") axes[1].set_title(\"rea\") axes[2].set_title(\"met\")", 
"for i in Lreu_ca.reactions] Lreu_ca_gp_reas = [i.id for i in Lreu_ca_gp.reactions] Lreu_ca_mets =", "Lreu_ca_gp.metabolites] # %% <fig compare Lreu_ca and Lreu_ca_gp> # Lreu_ca_gp have more figure,", "for i in Lreu_ca_gp.reactions] Lreu_ca_mets = [i.id for i in Lreu_ca.metabolites] Lreu_ca_gp_mets =", "for i in Lreu_from_iML1515.genes] Lreu_from_iML1515_reas = [i.id for i in Lreu_from_iML1515.reactions] Lreu_from_iML1515_mets =", "= cobra.io.load_json_model('Template/Lreu_from_iBT721.json') Lreu_from_iML1515 = cobra.io.load_json_model('Template/Lreu_from_iML1515.json') bigg_rea_df = pd.read_csv('../bigg_database/bigg_rea_df.csv', sep='\\t') bigg_met_df = pd.read_csv('../bigg_database/bigg_met_df.csv', sep='\\t')", "Lreu_from_iNF517_mets = [i.id for i in Lreu_from_iNF517.metabolites] Lreu_from_iML1515_genes = [i.id for i in", "python # -*- coding: utf-8 -*- # Created by <NAME> at 2019-05-27 \"\"\"Step_01_model_comparison.py", "[i.id for i in Lreu_ca.genes] Lreu_ca_gp_genes = [i.id for i in Lreu_ca_gp.genes] Lreu_ca_reas", "[i.id for i in Lreu_from_iNF517.reactions] Lreu_from_iNF517_mets = [i.id for i in Lreu_from_iNF517.metabolites] Lreu_from_iML1515_genes", "in Lreu_ca.genes] Lreu_ca_gp_genes = [i.id for i in Lreu_ca_gp.genes] Lreu_ca_reas = [i.id for", "%% <load data> Lreu_ca = cobra.io.load_json_model('CarveMe/Lreu_ca.json') Lreu_ca_gp = cobra.io.load_json_model('CarveMe/Lreu_ca_gp.json') Lreu_from_iNF517 = cobra.io.load_json_model('Template/Lreu_from_iNF517.json') Lreu_from_iBT721", "#!/usr/bin/env python # -*- coding: utf-8 -*- # Created by <NAME> at 2019-05-27", "Lreu_from_iML1515_mets = [i.id for i in Lreu_from_iML1515.metabolites] # %% <fig compare templated based", "based method models and Lreu_ca_gp> # just a overview figure_2, axes = plt.subplots(1,", "data> Lreu_ca = cobra.io.load_json_model('CarveMe/Lreu_ca.json') Lreu_ca_gp = cobra.io.load_json_model('CarveMe/Lreu_ca_gp.json') Lreu_from_iNF517 = 
cobra.io.load_json_model('Template/Lreu_from_iNF517.json') Lreu_from_iBT721 = cobra.io.load_json_model('Template/Lreu_from_iBT721.json')", "My_def.model_report import * if __name__ == '__main__': os.chdir('../../ComplementaryData/Step_02_DraftModels/') # %% <load data> Lreu_ca", ": script :param : :returns: :rtype: \"\"\" import os import cobra from matplotlib", "[i.id for i in Lreu_ca.reactions] Lreu_ca_gp_reas = [i.id for i in Lreu_ca_gp.reactions] Lreu_ca_mets", "from matplotlib import pyplot as plt from matplotlib_venn import venn2 import pandas as", "= venn2([set(Lreu_ca_reas), set(Lreu_ca_reas)], ('Normal','Gram positive'), ax=axes[1]) fg3 = venn2([set(Lreu_ca_mets), set(Lreu_ca_gp_mets)], ('Normal','Gram positive'), ax=axes[2])", "[i.id for i in Lreu_from_iML1515.genes] Lreu_from_iML1515_reas = [i.id for i in Lreu_from_iML1515.reactions] Lreu_from_iML1515_mets", "Lreu_ca_gp.reactions] Lreu_ca_mets = [i.id for i in Lreu_ca.metabolites] Lreu_ca_gp_mets = [i.id for i", "Lreu_from_iML1515.reactions] Lreu_from_iML1515_mets = [i.id for i in Lreu_from_iML1515.metabolites] # %% <fig compare templated", "set(Lreu_from_iML1515_reas)], ('iBT721', 'iNF517','iML1515'), ax=axes[1]) fg_3 = My_def.venn3_samesize([set(Lreu_from_iBT721_mets), set(Lreu_from_iNF517_mets), set(Lreu_from_iML1515_mets)], ('iBT721', 'iNF517','iML1515'), ax=axes[2]) plt.show()", "i in Lreu_from_iML1515.metabolites] # %% <fig compare templated based method models and Lreu_ca_gp>", "in Lreu_from_iNF517.genes] Lreu_from_iNF517_reas = [i.id for i in Lreu_from_iNF517.reactions] Lreu_from_iNF517_mets = [i.id for", "<fig compare Lreu_ca and Lreu_ca_gp> # Lreu_ca_gp have more figure, axes = plt.subplots(1,", "i in Lreu_from_iBT721.metabolites] Lreu_from_iNF517_genes = [i.id for i in Lreu_from_iNF517.genes] Lreu_from_iNF517_reas = [i.id", "Created by <NAME> at 2019-05-27 \"\"\"Step_01_model_comparison.py :description : script :param : :returns: :rtype:", "have more figure, axes = plt.subplots(1, 3) 
axes[0].set_title(\"gene\") axes[1].set_title(\"rea\") axes[2].set_title(\"met\") fg1 = venn2([set(Lreu_ca_genes),", "# -*- coding: utf-8 -*- # Created by <NAME> at 2019-05-27 \"\"\"Step_01_model_comparison.py :description", "[i.id for i in Lreu_from_iBT721.reactions] Lreu_from_iBT721_mets = [i.id for i in Lreu_from_iBT721.metabolites] Lreu_from_iNF517_genes", "= [i.id for i in Lreu_from_iML1515.reactions] Lreu_from_iML1515_mets = [i.id for i in Lreu_from_iML1515.metabolites]", "pyplot as plt from matplotlib_venn import venn2 import pandas as pd import My_def", "i in Lreu_from_iBT721.reactions] Lreu_from_iBT721_mets = [i.id for i in Lreu_from_iBT721.metabolites] Lreu_from_iNF517_genes = [i.id", "in Lreu_ca.metabolites] Lreu_ca_gp_mets = [i.id for i in Lreu_ca_gp.metabolites] # %% <fig compare", "2019-05-27 \"\"\"Step_01_model_comparison.py :description : script :param : :returns: :rtype: \"\"\" import os import", "('Normal','Gram positive'), ax=axes[2]) plt.show() Lreu_from_iBT721_genes = [i.id for i in Lreu_from_iBT721.genes] Lreu_from_iBT721_reas =", "axes = plt.subplots(1, 3) axes[0].set_title(\"gene\") axes[1].set_title(\"rea\") axes[2].set_title(\"met\") fg1 = venn2([set(Lreu_ca_genes), set(Lreu_ca_gp_genes)], ('Normal','Gram positive'", "= [i.id for i in Lreu_from_iNF517.reactions] Lreu_from_iNF517_mets = [i.id for i in Lreu_from_iNF517.metabolites]", "('Normal','Gram positive' ), ax=axes[0]) # fg1.get_patch_by_id('10').set_color('Aquamarine') fg2 = venn2([set(Lreu_ca_reas), set(Lreu_ca_reas)], ('Normal','Gram positive'), ax=axes[1])", "axes[2].set_title(\"met\") fg1 = venn2([set(Lreu_ca_genes), set(Lreu_ca_gp_genes)], ('Normal','Gram positive' ), ax=axes[0]) # fg1.get_patch_by_id('10').set_color('Aquamarine') fg2 =", "overview figure_2, axes = plt.subplots(1, 3) axes[0].set_title(\"gene\") axes[1].set_title(\"rea\") axes[2].set_title(\"met\") fg_1 = My_def.venn3_samesize([set(Lreu_from_iBT721_genes), set(Lreu_from_iNF517_genes),", "import venn2 import pandas 
as pd import My_def from My_def.model_report import * if", "pandas as pd import My_def from My_def.model_report import * if __name__ == '__main__':", "[i.id for i in Lreu_from_iML1515.reactions] Lreu_from_iML1515_mets = [i.id for i in Lreu_from_iML1515.metabolites] #", "set(Lreu_from_iNF517_reas), set(Lreu_from_iML1515_reas)], ('iBT721', 'iNF517','iML1515'), ax=axes[1]) fg_3 = My_def.venn3_samesize([set(Lreu_from_iBT721_mets), set(Lreu_from_iNF517_mets), set(Lreu_from_iML1515_mets)], ('iBT721', 'iNF517','iML1515'), ax=axes[2])", "positive'), ax=axes[2]) plt.show() Lreu_from_iBT721_genes = [i.id for i in Lreu_from_iBT721.genes] Lreu_from_iBT721_reas = [i.id", "coding: utf-8 -*- # Created by <NAME> at 2019-05-27 \"\"\"Step_01_model_comparison.py :description : script", "= [i.id for i in Lreu_ca.metabolites] Lreu_ca_gp_mets = [i.id for i in Lreu_ca_gp.metabolites]", "from matplotlib_venn import venn2 import pandas as pd import My_def from My_def.model_report import", "set(Lreu_ca_gp_mets)], ('Normal','Gram positive'), ax=axes[2]) plt.show() Lreu_from_iBT721_genes = [i.id for i in Lreu_from_iBT721.genes] Lreu_from_iBT721_reas", "= My_def.venn3_samesize([set(Lreu_from_iBT721_genes), set(Lreu_from_iNF517_genes), set(Lreu_from_iML1515_genes)], ('iBT721', 'iNF517','iML1515'), ax=axes[0]) fg_2 = My_def.venn3_samesize([set(Lreu_from_iBT721_reas), set(Lreu_from_iNF517_reas), set(Lreu_from_iML1515_reas)], ('iBT721',", "[i.id for i in Lreu_ca_gp.metabolites] # %% <fig compare Lreu_ca and Lreu_ca_gp> #", "Lreu_from_iML1515_reas = [i.id for i in Lreu_from_iML1515.reactions] Lreu_from_iML1515_mets = [i.id for i in", "Lreu_from_iML1515 = cobra.io.load_json_model('Template/Lreu_from_iML1515.json') bigg_rea_df = pd.read_csv('../bigg_database/bigg_rea_df.csv', sep='\\t') bigg_met_df = pd.read_csv('../bigg_database/bigg_met_df.csv', sep='\\t') Lreu_ca_genes =", "= [i.id for i in Lreu_from_iBT721.reactions] Lreu_from_iBT721_mets = [i.id for i in Lreu_from_iBT721.metabolites]", "axes = 
plt.subplots(1, 3) axes[0].set_title(\"gene\") axes[1].set_title(\"rea\") axes[2].set_title(\"met\") fg_1 = My_def.venn3_samesize([set(Lreu_from_iBT721_genes), set(Lreu_from_iNF517_genes), set(Lreu_from_iML1515_genes)], ('iBT721',", "in Lreu_from_iBT721.reactions] Lreu_from_iBT721_mets = [i.id for i in Lreu_from_iBT721.metabolites] Lreu_from_iNF517_genes = [i.id for", "as plt from matplotlib_venn import venn2 import pandas as pd import My_def from", "as pd import My_def from My_def.model_report import * if __name__ == '__main__': os.chdir('../../ComplementaryData/Step_02_DraftModels/')", "Lreu_from_iNF517_genes = [i.id for i in Lreu_from_iNF517.genes] Lreu_from_iNF517_reas = [i.id for i in", "Lreu_ca_gp = cobra.io.load_json_model('CarveMe/Lreu_ca_gp.json') Lreu_from_iNF517 = cobra.io.load_json_model('Template/Lreu_from_iNF517.json') Lreu_from_iBT721 = cobra.io.load_json_model('Template/Lreu_from_iBT721.json') Lreu_from_iML1515 = cobra.io.load_json_model('Template/Lreu_from_iML1515.json') bigg_rea_df", "\"\"\"Step_01_model_comparison.py :description : script :param : :returns: :rtype: \"\"\" import os import cobra", "by <NAME> at 2019-05-27 \"\"\"Step_01_model_comparison.py :description : script :param : :returns: :rtype: \"\"\"", ":returns: :rtype: \"\"\" import os import cobra from matplotlib import pyplot as plt", "Lreu_ca_gp_mets = [i.id for i in Lreu_ca_gp.metabolites] # %% <fig compare Lreu_ca and", "in Lreu_ca_gp.genes] Lreu_ca_reas = [i.id for i in Lreu_ca.reactions] Lreu_ca_gp_reas = [i.id for", "Lreu_ca_gp> # Lreu_ca_gp have more figure, axes = plt.subplots(1, 3) axes[0].set_title(\"gene\") axes[1].set_title(\"rea\") axes[2].set_title(\"met\")", "Lreu_ca_genes = [i.id for i in Lreu_ca.genes] Lreu_ca_gp_genes = [i.id for i in", "method models and Lreu_ca_gp> # just a overview figure_2, axes = plt.subplots(1, 3)", "templated based method models and Lreu_ca_gp> # just a overview figure_2, axes =", "Lreu_from_iBT721_mets = [i.id for i in 
Lreu_from_iBT721.metabolites] Lreu_from_iNF517_genes = [i.id for i in", "ax=axes[0]) # fg1.get_patch_by_id('10').set_color('Aquamarine') fg2 = venn2([set(Lreu_ca_reas), set(Lreu_ca_reas)], ('Normal','Gram positive'), ax=axes[1]) fg3 = venn2([set(Lreu_ca_mets),", "cobra.io.load_json_model('CarveMe/Lreu_ca.json') Lreu_ca_gp = cobra.io.load_json_model('CarveMe/Lreu_ca_gp.json') Lreu_from_iNF517 = cobra.io.load_json_model('Template/Lreu_from_iNF517.json') Lreu_from_iBT721 = cobra.io.load_json_model('Template/Lreu_from_iBT721.json') Lreu_from_iML1515 = cobra.io.load_json_model('Template/Lreu_from_iML1515.json')", "Lreu_from_iBT721.metabolites] Lreu_from_iNF517_genes = [i.id for i in Lreu_from_iNF517.genes] Lreu_from_iNF517_reas = [i.id for i", "My_def from My_def.model_report import * if __name__ == '__main__': os.chdir('../../ComplementaryData/Step_02_DraftModels/') # %% <load", "= venn2([set(Lreu_ca_mets), set(Lreu_ca_gp_mets)], ('Normal','Gram positive'), ax=axes[2]) plt.show() Lreu_from_iBT721_genes = [i.id for i in", "for i in Lreu_from_iML1515.reactions] Lreu_from_iML1515_mets = [i.id for i in Lreu_from_iML1515.metabolites] # %%", "= [i.id for i in Lreu_from_iBT721.genes] Lreu_from_iBT721_reas = [i.id for i in Lreu_from_iBT721.reactions]", "import * if __name__ == '__main__': os.chdir('../../ComplementaryData/Step_02_DraftModels/') # %% <load data> Lreu_ca =", "= pd.read_csv('../bigg_database/bigg_met_df.csv', sep='\\t') Lreu_ca_genes = [i.id for i in Lreu_ca.genes] Lreu_ca_gp_genes = [i.id", "axes[1].set_title(\"rea\") axes[2].set_title(\"met\") fg1 = venn2([set(Lreu_ca_genes), set(Lreu_ca_gp_genes)], ('Normal','Gram positive' ), ax=axes[0]) # fg1.get_patch_by_id('10').set_color('Aquamarine') fg2", "for i in Lreu_ca_gp.metabolites] # %% <fig compare Lreu_ca and Lreu_ca_gp> # Lreu_ca_gp", "fg1 = venn2([set(Lreu_ca_genes), set(Lreu_ca_gp_genes)], ('Normal','Gram positive' ), ax=axes[0]) # fg1.get_patch_by_id('10').set_color('Aquamarine') fg2 = 
venn2([set(Lreu_ca_reas),", "models and Lreu_ca_gp> # just a overview figure_2, axes = plt.subplots(1, 3) axes[0].set_title(\"gene\")", "Lreu_ca = cobra.io.load_json_model('CarveMe/Lreu_ca.json') Lreu_ca_gp = cobra.io.load_json_model('CarveMe/Lreu_ca_gp.json') Lreu_from_iNF517 = cobra.io.load_json_model('Template/Lreu_from_iNF517.json') Lreu_from_iBT721 = cobra.io.load_json_model('Template/Lreu_from_iBT721.json') Lreu_from_iML1515", "set(Lreu_ca_gp_genes)], ('Normal','Gram positive' ), ax=axes[0]) # fg1.get_patch_by_id('10').set_color('Aquamarine') fg2 = venn2([set(Lreu_ca_reas), set(Lreu_ca_reas)], ('Normal','Gram positive'),", "= [i.id for i in Lreu_from_iML1515.genes] Lreu_from_iML1515_reas = [i.id for i in Lreu_from_iML1515.reactions]", ":param : :returns: :rtype: \"\"\" import os import cobra from matplotlib import pyplot", "Lreu_ca_gp have more figure, axes = plt.subplots(1, 3) axes[0].set_title(\"gene\") axes[1].set_title(\"rea\") axes[2].set_title(\"met\") fg1 =", "# Created by <NAME> at 2019-05-27 \"\"\"Step_01_model_comparison.py :description : script :param : :returns:", "\"\"\" import os import cobra from matplotlib import pyplot as plt from matplotlib_venn", "= pd.read_csv('../bigg_database/bigg_rea_df.csv', sep='\\t') bigg_met_df = pd.read_csv('../bigg_database/bigg_met_df.csv', sep='\\t') Lreu_ca_genes = [i.id for i in", "<fig compare templated based method models and Lreu_ca_gp> # just a overview figure_2,", "Lreu_ca_gp_genes = [i.id for i in Lreu_ca_gp.genes] Lreu_ca_reas = [i.id for i in", "ax=axes[1]) fg3 = venn2([set(Lreu_ca_mets), set(Lreu_ca_gp_mets)], ('Normal','Gram positive'), ax=axes[2]) plt.show() Lreu_from_iBT721_genes = [i.id for", "My_def.venn3_samesize([set(Lreu_from_iBT721_genes), set(Lreu_from_iNF517_genes), set(Lreu_from_iML1515_genes)], ('iBT721', 'iNF517','iML1515'), ax=axes[0]) fg_2 = My_def.venn3_samesize([set(Lreu_from_iBT721_reas), set(Lreu_from_iNF517_reas), set(Lreu_from_iML1515_reas)], ('iBT721', 'iNF517','iML1515'),", 
"import os import cobra from matplotlib import pyplot as plt from matplotlib_venn import", "venn2 import pandas as pd import My_def from My_def.model_report import * if __name__", "[i.id for i in Lreu_ca_gp.genes] Lreu_ca_reas = [i.id for i in Lreu_ca.reactions] Lreu_ca_gp_reas", "Lreu_from_iNF517.metabolites] Lreu_from_iML1515_genes = [i.id for i in Lreu_from_iML1515.genes] Lreu_from_iML1515_reas = [i.id for i", "plt from matplotlib_venn import venn2 import pandas as pd import My_def from My_def.model_report", "= cobra.io.load_json_model('Template/Lreu_from_iML1515.json') bigg_rea_df = pd.read_csv('../bigg_database/bigg_rea_df.csv', sep='\\t') bigg_met_df = pd.read_csv('../bigg_database/bigg_met_df.csv', sep='\\t') Lreu_ca_genes = [i.id", "cobra.io.load_json_model('Template/Lreu_from_iML1515.json') bigg_rea_df = pd.read_csv('../bigg_database/bigg_rea_df.csv', sep='\\t') bigg_met_df = pd.read_csv('../bigg_database/bigg_met_df.csv', sep='\\t') Lreu_ca_genes = [i.id for", "in Lreu_from_iBT721.metabolites] Lreu_from_iNF517_genes = [i.id for i in Lreu_from_iNF517.genes] Lreu_from_iNF517_reas = [i.id for", "utf-8 -*- # Created by <NAME> at 2019-05-27 \"\"\"Step_01_model_comparison.py :description : script :param", "i in Lreu_ca.genes] Lreu_ca_gp_genes = [i.id for i in Lreu_ca_gp.genes] Lreu_ca_reas = [i.id", "= [i.id for i in Lreu_from_iNF517.metabolites] Lreu_from_iML1515_genes = [i.id for i in Lreu_from_iML1515.genes]", "import cobra from matplotlib import pyplot as plt from matplotlib_venn import venn2 import", "Lreu_from_iNF517.reactions] Lreu_from_iNF517_mets = [i.id for i in Lreu_from_iNF517.metabolites] Lreu_from_iML1515_genes = [i.id for i", "%% <fig compare templated based method models and Lreu_ca_gp> # just a overview", "set(Lreu_from_iNF517_genes), set(Lreu_from_iML1515_genes)], ('iBT721', 'iNF517','iML1515'), ax=axes[0]) fg_2 = My_def.venn3_samesize([set(Lreu_from_iBT721_reas), set(Lreu_from_iNF517_reas), set(Lreu_from_iML1515_reas)], ('iBT721', 
'iNF517','iML1515'), ax=axes[1])", "pd import My_def from My_def.model_report import * if __name__ == '__main__': os.chdir('../../ComplementaryData/Step_02_DraftModels/') #", "%% <fig compare Lreu_ca and Lreu_ca_gp> # Lreu_ca_gp have more figure, axes =", "in Lreu_from_iML1515.genes] Lreu_from_iML1515_reas = [i.id for i in Lreu_from_iML1515.reactions] Lreu_from_iML1515_mets = [i.id for", "compare templated based method models and Lreu_ca_gp> # just a overview figure_2, axes", "for i in Lreu_from_iBT721.genes] Lreu_from_iBT721_reas = [i.id for i in Lreu_from_iBT721.reactions] Lreu_from_iBT721_mets =", "just a overview figure_2, axes = plt.subplots(1, 3) axes[0].set_title(\"gene\") axes[1].set_title(\"rea\") axes[2].set_title(\"met\") fg_1 =", "), ax=axes[0]) # fg1.get_patch_by_id('10').set_color('Aquamarine') fg2 = venn2([set(Lreu_ca_reas), set(Lreu_ca_reas)], ('Normal','Gram positive'), ax=axes[1]) fg3 =", "[i.id for i in Lreu_from_iNF517.metabolites] Lreu_from_iML1515_genes = [i.id for i in Lreu_from_iML1515.genes] Lreu_from_iML1515_reas", "i in Lreu_ca_gp.metabolites] # %% <fig compare Lreu_ca and Lreu_ca_gp> # Lreu_ca_gp have" ]
[ "RFECV.head() RFECV lr = lm.LinearRegression() rfecv = RFECV() rfecv = RFECV(lr, cv=5, n_jobs=-1)", "as sns X.var(0) XDF.groupby('related_page')['n_count'].mean() XX = X[[c for c in X if not", "ecv.score(XDF.values, y) from sklearn.feature_selection import RFECV RFECV.head() RFECV lr = lm.LinearRegression() rfecv =", "XX.columns XX.var(0).plot(kind='bar') XX.drop(['lifetime'], axis=1).var(0).plot(kind='bar') rfecv.fit(XX, y) rfecv.grid_scores_.max() rfecv.score(XX, y) lasso lasso.fit(XX, y) lasso.score(XX,", "i in range(2, 20): best = SelectKBest(f_regression, k=i) XXX = best.fit_transform(XX.values, y.values) lr", "y.shapoe y.shape XXX.shape ecv.fit(XXX.values, y.values, n_jobs=-1) ecv.n_jobs = -1 ecv.fit(XXX.values, y.values, n_jobs=-1) ecv.fit(XXX.values,", "0.01, 0.1, 1, 10, 100, 1000], cv=5) rcv.fit(XDF.values, y) rcv rcv.score(XDF.values, y) get_ipython().set_next_input('lasso", "linear_model as lm r = lm.Ridge().fit(XDF.values, y) get_ipython().run_line_magic('pinfo', 'r.score') r.score(XDF.values, y) r =", "y.values) ecv.score(XX, y) XDF.columns for c in XDF: if c.startswith('M_') or c.startswith('S_'): print(c,", "axis=1).var(0).plot(kind='bar') rfecv.fit(XX, y) rfecv.grid_scores_.max() rfecv.score(XX, y) lasso lasso.fit(XX, y) lasso.score(XX, y) from sklearn.feature_selection", "0.9, 0.95, 0.99, 1]) ecv.fit(XX.values, y.values) ecv.score(XX, y) XDF.columns for c in XDF:", "XXX.head() y.shapoe y.shape XXX.shape ecv.fit(XXX.values, y.values, n_jobs=-1) ecv.n_jobs = -1 ecv.fit(XXX.values, y.values, n_jobs=-1)", "y.values)) ecv = lm.ElasticNetCV(l1_ratio=[0.1, 0.2, 0.5, 0.75, 0.8, 0.89, 0.9, 0.95, 0.99, 1])", "= lm.Ridge(alpha=0.5).fit(XDF.values, y) r.score(XDF.values, y) get_ipython().run_line_magic('pinfo', 'lm.RidgeCV') get_ipython().run_line_magic('pinfo', 'lm.RidgeCV') rcv = lm.RidgeCV(alphas=[0.001, 0.01,", "if c.startswith('M_')]] XXX = XXX[[c for c in XXX if c.startswith('S_')]] XXX.head() XXX", "= ['redirect', 'related_page', 'lifetime', 
'p_revert', 'M_delta', 'S_delta', 'M_size', 'S_size', 'M_comments', 'S_comments', 'n_count'] XDF", "= lm.LinearRegression().fit(XXX, y.values) print(i, lr.score(XXX, y.values)) for i in range(2, 20): best =", "y) r.score(XDF.values, y) get_ipython().run_line_magic('pinfo', 'lm.RidgeCV') get_ipython().run_line_magic('pinfo', 'lm.RidgeCV') rcv = lm.RidgeCV(alphas=[0.001, 0.01, 0.1, 1,", "RFECV(lr, cv=5, n_jobs=-1) rfecv.fit(XDF.values, y) rfecv.grid_scores_ rfecv.grid_scores_.max() XDF.var(0) get_ipython().run_line_magic('whos', '') df.head() X.shape X.head()", "lm.LinearRegression().fit(XXX, y.values) print(i, lr.score(XXX, y.values)) ecv = lm.ElasticNetCV(l1_ratio=[0.1, 0.2, 0.5, 0.75, 0.8, 0.89,", "range(2, 20): best = SelectKBest(f_regression, k=i) XXX = best.fit_transform(XX.values) lr = lm.LinearRegression().fit(XXX, y.values)", "= pd.concat([df[numCols], catData], axis=1) XDF.head() ix = XDF.index.values.copy() import numpy as np np.random.shuffle(ix)", "X.shape X.head() X.columns XDF.columns XDF.groupby('redirect')['n_count'].mean() get_ipython().run_line_magic('matplotlib', '') import seaborn as sns X.var(0) XDF.groupby('related_page')['n_count'].mean()", "as np np.random.shuffle(ix) X = XDF.loc[ix] y = X.pop('n_count') y.head() XDF.head() from sklearn.linear_model", "y.shape XXX.shape ecv.fit(XXX.values, y.values, n_jobs=-1) ecv.n_jobs = -1 ecv.fit(XXX.values, y.values, n_jobs=-1) ecv.fit(XXX.values, y.values)", "y.values) lr = lm.LinearRegression().fit(XXX, y.values) print(i, lr.score(XXX, y.values)) ecv = lm.ElasticNetCV(l1_ratio=[0.1, 0.2, 0.5,", "XDF.head() from sklearn.linear_model import LinearRegression lr = LinearRegression() lr.fit(XDF.values, y) lr.score(XDF.values, y) from", "XX[[c for c in XX if not c.startswith('S_')]] XX.columns XX.var(0).plot(kind='bar') XX.drop(['lifetime'], axis=1).var(0).plot(kind='bar') rfecv.fit(XX,", "X.pop('n_count') y.head() XDF.head() from sklearn.linear_model import LinearRegression lr = LinearRegression() 
lr.fit(XDF.values, y) lr.score(XDF.values,", "or c.startswith('S_')]] XXX.head() XXX['lifetime'] = XDF['lifetime'] XXX.head() y.shapoe y.shape XXX.shape ecv.fit(XXX.values, y.values, n_jobs=-1)", "y) rfecv.grid_scores_.max() rfecv.score(XX, y) lasso lasso.fit(XX, y) lasso.score(XX, y) from sklearn.feature_selection import SelectKBest", "0.8, 0.89, 0.9, 0.95, 0.99, 1]) ecv.fit(XX.values, y.values) ecv.score(XX, y) XDF.columns for c", "rcv.coef_ ecv = lm.ElasticNetCV() ecv.fit(XDF.values, y) ecv.score(XDF.values, y) from sklearn.feature_selection import RFECV RFECV.head()", "ecv = lm.ElasticNetCV(l1_ratio=[0.1, 0.2, 0.5, 0.75, 0.8, 0.89, 0.9, 0.95, 0.99, 1]) ecv.fit(XX.values,", "df = pd.read_csv('data/wiki/articles.tsv', index_col='article_id') df.head() get_ipython().run_line_magic('pinfo', 'pd.get_dummies') catCols = ['category', 'namespace'] catData =", "'M_comments', 'S_comments', 'n_count'] XDF = pd.concat([df[numCols], catData], axis=1) XDF.head() ix = XDF.index.values.copy() import", "coding: utf-8 import pandas as pd from sklearn.linear_model import LogisticRegressionCV df = pd.read_csv('data/wiki/articles.tsv',", "y) lasso.score(XX, y) from sklearn.feature_selection import SelectKBest from sklearn.feature_selection import f_regression XX.head() get_ipython().run_line_magic('pinfo',", "df['related_page'] = df['related_page'].astype(int) numCols = ['redirect', 'related_page', 'lifetime', 'p_revert', 'M_delta', 'S_delta', 'M_size', 'S_size',", "XDF.groupby('redirect')['n_count'].mean() get_ipython().run_line_magic('matplotlib', '') import seaborn as sns X.var(0) XDF.groupby('related_page')['n_count'].mean() XX = X[[c for", "= lm.LinearRegression() rfecv = RFECV() rfecv = RFECV(lr, cv=5, n_jobs=-1) rfecv.fit(XDF.values, y) rfecv.grid_scores_", "lr = lm.LinearRegression().fit(XXX, y.values) print(i, lr.score(XXX, y.values)) for i in range(2, 20): best", "drop_first=True)) catData = pd.DataFrame(catData, axis=1) catData = pd.concat(catData, axis=1) catData.head() 
df.head() df['related_page'] =", "'pd.get_dummies') catCols = ['category', 'namespace'] catData = [] for c in catCols: catData.append(pd.get_dummies(df[c],", "get_ipython().run_line_magic('whos', '') df.head() X.shape X.head() X.columns XDF.columns XDF.groupby('redirect')['n_count'].mean() get_ipython().run_line_magic('matplotlib', '') import seaborn as", "SelectKBest(f_regression, k=i) XXX = best.fit_transform(XX.values, y.values) lr = lm.LinearRegression().fit(XXX, y.values) print(i, lr.score(XXX, y.values))", "XDF[[c for c in XDF if c.startswith('M_')]] XXX = XXX[[c for c in", "if not c.startswith('M_')]] XX = XX[[c for c in XX if not c.startswith('S_')]]", "from sklearn.feature_selection import RFECV RFECV.head() RFECV lr = lm.LinearRegression() rfecv = RFECV() rfecv", "if c.startswith('S_')]] XXX.head() XXX = XDF[[c for c in XDF if c.startswith('M_') or", "1000], cv=5) rcv.fit(XDF.values, y) rcv rcv.score(XDF.values, y) get_ipython().set_next_input('lasso = lm.LassoCV');get_ipython().run_line_magic('pinfo', 'lm.LassoCV') lasso =", "XDF[[c for c in XDF if c.startswith('M_') or c.startswith('S_')]] XXX.head() XXX['lifetime'] = XDF['lifetime']", "for c in XDF if c.startswith('M_')]] XXX = XXX[[c for c in XXX", "'lifetime', 'p_revert', 'M_delta', 'S_delta', 'M_size', 'S_size', 'M_comments', 'S_comments', 'n_count'] XDF = pd.concat([df[numCols], catData],", "XDF.index.values.copy() import numpy as np np.random.shuffle(ix) X = XDF.loc[ix] y = X.pop('n_count') y.head()", "catData.head() df.head() df['related_page'] = df['related_page'].astype(int) numCols = ['redirect', 'related_page', 'lifetime', 'p_revert', 'M_delta', 'S_delta',", "r.score(XDF.values, y) get_ipython().run_line_magic('pinfo', 'lm.RidgeCV') get_ipython().run_line_magic('pinfo', 'lm.RidgeCV') rcv = lm.RidgeCV(alphas=[0.001, 0.01, 0.1, 1, 10,", "best = SelectKBest(f_regression, k=i) XXX = best.fit_transform(XX.values, y.values) lr = lm.LinearRegression().fit(XXX, y.values) print(i,", 
"lr.fit(XDF.values, y) lr.score(XDF.values, y) from sklearn import linear_model as lm r = lm.Ridge().fit(XDF.values,", "best.fit_transform(XX.values) lr = lm.LinearRegression().fit(XXX, y.values) print(i, lr.score(XXX, y.values)) for i in range(2, 20):", "XXX['lifetime'] = XDF['lifetime'] XXX.head() y.shapoe y.shape XXX.shape ecv.fit(XXX.values, y.values, n_jobs=-1) ecv.n_jobs = -1", "y) r = lm.Ridge(alpha=0.5).fit(XDF.values, y) r.score(XDF.values, y) get_ipython().run_line_magic('pinfo', 'lm.RidgeCV') get_ipython().run_line_magic('pinfo', 'lm.RidgeCV') rcv =", "in XX if not c.startswith('S_')]] XX.columns XX.var(0).plot(kind='bar') XX.drop(['lifetime'], axis=1).var(0).plot(kind='bar') rfecv.fit(XX, y) rfecv.grid_scores_.max() rfecv.score(XX,", "for i in range(2, 20): best = SelectKBest(f_regression, k=i) XXX = best.fit_transform(XX.values, y.values)", "'S_size', 'M_comments', 'S_comments', 'n_count'] XDF = pd.concat([df[numCols], catData], axis=1) XDF.head() ix = XDF.index.values.copy()", "lr.score(XXX, y.values)) for i in range(2, 20): best = SelectKBest(f_regression, k=i) XXX =", "y) get_ipython().run_line_magic('pinfo', 'r.score') r.score(XDF.values, y) r = lm.Ridge(alpha=0.5).fit(XDF.values, y) r.score(XDF.values, y) get_ipython().run_line_magic('pinfo', 'lm.RidgeCV')", "lm.RidgeCV(alphas=[0.001, 0.01, 0.1, 1, 10, 100, 1000], cv=5) rcv.fit(XDF.values, y) rcv rcv.score(XDF.values, y)", "= lm.RidgeCV(alphas=[0.001, 0.01, 0.1, 1, 10, 100, 1000], cv=5) rcv.fit(XDF.values, y) rcv rcv.score(XDF.values,", "ecv.fit(XDF.values, y) ecv.score(XDF.values, y) from sklearn.feature_selection import RFECV RFECV.head() RFECV lr = lm.LinearRegression()", "20): best = SelectKBest(f_regression, k=i) XXX = best.fit_transform(XX.values, y.values) lr = lm.LinearRegression().fit(XXX, y.values)", "XDF = pd.concat([df[numCols], catData], axis=1) XDF.head() ix = XDF.index.values.copy() import numpy as np", "y) get_ipython().set_next_input('lasso = 
lm.LassoCV');get_ipython().run_line_magic('pinfo', 'lm.LassoCV') lasso = lm.LassoCV(n_jobs=-1, cv=5) lasso.fit(XDF.values, y) lasso.score(XDF.values, y)", "if not c.startswith('S_')]] XX.columns XX.var(0).plot(kind='bar') XX.drop(['lifetime'], axis=1).var(0).plot(kind='bar') rfecv.fit(XX, y) rfecv.grid_scores_.max() rfecv.score(XX, y) lasso", "SelectKBest from sklearn.feature_selection import f_regression XX.head() get_ipython().run_line_magic('pinfo', 'SelectKBest') XX.shape for i in range(2,", "= XDF.loc[ix] y = X.pop('n_count') y.head() XDF.head() from sklearn.linear_model import LinearRegression lr =", "get_ipython().run_line_magic('pinfo', 'lm.RidgeCV') rcv = lm.RidgeCV(alphas=[0.001, 0.01, 0.1, 1, 10, 100, 1000], cv=5) rcv.fit(XDF.values,", "rfecv.fit(XDF.values, y) rfecv.grid_scores_ rfecv.grid_scores_.max() XDF.var(0) get_ipython().run_line_magic('whos', '') df.head() X.shape X.head() X.columns XDF.columns XDF.groupby('redirect')['n_count'].mean()", "df.head() get_ipython().run_line_magic('pinfo', 'pd.get_dummies') catCols = ['category', 'namespace'] catData = [] for c in", "X.var(0) XDF.groupby('related_page')['n_count'].mean() XX = X[[c for c in X if not c.startswith('M_')]] XX", "XX.shape for i in range(2, 20): best = SelectKBest(f_regression, k=i) XXX = best.fit_transform(XX.values)", "XXX.head() XXX['lifetime'] = XDF['lifetime'] XXX.head() y.shapoe y.shape XXX.shape ecv.fit(XXX.values, y.values, n_jobs=-1) ecv.n_jobs =", "sklearn.linear_model import LogisticRegressionCV df = pd.read_csv('data/wiki/articles.tsv', index_col='article_id') df.head() get_ipython().run_line_magic('pinfo', 'pd.get_dummies') catCols = ['category',", "XX.var(0) XXX = XDF[[c for c in XDF if c.startswith('M_')]] XXX = XXX[[c", "= df['related_page'].astype(int) numCols = ['redirect', 'related_page', 'lifetime', 'p_revert', 'M_delta', 'S_delta', 'M_size', 'S_size', 'M_comments',", "get_ipython().run_line_magic('pinfo', 'lm.RidgeCV') get_ipython().run_line_magic('pinfo', 
'lm.RidgeCV') rcv = lm.RidgeCV(alphas=[0.001, 0.01, 0.1, 1, 10, 100, 1000],", "XX = X[[c for c in X if not c.startswith('M_')]] XX = XX[[c", "c.startswith('M_') or c.startswith('S_')]] XXX.head() XXX['lifetime'] = XDF['lifetime'] XXX.head() y.shapoe y.shape XXX.shape ecv.fit(XXX.values, y.values,", "XDF.columns XDF.groupby('redirect')['n_count'].mean() get_ipython().run_line_magic('matplotlib', '') import seaborn as sns X.var(0) XDF.groupby('related_page')['n_count'].mean() XX = X[[c", "= best.fit_transform(XX.values) lr = lm.LinearRegression().fit(XXX, y.values) print(i, lr.score(XXX, y.values)) for i in range(2,", "y.values) print(i, lr.score(XXX, y.values)) for i in range(2, 20): best = SelectKBest(f_regression, k=i)", "lr.score(XDF.values, y) from sklearn import linear_model as lm r = lm.Ridge().fit(XDF.values, y) get_ipython().run_line_magic('pinfo',", "lm.LassoCV');get_ipython().run_line_magic('pinfo', 'lm.LassoCV') lasso = lm.LassoCV(n_jobs=-1, cv=5) lasso.fit(XDF.values, y) lasso.score(XDF.values, y) lasso.coef_ rcv.coef_ ecv", "axis=1) catData.head() df.head() df['related_page'] = df['related_page'].astype(int) numCols = ['redirect', 'related_page', 'lifetime', 'p_revert', 'M_delta',", "seaborn as sns X.var(0) XDF.groupby('related_page')['n_count'].mean() XX = X[[c for c in X if", "'SelectKBest') XX.shape for i in range(2, 20): best = SelectKBest(f_regression, k=i) XXX =", "0.1, 1, 10, 100, 1000], cv=5) rcv.fit(XDF.values, y) rcv rcv.score(XDF.values, y) get_ipython().set_next_input('lasso =", "lm.LassoCV(n_jobs=-1, cv=5) lasso.fit(XDF.values, y) lasso.score(XDF.values, y) lasso.coef_ rcv.coef_ ecv = lm.ElasticNetCV() ecv.fit(XDF.values, y)", "y) from sklearn.feature_selection import RFECV RFECV.head() RFECV lr = lm.LinearRegression() rfecv = RFECV()", "RFECV RFECV.head() RFECV lr = lm.LinearRegression() rfecv = RFECV() rfecv = RFECV(lr, cv=5,", "lm.ElasticNetCV() ecv.fit(XDF.values, y) ecv.score(XDF.values, y) from sklearn.feature_selection import RFECV 
RFECV.head() RFECV lr =", "c in XDF if c.startswith('M_')]] XXX = XXX[[c for c in XXX if", "print(i, lr.score(XXX, y.values)) for i in range(2, 20): best = SelectKBest(f_regression, k=i) XXX", "for c in XXX if c.startswith('S_')]] XXX.head() XXX = XDF[[c for c in", "= [] for c in catCols: catData.append(pd.get_dummies(df[c], prefix=c, drop_first=True)) catData = pd.DataFrame(catData, axis=1)", "y) lasso.score(XDF.values, y) lasso.coef_ rcv.coef_ ecv = lm.ElasticNetCV() ecv.fit(XDF.values, y) ecv.score(XDF.values, y) from", "not c.startswith('M_')]] XX = XX[[c for c in XX if not c.startswith('S_')]] XX.columns", "in catCols: catData.append(pd.get_dummies(df[c], prefix=c, drop_first=True)) catData = pd.DataFrame(catData, axis=1) catData = pd.concat(catData, axis=1)", "lasso lasso.fit(XX, y) lasso.score(XX, y) from sklearn.feature_selection import SelectKBest from sklearn.feature_selection import f_regression", "print(c, XDF[c].var()) XDF['n_count'].var() XX.var(0) XXX = XDF[[c for c in XDF if c.startswith('M_')]]", "lasso.score(XDF.values, y) lasso.coef_ rcv.coef_ ecv = lm.ElasticNetCV() ecv.fit(XDF.values, y) ecv.score(XDF.values, y) from sklearn.feature_selection", "i in range(2, 20): best = SelectKBest(f_regression, k=i) XXX = best.fit_transform(XX.values) lr =", "lasso.fit(XDF.values, y) lasso.score(XDF.values, y) lasso.coef_ rcv.coef_ ecv = lm.ElasticNetCV() ecv.fit(XDF.values, y) ecv.score(XDF.values, y)", "= SelectKBest(f_regression, k=i) XXX = best.fit_transform(XX.values) lr = lm.LinearRegression().fit(XXX, y.values) print(i, lr.score(XXX, y.values))", "# coding: utf-8 import pandas as pd from sklearn.linear_model import LogisticRegressionCV df =", "X.columns XDF.columns XDF.groupby('redirect')['n_count'].mean() get_ipython().run_line_magic('matplotlib', '') import seaborn as sns X.var(0) XDF.groupby('related_page')['n_count'].mean() XX =", "lr.score(XXX, y.values)) ecv = lm.ElasticNetCV(l1_ratio=[0.1, 0.2, 0.5, 0.75, 0.8, 0.89, 0.9, 0.95, 0.99,", "y.head() 
XDF.head() from sklearn.linear_model import LinearRegression lr = LinearRegression() lr.fit(XDF.values, y) lr.score(XDF.values, y)", "'related_page', 'lifetime', 'p_revert', 'M_delta', 'S_delta', 'M_size', 'S_size', 'M_comments', 'S_comments', 'n_count'] XDF = pd.concat([df[numCols],", "c.startswith('M_')]] XX = XX[[c for c in XX if not c.startswith('S_')]] XX.columns XX.var(0).plot(kind='bar')", "'M_size', 'S_size', 'M_comments', 'S_comments', 'n_count'] XDF = pd.concat([df[numCols], catData], axis=1) XDF.head() ix =", "y) lasso lasso.fit(XX, y) lasso.score(XX, y) from sklearn.feature_selection import SelectKBest from sklearn.feature_selection import", "import LogisticRegressionCV df = pd.read_csv('data/wiki/articles.tsv', index_col='article_id') df.head() get_ipython().run_line_magic('pinfo', 'pd.get_dummies') catCols = ['category', 'namespace']", "catCols: catData.append(pd.get_dummies(df[c], prefix=c, drop_first=True)) catData = pd.DataFrame(catData, axis=1) catData = pd.concat(catData, axis=1) catData.head()", "'S_comments', 'n_count'] XDF = pd.concat([df[numCols], catData], axis=1) XDF.head() ix = XDF.index.values.copy() import numpy", "cv=5) rcv.fit(XDF.values, y) rcv rcv.score(XDF.values, y) get_ipython().set_next_input('lasso = lm.LassoCV');get_ipython().run_line_magic('pinfo', 'lm.LassoCV') lasso = lm.LassoCV(n_jobs=-1,", "rcv rcv.score(XDF.values, y) get_ipython().set_next_input('lasso = lm.LassoCV');get_ipython().run_line_magic('pinfo', 'lm.LassoCV') lasso = lm.LassoCV(n_jobs=-1, cv=5) lasso.fit(XDF.values, y)", "= XXX[[c for c in XXX if c.startswith('S_')]] XXX.head() XXX = XDF[[c for", "y.values) print(i, lr.score(XXX, y.values)) ecv = lm.ElasticNetCV(l1_ratio=[0.1, 0.2, 0.5, 0.75, 0.8, 0.89, 0.9,", "sklearn.feature_selection import f_regression XX.head() get_ipython().run_line_magic('pinfo', 'SelectKBest') XX.shape for i in range(2, 20): best", "y) XDF.columns for c in XDF: if c.startswith('M_') or c.startswith('S_'): print(c, XDF[c].var()) 
XDF['n_count'].var()", "pd.concat(catData, axis=1) catData.head() df.head() df['related_page'] = df['related_page'].astype(int) numCols = ['redirect', 'related_page', 'lifetime', 'p_revert',", "y) from sklearn import linear_model as lm r = lm.Ridge().fit(XDF.values, y) get_ipython().run_line_magic('pinfo', 'r.score')", "from sklearn.feature_selection import SelectKBest from sklearn.feature_selection import f_regression XX.head() get_ipython().run_line_magic('pinfo', 'SelectKBest') XX.shape for", "from sklearn import linear_model as lm r = lm.Ridge().fit(XDF.values, y) get_ipython().run_line_magic('pinfo', 'r.score') r.score(XDF.values,", "get_ipython().run_line_magic('pinfo', 'r.score') r.score(XDF.values, y) r = lm.Ridge(alpha=0.5).fit(XDF.values, y) r.score(XDF.values, y) get_ipython().run_line_magic('pinfo', 'lm.RidgeCV') get_ipython().run_line_magic('pinfo',", "y) rfecv.grid_scores_ rfecv.grid_scores_.max() XDF.var(0) get_ipython().run_line_magic('whos', '') df.head() X.shape X.head() X.columns XDF.columns XDF.groupby('redirect')['n_count'].mean() get_ipython().run_line_magic('matplotlib',", "np np.random.shuffle(ix) X = XDF.loc[ix] y = X.pop('n_count') y.head() XDF.head() from sklearn.linear_model import", "import seaborn as sns X.var(0) XDF.groupby('related_page')['n_count'].mean() XX = X[[c for c in X", "'S_delta', 'M_size', 'S_size', 'M_comments', 'S_comments', 'n_count'] XDF = pd.concat([df[numCols], catData], axis=1) XDF.head() ix", "import numpy as np np.random.shuffle(ix) X = XDF.loc[ix] y = X.pop('n_count') y.head() XDF.head()", "in XDF if c.startswith('M_')]] XXX = XXX[[c for c in XXX if c.startswith('S_')]]", "XX if not c.startswith('S_')]] XX.columns XX.var(0).plot(kind='bar') XX.drop(['lifetime'], axis=1).var(0).plot(kind='bar') rfecv.fit(XX, y) rfecv.grid_scores_.max() rfecv.score(XX, y)", "lm.Ridge().fit(XDF.values, y) get_ipython().run_line_magic('pinfo', 'r.score') r.score(XDF.values, y) r = lm.Ridge(alpha=0.5).fit(XDF.values, y) 
r.score(XDF.values, y) get_ipython().run_line_magic('pinfo',", "XDF.loc[ix] y = X.pop('n_count') y.head() XDF.head() from sklearn.linear_model import LinearRegression lr = LinearRegression()", "df['related_page'].astype(int) numCols = ['redirect', 'related_page', 'lifetime', 'p_revert', 'M_delta', 'S_delta', 'M_size', 'S_size', 'M_comments', 'S_comments',", "from sklearn.feature_selection import f_regression XX.head() get_ipython().run_line_magic('pinfo', 'SelectKBest') XX.shape for i in range(2, 20):", "= pd.DataFrame(catData, axis=1) catData = pd.concat(catData, axis=1) catData.head() df.head() df['related_page'] = df['related_page'].astype(int) numCols", "0.95, 0.99, 1]) ecv.fit(XX.values, y.values) ecv.score(XX, y) XDF.columns for c in XDF: if", "catData = [] for c in catCols: catData.append(pd.get_dummies(df[c], prefix=c, drop_first=True)) catData = pd.DataFrame(catData,", "f_regression XX.head() get_ipython().run_line_magic('pinfo', 'SelectKBest') XX.shape for i in range(2, 20): best = SelectKBest(f_regression,", "or c.startswith('S_'): print(c, XDF[c].var()) XDF['n_count'].var() XX.var(0) XXX = XDF[[c for c in XDF", "k=i) XXX = best.fit_transform(XX.values) lr = lm.LinearRegression().fit(XXX, y.values) print(i, lr.score(XXX, y.values)) for i", "'lm.LassoCV') lasso = lm.LassoCV(n_jobs=-1, cv=5) lasso.fit(XDF.values, y) lasso.score(XDF.values, y) lasso.coef_ rcv.coef_ ecv =", "lasso.score(XX, y) from sklearn.feature_selection import SelectKBest from sklearn.feature_selection import f_regression XX.head() get_ipython().run_line_magic('pinfo', 'SelectKBest')", "= lm.LinearRegression().fit(XXX, y.values) print(i, lr.score(XXX, y.values)) ecv = lm.ElasticNetCV(l1_ratio=[0.1, 0.2, 0.5, 0.75, 0.8,", "sns X.var(0) XDF.groupby('related_page')['n_count'].mean() XX = X[[c for c in X if not c.startswith('M_')]]", "XDF.groupby('related_page')['n_count'].mean() XX = X[[c for c in X if not c.startswith('M_')]] XX =", "lm.Ridge(alpha=0.5).fit(XDF.values, y) 
r.score(XDF.values, y) get_ipython().run_line_magic('pinfo', 'lm.RidgeCV') get_ipython().run_line_magic('pinfo', 'lm.RidgeCV') rcv = lm.RidgeCV(alphas=[0.001, 0.01, 0.1,", "XDF if c.startswith('M_') or c.startswith('S_')]] XXX.head() XXX['lifetime'] = XDF['lifetime'] XXX.head() y.shapoe y.shape XXX.shape", "index_col='article_id') df.head() get_ipython().run_line_magic('pinfo', 'pd.get_dummies') catCols = ['category', 'namespace'] catData = [] for c", "10, 100, 1000], cv=5) rcv.fit(XDF.values, y) rcv rcv.score(XDF.values, y) get_ipython().set_next_input('lasso = lm.LassoCV');get_ipython().run_line_magic('pinfo', 'lm.LassoCV')", "rfecv.grid_scores_ rfecv.grid_scores_.max() XDF.var(0) get_ipython().run_line_magic('whos', '') df.head() X.shape X.head() X.columns XDF.columns XDF.groupby('redirect')['n_count'].mean() get_ipython().run_line_magic('matplotlib', '')", "XDF: if c.startswith('M_') or c.startswith('S_'): print(c, XDF[c].var()) XDF['n_count'].var() XX.var(0) XXX = XDF[[c for", "y) rcv rcv.score(XDF.values, y) get_ipython().set_next_input('lasso = lm.LassoCV');get_ipython().run_line_magic('pinfo', 'lm.LassoCV') lasso = lm.LassoCV(n_jobs=-1, cv=5) lasso.fit(XDF.values,", "LinearRegression() lr.fit(XDF.values, y) lr.score(XDF.values, y) from sklearn import linear_model as lm r =", "import pandas as pd from sklearn.linear_model import LogisticRegressionCV df = pd.read_csv('data/wiki/articles.tsv', index_col='article_id') df.head()", "= pd.read_csv('data/wiki/articles.tsv', index_col='article_id') df.head() get_ipython().run_line_magic('pinfo', 'pd.get_dummies') catCols = ['category', 'namespace'] catData = []", "import LinearRegression lr = LinearRegression() lr.fit(XDF.values, y) lr.score(XDF.values, y) from sklearn import linear_model", "lm.ElasticNetCV(l1_ratio=[0.1, 0.2, 0.5, 0.75, 0.8, 0.89, 0.9, 0.95, 0.99, 1]) ecv.fit(XX.values, y.values) ecv.score(XX,", "X.head() X.columns XDF.columns XDF.groupby('redirect')['n_count'].mean() 
get_ipython().run_line_magic('matplotlib', '') import seaborn as sns X.var(0) XDF.groupby('related_page')['n_count'].mean() XX", "LogisticRegressionCV df = pd.read_csv('data/wiki/articles.tsv', index_col='article_id') df.head() get_ipython().run_line_magic('pinfo', 'pd.get_dummies') catCols = ['category', 'namespace'] catData", "= X.pop('n_count') y.head() XDF.head() from sklearn.linear_model import LinearRegression lr = LinearRegression() lr.fit(XDF.values, y)", "lasso.fit(XX, y) lasso.score(XX, y) from sklearn.feature_selection import SelectKBest from sklearn.feature_selection import f_regression XX.head()", "'lm.RidgeCV') get_ipython().run_line_magic('pinfo', 'lm.RidgeCV') rcv = lm.RidgeCV(alphas=[0.001, 0.01, 0.1, 1, 10, 100, 1000], cv=5)", "rcv.score(XDF.values, y) get_ipython().set_next_input('lasso = lm.LassoCV');get_ipython().run_line_magic('pinfo', 'lm.LassoCV') lasso = lm.LassoCV(n_jobs=-1, cv=5) lasso.fit(XDF.values, y) lasso.score(XDF.values,", "cv=5, n_jobs=-1) rfecv.fit(XDF.values, y) rfecv.grid_scores_ rfecv.grid_scores_.max() XDF.var(0) get_ipython().run_line_magic('whos', '') df.head() X.shape X.head() X.columns", "get_ipython().run_line_magic('matplotlib', '') import seaborn as sns X.var(0) XDF.groupby('related_page')['n_count'].mean() XX = X[[c for c", "XDF.head() ix = XDF.index.values.copy() import numpy as np np.random.shuffle(ix) X = XDF.loc[ix] y", "c in catCols: catData.append(pd.get_dummies(df[c], prefix=c, drop_first=True)) catData = pd.DataFrame(catData, axis=1) catData = pd.concat(catData,", "= pd.concat(catData, axis=1) catData.head() df.head() df['related_page'] = df['related_page'].astype(int) numCols = ['redirect', 'related_page', 'lifetime',", "c.startswith('S_'): print(c, XDF[c].var()) XDF['n_count'].var() XX.var(0) XXX = XDF[[c for c in XDF if", "XXX.head() XXX = XDF[[c for c in XDF if c.startswith('M_') or c.startswith('S_')]] XXX.head()", "pd.concat([df[numCols], catData], axis=1) XDF.head() ix = XDF.index.values.copy() import 
numpy as np np.random.shuffle(ix) X", "y) lr.score(XDF.values, y) from sklearn import linear_model as lm r = lm.Ridge().fit(XDF.values, y)", "axis=1) XDF.head() ix = XDF.index.values.copy() import numpy as np np.random.shuffle(ix) X = XDF.loc[ix]", "c.startswith('M_') or c.startswith('S_'): print(c, XDF[c].var()) XDF['n_count'].var() XX.var(0) XXX = XDF[[c for c in", "ecv.score(XX, y) XDF.columns for c in XDF: if c.startswith('M_') or c.startswith('S_'): print(c, XDF[c].var())", "XXX = XXX[[c for c in XXX if c.startswith('S_')]] XXX.head() XXX = XDF[[c", "sklearn.linear_model import LinearRegression lr = LinearRegression() lr.fit(XDF.values, y) lr.score(XDF.values, y) from sklearn import", "rfecv.score(XX, y) lasso lasso.fit(XX, y) lasso.score(XX, y) from sklearn.feature_selection import SelectKBest from sklearn.feature_selection", "= lm.LassoCV');get_ipython().run_line_magic('pinfo', 'lm.LassoCV') lasso = lm.LassoCV(n_jobs=-1, cv=5) lasso.fit(XDF.values, y) lasso.score(XDF.values, y) lasso.coef_ rcv.coef_", "sklearn.feature_selection import SelectKBest from sklearn.feature_selection import f_regression XX.head() get_ipython().run_line_magic('pinfo', 'SelectKBest') XX.shape for i", "= XDF['lifetime'] XXX.head() y.shapoe y.shape XXX.shape ecv.fit(XXX.values, y.values, n_jobs=-1) ecv.n_jobs = -1 ecv.fit(XXX.values,", "in XDF: if c.startswith('M_') or c.startswith('S_'): print(c, XDF[c].var()) XDF['n_count'].var() XX.var(0) XXX = XDF[[c", "= XDF.index.values.copy() import numpy as np np.random.shuffle(ix) X = XDF.loc[ix] y = X.pop('n_count')", "= XDF[[c for c in XDF if c.startswith('M_') or c.startswith('S_')]] XXX.head() XXX['lifetime'] =", "for c in XDF if c.startswith('M_') or c.startswith('S_')]] XXX.head() XXX['lifetime'] = XDF['lifetime'] XXX.head()", "= lm.Ridge().fit(XDF.values, y) get_ipython().run_line_magic('pinfo', 'r.score') r.score(XDF.values, y) r = lm.Ridge(alpha=0.5).fit(XDF.values, y) r.score(XDF.values, y)", "numCols = ['redirect', 'related_page', 
'lifetime', 'p_revert', 'M_delta', 'S_delta', 'M_size', 'S_size', 'M_comments', 'S_comments', 'n_count']", "'') df.head() X.shape X.head() X.columns XDF.columns XDF.groupby('redirect')['n_count'].mean() get_ipython().run_line_magic('matplotlib', '') import seaborn as sns", "utf-8 import pandas as pd from sklearn.linear_model import LogisticRegressionCV df = pd.read_csv('data/wiki/articles.tsv', index_col='article_id')", "c.startswith('S_')]] XXX.head() XXX = XDF[[c for c in XDF if c.startswith('M_') or c.startswith('S_')]]", "c.startswith('S_')]] XX.columns XX.var(0).plot(kind='bar') XX.drop(['lifetime'], axis=1).var(0).plot(kind='bar') rfecv.fit(XX, y) rfecv.grid_scores_.max() rfecv.score(XX, y) lasso lasso.fit(XX, y)", "get_ipython().run_line_magic('pinfo', 'SelectKBest') XX.shape for i in range(2, 20): best = SelectKBest(f_regression, k=i) XXX", "if c.startswith('M_') or c.startswith('S_'): print(c, XDF[c].var()) XDF['n_count'].var() XX.var(0) XXX = XDF[[c for c", "r.score(XDF.values, y) r = lm.Ridge(alpha=0.5).fit(XDF.values, y) r.score(XDF.values, y) get_ipython().run_line_magic('pinfo', 'lm.RidgeCV') get_ipython().run_line_magic('pinfo', 'lm.RidgeCV') rcv", "XXX = XDF[[c for c in XDF if c.startswith('M_') or c.startswith('S_')]] XXX.head() XXX['lifetime']", "best.fit_transform(XX.values, y.values) lr = lm.LinearRegression().fit(XXX, y.values) print(i, lr.score(XXX, y.values)) ecv = lm.ElasticNetCV(l1_ratio=[0.1, 0.2,", "y) ecv.score(XDF.values, y) from sklearn.feature_selection import RFECV RFECV.head() RFECV lr = lm.LinearRegression() rfecv", "ix = XDF.index.values.copy() import numpy as np np.random.shuffle(ix) X = XDF.loc[ix] y =", "in range(2, 20): best = SelectKBest(f_regression, k=i) XXX = best.fit_transform(XX.values, y.values) lr =", "r = lm.Ridge().fit(XDF.values, y) get_ipython().run_line_magic('pinfo', 'r.score') r.score(XDF.values, y) r = lm.Ridge(alpha=0.5).fit(XDF.values, y) r.score(XDF.values,", "'n_count'] XDF = pd.concat([df[numCols], 
catData], axis=1) XDF.head() ix = XDF.index.values.copy() import numpy as", "axis=1) catData = pd.concat(catData, axis=1) catData.head() df.head() df['related_page'] = df['related_page'].astype(int) numCols = ['redirect',", "sklearn import linear_model as lm r = lm.Ridge().fit(XDF.values, y) get_ipython().run_line_magic('pinfo', 'r.score') r.score(XDF.values, y)", "for c in XDF: if c.startswith('M_') or c.startswith('S_'): print(c, XDF[c].var()) XDF['n_count'].var() XX.var(0) XXX", "lr = lm.LinearRegression() rfecv = RFECV() rfecv = RFECV(lr, cv=5, n_jobs=-1) rfecv.fit(XDF.values, y)", "'namespace'] catData = [] for c in catCols: catData.append(pd.get_dummies(df[c], prefix=c, drop_first=True)) catData =", "lasso = lm.LassoCV(n_jobs=-1, cv=5) lasso.fit(XDF.values, y) lasso.score(XDF.values, y) lasso.coef_ rcv.coef_ ecv = lm.ElasticNetCV()", "X if not c.startswith('M_')]] XX = XX[[c for c in XX if not", "import f_regression XX.head() get_ipython().run_line_magic('pinfo', 'SelectKBest') XX.shape for i in range(2, 20): best =", "range(2, 20): best = SelectKBest(f_regression, k=i) XXX = best.fit_transform(XX.values, y.values) lr = lm.LinearRegression().fit(XXX,", "= RFECV() rfecv = RFECV(lr, cv=5, n_jobs=-1) rfecv.fit(XDF.values, y) rfecv.grid_scores_ rfecv.grid_scores_.max() XDF.var(0) get_ipython().run_line_magic('whos',", "as pd from sklearn.linear_model import LogisticRegressionCV df = pd.read_csv('data/wiki/articles.tsv', index_col='article_id') df.head() get_ipython().run_line_magic('pinfo', 'pd.get_dummies')", "XDF['n_count'].var() XX.var(0) XXX = XDF[[c for c in XDF if c.startswith('M_')]] XXX =", "XXX = best.fit_transform(XX.values, y.values) lr = lm.LinearRegression().fit(XXX, y.values) print(i, lr.score(XXX, y.values)) ecv =", "catData], axis=1) XDF.head() ix = XDF.index.values.copy() import numpy as np np.random.shuffle(ix) X =", "rfecv = RFECV() rfecv = RFECV(lr, cv=5, n_jobs=-1) rfecv.fit(XDF.values, y) rfecv.grid_scores_ rfecv.grid_scores_.max() 
XDF.var(0)", "k=i) XXX = best.fit_transform(XX.values, y.values) lr = lm.LinearRegression().fit(XXX, y.values) print(i, lr.score(XXX, y.values)) ecv", "numpy as np np.random.shuffle(ix) X = XDF.loc[ix] y = X.pop('n_count') y.head() XDF.head() from", "c.startswith('M_')]] XXX = XXX[[c for c in XXX if c.startswith('S_')]] XXX.head() XXX =", "XDF if c.startswith('M_')]] XXX = XXX[[c for c in XXX if c.startswith('S_')]] XXX.head()", "c in XXX if c.startswith('S_')]] XXX.head() XXX = XDF[[c for c in XDF", "c in XDF: if c.startswith('M_') or c.startswith('S_'): print(c, XDF[c].var()) XDF['n_count'].var() XX.var(0) XXX =", "n_jobs=-1) rfecv.fit(XDF.values, y) rfecv.grid_scores_ rfecv.grid_scores_.max() XDF.var(0) get_ipython().run_line_magic('whos', '') df.head() X.shape X.head() X.columns XDF.columns", "r = lm.Ridge(alpha=0.5).fit(XDF.values, y) r.score(XDF.values, y) get_ipython().run_line_magic('pinfo', 'lm.RidgeCV') get_ipython().run_line_magic('pinfo', 'lm.RidgeCV') rcv = lm.RidgeCV(alphas=[0.001,", "XDF['lifetime'] XXX.head() y.shapoe y.shape XXX.shape ecv.fit(XXX.values, y.values, n_jobs=-1) ecv.n_jobs = -1 ecv.fit(XXX.values, y.values,", "catData = pd.DataFrame(catData, axis=1) catData = pd.concat(catData, axis=1) catData.head() df.head() df['related_page'] = df['related_page'].astype(int)", "ecv.fit(XX.values, y.values) ecv.score(XX, y) XDF.columns for c in XDF: if c.startswith('M_') or c.startswith('S_'):", "y) from sklearn.feature_selection import SelectKBest from sklearn.feature_selection import f_regression XX.head() get_ipython().run_line_magic('pinfo', 'SelectKBest') XX.shape", "1]) ecv.fit(XX.values, y.values) ecv.score(XX, y) XDF.columns for c in XDF: if c.startswith('M_') or", "lm.LinearRegression() rfecv = RFECV() rfecv = RFECV(lr, cv=5, n_jobs=-1) rfecv.fit(XDF.values, y) rfecv.grid_scores_ rfecv.grid_scores_.max()", "for i in range(2, 20): best = SelectKBest(f_regression, k=i) XXX = best.fit_transform(XX.values) lr", "c in XDF if c.startswith('M_') or 
c.startswith('S_')]] XXX.head() XXX['lifetime'] = XDF['lifetime'] XXX.head() y.shapoe", "pd.DataFrame(catData, axis=1) catData = pd.concat(catData, axis=1) catData.head() df.head() df['related_page'] = df['related_page'].astype(int) numCols =", "ecv = lm.ElasticNetCV() ecv.fit(XDF.values, y) ecv.score(XDF.values, y) from sklearn.feature_selection import RFECV RFECV.head() RFECV", "['category', 'namespace'] catData = [] for c in catCols: catData.append(pd.get_dummies(df[c], prefix=c, drop_first=True)) catData", "0.99, 1]) ecv.fit(XX.values, y.values) ecv.score(XX, y) XDF.columns for c in XDF: if c.startswith('M_')", "pd from sklearn.linear_model import LogisticRegressionCV df = pd.read_csv('data/wiki/articles.tsv', index_col='article_id') df.head() get_ipython().run_line_magic('pinfo', 'pd.get_dummies') catCols", "X = XDF.loc[ix] y = X.pop('n_count') y.head() XDF.head() from sklearn.linear_model import LinearRegression lr", "np.random.shuffle(ix) X = XDF.loc[ix] y = X.pop('n_count') y.head() XDF.head() from sklearn.linear_model import LinearRegression", "XDF.var(0) get_ipython().run_line_magic('whos', '') df.head() X.shape X.head() X.columns XDF.columns XDF.groupby('redirect')['n_count'].mean() get_ipython().run_line_magic('matplotlib', '') import seaborn", "ecv.fit(XXX.values, y.values, n_jobs=-1) ecv.n_jobs = -1 ecv.fit(XXX.values, y.values, n_jobs=-1) ecv.fit(XXX.values, y.values) ecv.score(XXX.values, y.values)", "= X[[c for c in X if not c.startswith('M_')]] XX = XX[[c for", "0.2, 0.5, 0.75, 0.8, 0.89, 0.9, 0.95, 0.99, 1]) ecv.fit(XX.values, y.values) ecv.score(XX, y)", "y) lasso.coef_ rcv.coef_ ecv = lm.ElasticNetCV() ecv.fit(XDF.values, y) ecv.score(XDF.values, y) from sklearn.feature_selection import", "lr = lm.LinearRegression().fit(XXX, y.values) print(i, lr.score(XXX, y.values)) ecv = lm.ElasticNetCV(l1_ratio=[0.1, 0.2, 0.5, 0.75,", "XXX[[c for c in XXX if c.startswith('S_')]] XXX.head() XXX = XDF[[c for c", "XX.drop(['lifetime'], 
axis=1).var(0).plot(kind='bar') rfecv.fit(XX, y) rfecv.grid_scores_.max() rfecv.score(XX, y) lasso lasso.fit(XX, y) lasso.score(XX, y) from", "pd.read_csv('data/wiki/articles.tsv', index_col='article_id') df.head() get_ipython().run_line_magic('pinfo', 'pd.get_dummies') catCols = ['category', 'namespace'] catData = [] for", "prefix=c, drop_first=True)) catData = pd.DataFrame(catData, axis=1) catData = pd.concat(catData, axis=1) catData.head() df.head() df['related_page']", "from sklearn.linear_model import LinearRegression lr = LinearRegression() lr.fit(XDF.values, y) lr.score(XDF.values, y) from sklearn", "XDF.columns for c in XDF: if c.startswith('M_') or c.startswith('S_'): print(c, XDF[c].var()) XDF['n_count'].var() XX.var(0)", "lm r = lm.Ridge().fit(XDF.values, y) get_ipython().run_line_magic('pinfo', 'r.score') r.score(XDF.values, y) r = lm.Ridge(alpha=0.5).fit(XDF.values, y)", "<reponame>tejaswiniallikanti/upgrad-mlcloud<gh_stars>0 # coding: utf-8 import pandas as pd from sklearn.linear_model import LogisticRegressionCV df", "in XDF if c.startswith('M_') or c.startswith('S_')]] XXX.head() XXX['lifetime'] = XDF['lifetime'] XXX.head() y.shapoe y.shape", "X[[c for c in X if not c.startswith('M_')]] XX = XX[[c for c", "import linear_model as lm r = lm.Ridge().fit(XDF.values, y) get_ipython().run_line_magic('pinfo', 'r.score') r.score(XDF.values, y) r", "= XX[[c for c in XX if not c.startswith('S_')]] XX.columns XX.var(0).plot(kind='bar') XX.drop(['lifetime'], axis=1).var(0).plot(kind='bar')", "0.5, 0.75, 0.8, 0.89, 0.9, 0.95, 0.99, 1]) ecv.fit(XX.values, y.values) ecv.score(XX, y) XDF.columns", "RFECV lr = lm.LinearRegression() rfecv = RFECV() rfecv = RFECV(lr, cv=5, n_jobs=-1) rfecv.fit(XDF.values,", "lr = LinearRegression() lr.fit(XDF.values, y) lr.score(XDF.values, y) from sklearn import linear_model as lm", "y) get_ipython().run_line_magic('pinfo', 'lm.RidgeCV') get_ipython().run_line_magic('pinfo', 'lm.RidgeCV') rcv = lm.RidgeCV(alphas=[0.001, 0.01, 0.1, 1, 
10, 100,", "rcv.fit(XDF.values, y) rcv rcv.score(XDF.values, y) get_ipython().set_next_input('lasso = lm.LassoCV');get_ipython().run_line_magic('pinfo', 'lm.LassoCV') lasso = lm.LassoCV(n_jobs=-1, cv=5)", "y = X.pop('n_count') y.head() XDF.head() from sklearn.linear_model import LinearRegression lr = LinearRegression() lr.fit(XDF.values,", "print(i, lr.score(XXX, y.values)) ecv = lm.ElasticNetCV(l1_ratio=[0.1, 0.2, 0.5, 0.75, 0.8, 0.89, 0.9, 0.95,", "catData = pd.concat(catData, axis=1) catData.head() df.head() df['related_page'] = df['related_page'].astype(int) numCols = ['redirect', 'related_page',", "lm.LinearRegression().fit(XXX, y.values) print(i, lr.score(XXX, y.values)) for i in range(2, 20): best = SelectKBest(f_regression,", "XDF[c].var()) XDF['n_count'].var() XX.var(0) XXX = XDF[[c for c in XDF if c.startswith('M_')]] XXX", "20): best = SelectKBest(f_regression, k=i) XXX = best.fit_transform(XX.values) lr = lm.LinearRegression().fit(XXX, y.values) print(i,", "1, 10, 100, 1000], cv=5) rcv.fit(XDF.values, y) rcv rcv.score(XDF.values, y) get_ipython().set_next_input('lasso = lm.LassoCV');get_ipython().run_line_magic('pinfo',", "if c.startswith('M_') or c.startswith('S_')]] XXX.head() XXX['lifetime'] = XDF['lifetime'] XXX.head() y.shapoe y.shape XXX.shape ecv.fit(XXX.values,", "pandas as pd from sklearn.linear_model import LogisticRegressionCV df = pd.read_csv('data/wiki/articles.tsv', index_col='article_id') df.head() get_ipython().run_line_magic('pinfo',", "'') import seaborn as sns X.var(0) XDF.groupby('related_page')['n_count'].mean() XX = X[[c for c in", "'lm.RidgeCV') rcv = lm.RidgeCV(alphas=[0.001, 0.01, 0.1, 1, 10, 100, 1000], cv=5) rcv.fit(XDF.values, y)", "rfecv = RFECV(lr, cv=5, n_jobs=-1) rfecv.fit(XDF.values, y) rfecv.grid_scores_ rfecv.grid_scores_.max() XDF.var(0) get_ipython().run_line_magic('whos', '') df.head()", "cv=5) lasso.fit(XDF.values, y) lasso.score(XDF.values, y) lasso.coef_ rcv.coef_ ecv = lm.ElasticNetCV() ecv.fit(XDF.values, y) 
ecv.score(XDF.values,", "c.startswith('S_')]] XXX.head() XXX['lifetime'] = XDF['lifetime'] XXX.head() y.shapoe y.shape XXX.shape ecv.fit(XXX.values, y.values, n_jobs=-1) ecv.n_jobs", "XX = XX[[c for c in XX if not c.startswith('S_')]] XX.columns XX.var(0).plot(kind='bar') XX.drop(['lifetime'],", "rfecv.fit(XX, y) rfecv.grid_scores_.max() rfecv.score(XX, y) lasso lasso.fit(XX, y) lasso.score(XX, y) from sklearn.feature_selection import", "rfecv.grid_scores_.max() XDF.var(0) get_ipython().run_line_magic('whos', '') df.head() X.shape X.head() X.columns XDF.columns XDF.groupby('redirect')['n_count'].mean() get_ipython().run_line_magic('matplotlib', '') import", "get_ipython().set_next_input('lasso = lm.LassoCV');get_ipython().run_line_magic('pinfo', 'lm.LassoCV') lasso = lm.LassoCV(n_jobs=-1, cv=5) lasso.fit(XDF.values, y) lasso.score(XDF.values, y) lasso.coef_", "catCols = ['category', 'namespace'] catData = [] for c in catCols: catData.append(pd.get_dummies(df[c], prefix=c,", "100, 1000], cv=5) rcv.fit(XDF.values, y) rcv rcv.score(XDF.values, y) get_ipython().set_next_input('lasso = lm.LassoCV');get_ipython().run_line_magic('pinfo', 'lm.LassoCV') lasso", "in range(2, 20): best = SelectKBest(f_regression, k=i) XXX = best.fit_transform(XX.values) lr = lm.LinearRegression().fit(XXX,", "for c in X if not c.startswith('M_')]] XX = XX[[c for c in", "'M_delta', 'S_delta', 'M_size', 'S_size', 'M_comments', 'S_comments', 'n_count'] XDF = pd.concat([df[numCols], catData], axis=1) XDF.head()", "XX.var(0).plot(kind='bar') XX.drop(['lifetime'], axis=1).var(0).plot(kind='bar') rfecv.fit(XX, y) rfecv.grid_scores_.max() rfecv.score(XX, y) lasso lasso.fit(XX, y) lasso.score(XX, y)", "get_ipython().run_line_magic('pinfo', 'pd.get_dummies') catCols = ['category', 'namespace'] catData = [] for c in catCols:", "import RFECV RFECV.head() RFECV lr = lm.LinearRegression() rfecv = RFECV() rfecv = RFECV(lr,", "in X if not c.startswith('M_')]] XX = XX[[c for c in XX if", "= ['category', 
'namespace'] catData = [] for c in catCols: catData.append(pd.get_dummies(df[c], prefix=c, drop_first=True))", "df.head() X.shape X.head() X.columns XDF.columns XDF.groupby('redirect')['n_count'].mean() get_ipython().run_line_magic('matplotlib', '') import seaborn as sns X.var(0)", "= SelectKBest(f_regression, k=i) XXX = best.fit_transform(XX.values, y.values) lr = lm.LinearRegression().fit(XXX, y.values) print(i, lr.score(XXX,", "best = SelectKBest(f_regression, k=i) XXX = best.fit_transform(XX.values) lr = lm.LinearRegression().fit(XXX, y.values) print(i, lr.score(XXX,", "= XDF[[c for c in XDF if c.startswith('M_')]] XXX = XXX[[c for c", "lasso.coef_ rcv.coef_ ecv = lm.ElasticNetCV() ecv.fit(XDF.values, y) ecv.score(XDF.values, y) from sklearn.feature_selection import RFECV", "import SelectKBest from sklearn.feature_selection import f_regression XX.head() get_ipython().run_line_magic('pinfo', 'SelectKBest') XX.shape for i in", "XX.head() get_ipython().run_line_magic('pinfo', 'SelectKBest') XX.shape for i in range(2, 20): best = SelectKBest(f_regression, k=i)", "y.values)) for i in range(2, 20): best = SelectKBest(f_regression, k=i) XXX = best.fit_transform(XX.values,", "0.75, 0.8, 0.89, 0.9, 0.95, 0.99, 1]) ecv.fit(XX.values, y.values) ecv.score(XX, y) XDF.columns for", "= lm.LassoCV(n_jobs=-1, cv=5) lasso.fit(XDF.values, y) lasso.score(XDF.values, y) lasso.coef_ rcv.coef_ ecv = lm.ElasticNetCV() ecv.fit(XDF.values,", "in XXX if c.startswith('S_')]] XXX.head() XXX = XDF[[c for c in XDF if", "not c.startswith('S_')]] XX.columns XX.var(0).plot(kind='bar') XX.drop(['lifetime'], axis=1).var(0).plot(kind='bar') rfecv.fit(XX, y) rfecv.grid_scores_.max() rfecv.score(XX, y) lasso lasso.fit(XX,", "= best.fit_transform(XX.values, y.values) lr = lm.LinearRegression().fit(XXX, y.values) print(i, lr.score(XXX, y.values)) ecv = lm.ElasticNetCV(l1_ratio=[0.1,", "'p_revert', 'M_delta', 'S_delta', 'M_size', 'S_size', 'M_comments', 'S_comments', 'n_count'] XDF = 
pd.concat([df[numCols], catData], axis=1)", "'r.score') r.score(XDF.values, y) r = lm.Ridge(alpha=0.5).fit(XDF.values, y) r.score(XDF.values, y) get_ipython().run_line_magic('pinfo', 'lm.RidgeCV') get_ipython().run_line_magic('pinfo', 'lm.RidgeCV')", "['redirect', 'related_page', 'lifetime', 'p_revert', 'M_delta', 'S_delta', 'M_size', 'S_size', 'M_comments', 'S_comments', 'n_count'] XDF =", "c in X if not c.startswith('M_')]] XX = XX[[c for c in XX", "LinearRegression lr = LinearRegression() lr.fit(XDF.values, y) lr.score(XDF.values, y) from sklearn import linear_model as", "0.89, 0.9, 0.95, 0.99, 1]) ecv.fit(XX.values, y.values) ecv.score(XX, y) XDF.columns for c in", "rcv = lm.RidgeCV(alphas=[0.001, 0.01, 0.1, 1, 10, 100, 1000], cv=5) rcv.fit(XDF.values, y) rcv", "rfecv.grid_scores_.max() rfecv.score(XX, y) lasso lasso.fit(XX, y) lasso.score(XX, y) from sklearn.feature_selection import SelectKBest from", "RFECV() rfecv = RFECV(lr, cv=5, n_jobs=-1) rfecv.fit(XDF.values, y) rfecv.grid_scores_ rfecv.grid_scores_.max() XDF.var(0) get_ipython().run_line_magic('whos', '')", "XXX = XDF[[c for c in XDF if c.startswith('M_')]] XXX = XXX[[c for", "= lm.ElasticNetCV() ecv.fit(XDF.values, y) ecv.score(XDF.values, y) from sklearn.feature_selection import RFECV RFECV.head() RFECV lr", "catData.append(pd.get_dummies(df[c], prefix=c, drop_first=True)) catData = pd.DataFrame(catData, axis=1) catData = pd.concat(catData, axis=1) catData.head() df.head()", "from sklearn.linear_model import LogisticRegressionCV df = pd.read_csv('data/wiki/articles.tsv', index_col='article_id') df.head() get_ipython().run_line_magic('pinfo', 'pd.get_dummies') catCols =", "[] for c in catCols: catData.append(pd.get_dummies(df[c], prefix=c, drop_first=True)) catData = pd.DataFrame(catData, axis=1) catData", "XXX = best.fit_transform(XX.values) lr = lm.LinearRegression().fit(XXX, y.values) print(i, lr.score(XXX, y.values)) for i in", "df.head() df['related_page'] = df['related_page'].astype(int) numCols 
= ['redirect', 'related_page', 'lifetime', 'p_revert', 'M_delta', 'S_delta', 'M_size',", "SelectKBest(f_regression, k=i) XXX = best.fit_transform(XX.values) lr = lm.LinearRegression().fit(XXX, y.values) print(i, lr.score(XXX, y.values)) for", "= LinearRegression() lr.fit(XDF.values, y) lr.score(XDF.values, y) from sklearn import linear_model as lm r", "c in XX if not c.startswith('S_')]] XX.columns XX.var(0).plot(kind='bar') XX.drop(['lifetime'], axis=1).var(0).plot(kind='bar') rfecv.fit(XX, y) rfecv.grid_scores_.max()", "for c in catCols: catData.append(pd.get_dummies(df[c], prefix=c, drop_first=True)) catData = pd.DataFrame(catData, axis=1) catData =", "= RFECV(lr, cv=5, n_jobs=-1) rfecv.fit(XDF.values, y) rfecv.grid_scores_ rfecv.grid_scores_.max() XDF.var(0) get_ipython().run_line_magic('whos', '') df.head() X.shape", "XXX if c.startswith('S_')]] XXX.head() XXX = XDF[[c for c in XDF if c.startswith('M_')", "XXX.shape ecv.fit(XXX.values, y.values, n_jobs=-1) ecv.n_jobs = -1 ecv.fit(XXX.values, y.values, n_jobs=-1) ecv.fit(XXX.values, y.values) ecv.score(XXX.values,", "for c in XX if not c.startswith('S_')]] XX.columns XX.var(0).plot(kind='bar') XX.drop(['lifetime'], axis=1).var(0).plot(kind='bar') rfecv.fit(XX, y)", "as lm r = lm.Ridge().fit(XDF.values, y) get_ipython().run_line_magic('pinfo', 'r.score') r.score(XDF.values, y) r = lm.Ridge(alpha=0.5).fit(XDF.values,", "sklearn.feature_selection import RFECV RFECV.head() RFECV lr = lm.LinearRegression() rfecv = RFECV() rfecv =", "= lm.ElasticNetCV(l1_ratio=[0.1, 0.2, 0.5, 0.75, 0.8, 0.89, 0.9, 0.95, 0.99, 1]) ecv.fit(XX.values, y.values)" ]
[ "import Observer class CommandLineLocationDisplay(Observer): def __init__(self): super(CommandLineLocationDisplay, self).__init__() def update(self, *args, **kwargs): \"\"\"Continuously", "print new mac-location pairs\"\"\" argtuple = args[0] mac = argtuple[0] bssid = argtuple[1][0]", "= location['lng'] self.display_location(mac = mac, bssid = bssid, ssid = ssid, lat =", "lng) def display_location(self, mac, bssid, ssid, lat, lng): \"\"\"Take json-formatted location and print", "= args[0] mac = argtuple[0] bssid = argtuple[1][0] ssid = argtuple[1][1] location =", "together with mac\"\"\" print(\"Device {mac} has seen {ssid} ({bssid}) at location {lat}, {lng}\".format(", "mac-location pairs\"\"\" argtuple = args[0] mac = argtuple[0] bssid = argtuple[1][0] ssid =", "mac = mac, ssid = ssid, bssid = bssid, lat = lat, lng", "wirelesstraitor.observer import Observer class CommandLineLocationDisplay(Observer): def __init__(self): super(CommandLineLocationDisplay, self).__init__() def update(self, *args, **kwargs):", "{mac} has seen {ssid} ({bssid}) at location {lat}, {lng}\".format( mac = mac, ssid", "= lng) def display_location(self, mac, bssid, ssid, lat, lng): \"\"\"Take json-formatted location and", "update(self, *args, **kwargs): \"\"\"Continuously print new mac-location pairs\"\"\" argtuple = args[0] mac =", "= argtuple[0] bssid = argtuple[1][0] ssid = argtuple[1][1] location = argtuple[1][2] lat =", "at location {lat}, {lng}\".format( mac = mac, ssid = ssid, bssid = bssid,", "bssid, ssid, lat, lng): \"\"\"Take json-formatted location and print it together with mac\"\"\"", "argtuple[0] bssid = argtuple[1][0] ssid = argtuple[1][1] location = argtuple[1][2] lat = location['lat']", "bssid, ssid = ssid, lat = lat, lng = lng) def display_location(self, mac,", "lat = lat, lng = lng) def display_location(self, mac, bssid, ssid, lat, lng):", "= location['lat'] lng = location['lng'] self.display_location(mac = mac, bssid = bssid, ssid =", "= bssid, ssid = ssid, lat = lat, 
lng = lng) def display_location(self,", "lng): \"\"\"Take json-formatted location and print it together with mac\"\"\" print(\"Device {mac} has", "super(CommandLineLocationDisplay, self).__init__() def update(self, *args, **kwargs): \"\"\"Continuously print new mac-location pairs\"\"\" argtuple =", "def update(self, *args, **kwargs): \"\"\"Continuously print new mac-location pairs\"\"\" argtuple = args[0] mac", "ssid = argtuple[1][1] location = argtuple[1][2] lat = location['lat'] lng = location['lng'] self.display_location(mac", "<filename>wirelesstraitor/display.py<gh_stars>0 from wirelesstraitor.observer import Observer class CommandLineLocationDisplay(Observer): def __init__(self): super(CommandLineLocationDisplay, self).__init__() def update(self,", "= ssid, lat = lat, lng = lng) def display_location(self, mac, bssid, ssid,", "display_location(self, mac, bssid, ssid, lat, lng): \"\"\"Take json-formatted location and print it together", "location {lat}, {lng}\".format( mac = mac, ssid = ssid, bssid = bssid, lat", "argtuple = args[0] mac = argtuple[0] bssid = argtuple[1][0] ssid = argtuple[1][1] location", "bssid = argtuple[1][0] ssid = argtuple[1][1] location = argtuple[1][2] lat = location['lat'] lng", "self.display_location(mac = mac, bssid = bssid, ssid = ssid, lat = lat, lng", "mac, bssid, ssid, lat, lng): \"\"\"Take json-formatted location and print it together with", "ssid = ssid, lat = lat, lng = lng) def display_location(self, mac, bssid,", "**kwargs): \"\"\"Continuously print new mac-location pairs\"\"\" argtuple = args[0] mac = argtuple[0] bssid", "location = argtuple[1][2] lat = location['lat'] lng = location['lng'] self.display_location(mac = mac, bssid", "argtuple[1][0] ssid = argtuple[1][1] location = argtuple[1][2] lat = location['lat'] lng = location['lng']", "lat = location['lat'] lng = location['lng'] self.display_location(mac = mac, bssid = bssid, ssid", "({bssid}) at location {lat}, {lng}\".format( mac = mac, ssid = ssid, bssid =", "ssid, 
lat = lat, lng = lng) def display_location(self, mac, bssid, ssid, lat,", "\"\"\"Continuously print new mac-location pairs\"\"\" argtuple = args[0] mac = argtuple[0] bssid =", "json-formatted location and print it together with mac\"\"\" print(\"Device {mac} has seen {ssid}", "print it together with mac\"\"\" print(\"Device {mac} has seen {ssid} ({bssid}) at location", "location['lng'] self.display_location(mac = mac, bssid = bssid, ssid = ssid, lat = lat,", "class CommandLineLocationDisplay(Observer): def __init__(self): super(CommandLineLocationDisplay, self).__init__() def update(self, *args, **kwargs): \"\"\"Continuously print new", "lng = location['lng'] self.display_location(mac = mac, bssid = bssid, ssid = ssid, lat", "lng = lng) def display_location(self, mac, bssid, ssid, lat, lng): \"\"\"Take json-formatted location", "argtuple[1][2] lat = location['lat'] lng = location['lng'] self.display_location(mac = mac, bssid = bssid,", "def __init__(self): super(CommandLineLocationDisplay, self).__init__() def update(self, *args, **kwargs): \"\"\"Continuously print new mac-location pairs\"\"\"", "it together with mac\"\"\" print(\"Device {mac} has seen {ssid} ({bssid}) at location {lat},", "has seen {ssid} ({bssid}) at location {lat}, {lng}\".format( mac = mac, ssid =", "= argtuple[1][2] lat = location['lat'] lng = location['lng'] self.display_location(mac = mac, bssid =", "pairs\"\"\" argtuple = args[0] mac = argtuple[0] bssid = argtuple[1][0] ssid = argtuple[1][1]", "def display_location(self, mac, bssid, ssid, lat, lng): \"\"\"Take json-formatted location and print it", "lat, lng): \"\"\"Take json-formatted location and print it together with mac\"\"\" print(\"Device {mac}", "location and print it together with mac\"\"\" print(\"Device {mac} has seen {ssid} ({bssid})", "\"\"\"Take json-formatted location and print it together with mac\"\"\" print(\"Device {mac} has seen", "{ssid} ({bssid}) at location {lat}, {lng}\".format( mac = mac, ssid = ssid, bssid", "bssid 
= bssid, ssid = ssid, lat = lat, lng = lng) def", "{lng}\".format( mac = mac, ssid = ssid, bssid = bssid, lat = lat,", "seen {ssid} ({bssid}) at location {lat}, {lng}\".format( mac = mac, ssid = ssid,", "and print it together with mac\"\"\" print(\"Device {mac} has seen {ssid} ({bssid}) at", "mac, ssid = ssid, bssid = bssid, lat = lat, lng = lng))", "new mac-location pairs\"\"\" argtuple = args[0] mac = argtuple[0] bssid = argtuple[1][0] ssid", "= mac, bssid = bssid, ssid = ssid, lat = lat, lng =", "ssid, lat, lng): \"\"\"Take json-formatted location and print it together with mac\"\"\" print(\"Device", "*args, **kwargs): \"\"\"Continuously print new mac-location pairs\"\"\" argtuple = args[0] mac = argtuple[0]", "print(\"Device {mac} has seen {ssid} ({bssid}) at location {lat}, {lng}\".format( mac = mac,", "= argtuple[1][1] location = argtuple[1][2] lat = location['lat'] lng = location['lng'] self.display_location(mac =", "= argtuple[1][0] ssid = argtuple[1][1] location = argtuple[1][2] lat = location['lat'] lng =", "__init__(self): super(CommandLineLocationDisplay, self).__init__() def update(self, *args, **kwargs): \"\"\"Continuously print new mac-location pairs\"\"\" argtuple", "from wirelesstraitor.observer import Observer class CommandLineLocationDisplay(Observer): def __init__(self): super(CommandLineLocationDisplay, self).__init__() def update(self, *args,", "Observer class CommandLineLocationDisplay(Observer): def __init__(self): super(CommandLineLocationDisplay, self).__init__() def update(self, *args, **kwargs): \"\"\"Continuously print", "with mac\"\"\" print(\"Device {mac} has seen {ssid} ({bssid}) at location {lat}, {lng}\".format( mac", "argtuple[1][1] location = argtuple[1][2] lat = location['lat'] lng = location['lng'] self.display_location(mac = mac,", "args[0] mac = argtuple[0] bssid = argtuple[1][0] ssid = argtuple[1][1] location = argtuple[1][2]", "= lat, lng = lng) def display_location(self, mac, bssid, ssid, lat, lng): \"\"\"Take", 
"self).__init__() def update(self, *args, **kwargs): \"\"\"Continuously print new mac-location pairs\"\"\" argtuple = args[0]", "= mac, ssid = ssid, bssid = bssid, lat = lat, lng =", "mac = argtuple[0] bssid = argtuple[1][0] ssid = argtuple[1][1] location = argtuple[1][2] lat", "mac, bssid = bssid, ssid = ssid, lat = lat, lng = lng)", "lat, lng = lng) def display_location(self, mac, bssid, ssid, lat, lng): \"\"\"Take json-formatted", "{lat}, {lng}\".format( mac = mac, ssid = ssid, bssid = bssid, lat =", "location['lat'] lng = location['lng'] self.display_location(mac = mac, bssid = bssid, ssid = ssid,", "mac\"\"\" print(\"Device {mac} has seen {ssid} ({bssid}) at location {lat}, {lng}\".format( mac =", "CommandLineLocationDisplay(Observer): def __init__(self): super(CommandLineLocationDisplay, self).__init__() def update(self, *args, **kwargs): \"\"\"Continuously print new mac-location" ]
[ "ConfigType = TypeVar('ConfigType', bound=ConfigBase) def load(config: ConfigType, d: Dict[str, Any]) -> ConfigType: fields", "str keyword to tell apart instances in the same window. Returns ------- PySimpleGUI.Frame", "\"\"\" Return dict of ConfigBase class. \"\"\" return {key:getattr(self, key) for key in", "All config class must inherit this class. \"\"\" def __init__(self) -> None: \"\"\"", "field, getattr(self, field)) @property def fields(self) -> List[str]: \"\"\" Fields of configuration. \"\"\"", "typing import Dict, List, Any, TypeVar import PySimpleGUI as sg class ConfigBase(ABC): \"\"\"", "sg class ConfigBase(ABC): \"\"\" Base class of config class. All config class must", "for key in self.fields} @abstractmethod def GUI(self, parent: str='') -> sg.Frame: \"\"\" GUI.", "variables. \"\"\" for field in self.fields: setattr(self, field, getattr(self, field)) @property def fields(self)", "} elif field in {'color', 'stroke_color'}: kwargs = { field: tuple(val[:3]) if type(val)", "not in d: continue val = d[field] if type(val) is dict: kwargs =", "load(getattr(config, field), val,) } elif field in {'color', 'stroke_color'}: kwargs = { field:", "configuration. \"\"\" return list(self.__annotations__.keys()) def update(self, *args: Any, **kwargs: Any) -> None: \"\"\"", "field: load(getattr(config, field), val,) } elif field in {'color', 'stroke_color'}: kwargs = {", "Dict[str, Any]) -> ConfigType: fields = config.fields for field in fields: if field", "GUI. Parameters ---------- parent : str keyword to tell apart instances in the", "type(val) is dict: kwargs = { field: load(getattr(config, field), val,) } elif field", "in the same window. 
Returns ------- PySimpleGUI.Frame \"\"\" pass ConfigType = TypeVar('ConfigType', bound=ConfigBase)", "\"\"\" pass ConfigType = TypeVar('ConfigType', bound=ConfigBase) def load(config: ConfigType, d: Dict[str, Any]) ->", "load(config: ConfigType, d: Dict[str, Any]) -> ConfigType: fields = config.fields for field in", "field in {'color', 'stroke_color'}: kwargs = { field: tuple(val[:3]) if type(val) is list", "---------- parent : str keyword to tell apart instances in the same window.", "variables as instance variables. \"\"\" for field in self.fields: setattr(self, field, getattr(self, field))", "\"\"\" return list(self.__annotations__.keys()) def update(self, *args: Any, **kwargs: Any) -> None: \"\"\" Update", "for field in fields: if field not in d: continue val = d[field]", "Fields of configuration. \"\"\" return list(self.__annotations__.keys()) def update(self, *args: Any, **kwargs: Any) ->", "setattr(self, field, getattr(self, field)) @property def fields(self) -> List[str]: \"\"\" Fields of configuration.", "-> ConfigType: fields = config.fields for field in fields: if field not in", "Base class of config class. All config class must inherit this class. \"\"\"", "d: continue val = d[field] if type(val) is dict: kwargs = { field:", "config class must inherit this class. \"\"\" def __init__(self) -> None: \"\"\" Set", "Update instance variables with kwargs. \"\"\" for key, val in kwargs.items(): if key", "variables with kwargs. \"\"\" for key, val in kwargs.items(): if key in self.__annotations__:", "tuple(val[:3]) if type(val) is list else val } else: kwargs = { field:", "Any, TypeVar import PySimpleGUI as sg class ConfigBase(ABC): \"\"\" Base class of config", "{'color', 'stroke_color'}: kwargs = { field: tuple(val[:3]) if type(val) is list else val", "class variables as instance variables. 
\"\"\" for field in self.fields: setattr(self, field, getattr(self,", "for field in self.fields: setattr(self, field, getattr(self, field)) @property def fields(self) -> List[str]:", "if key in self.__annotations__: setattr(self, key, val) def asdict(self) -> Dict[str, Any]: \"\"\"", "in kwargs.items(): if key in self.__annotations__: setattr(self, key, val) def asdict(self) -> Dict[str,", "class. All config class must inherit this class. \"\"\" def __init__(self) -> None:", "import PySimpleGUI as sg class ConfigBase(ABC): \"\"\" Base class of config class. All", "ConfigType: fields = config.fields for field in fields: if field not in d:", "= { field: load(getattr(config, field), val,) } elif field in {'color', 'stroke_color'}: kwargs", "val in kwargs.items(): if key in self.__annotations__: setattr(self, key, val) def asdict(self) ->", ": str keyword to tell apart instances in the same window. Returns -------", "type(val) is list else val } else: kwargs = { field: val }", "config class. All config class must inherit this class. \"\"\" def __init__(self) ->", "self.__annotations__: setattr(self, key, val) def asdict(self) -> Dict[str, Any]: \"\"\" Return dict of", "instance variables. \"\"\" for field in self.fields: setattr(self, field, getattr(self, field)) @property def", "kwargs = { field: load(getattr(config, field), val,) } elif field in {'color', 'stroke_color'}:", "key, val in kwargs.items(): if key in self.__annotations__: setattr(self, key, val) def asdict(self)", "in {'color', 'stroke_color'}: kwargs = { field: tuple(val[:3]) if type(val) is list else", "def GUI(self, parent: str='') -> sg.Frame: \"\"\" GUI. Parameters ---------- parent : str", "Returns ------- PySimpleGUI.Frame \"\"\" pass ConfigType = TypeVar('ConfigType', bound=ConfigBase) def load(config: ConfigType, d:", "GUI(self, parent: str='') -> sg.Frame: \"\"\" GUI. Parameters ---------- parent : str keyword", "dict of ConfigBase class. 
\"\"\" return {key:getattr(self, key) for key in self.fields} @abstractmethod", "else val } else: kwargs = { field: val } config.update(**kwargs) return config", "keyword to tell apart instances in the same window. Returns ------- PySimpleGUI.Frame \"\"\"", "is dict: kwargs = { field: load(getattr(config, field), val,) } elif field in", "in self.__annotations__: setattr(self, key, val) def asdict(self) -> Dict[str, Any]: \"\"\" Return dict", "{key:getattr(self, key) for key in self.fields} @abstractmethod def GUI(self, parent: str='') -> sg.Frame:", "if field not in d: continue val = d[field] if type(val) is dict:", "field not in d: continue val = d[field] if type(val) is dict: kwargs", "def load(config: ConfigType, d: Dict[str, Any]) -> ConfigType: fields = config.fields for field", "with kwargs. \"\"\" for key, val in kwargs.items(): if key in self.__annotations__: setattr(self,", "PySimpleGUI as sg class ConfigBase(ABC): \"\"\" Base class of config class. All config", "as sg class ConfigBase(ABC): \"\"\" Base class of config class. All config class", "\"\"\" for key, val in kwargs.items(): if key in self.__annotations__: setattr(self, key, val)", "key) for key in self.fields} @abstractmethod def GUI(self, parent: str='') -> sg.Frame: \"\"\"", "val,) } elif field in {'color', 'stroke_color'}: kwargs = { field: tuple(val[:3]) if", "inherit this class. \"\"\" def __init__(self) -> None: \"\"\" Set class variables as", "import Dict, List, Any, TypeVar import PySimpleGUI as sg class ConfigBase(ABC): \"\"\" Base", "instance variables with kwargs. \"\"\" for key, val in kwargs.items(): if key in", "elif field in {'color', 'stroke_color'}: kwargs = { field: tuple(val[:3]) if type(val) is", "in self.fields} @abstractmethod def GUI(self, parent: str='') -> sg.Frame: \"\"\" GUI. Parameters ----------", "window. 
Returns ------- PySimpleGUI.Frame \"\"\" pass ConfigType = TypeVar('ConfigType', bound=ConfigBase) def load(config: ConfigType,", "None: \"\"\" Set class variables as instance variables. \"\"\" for field in self.fields:", "field in self.fields: setattr(self, field, getattr(self, field)) @property def fields(self) -> List[str]: \"\"\"", "self.fields: setattr(self, field, getattr(self, field)) @property def fields(self) -> List[str]: \"\"\" Fields of", "kwargs = { field: tuple(val[:3]) if type(val) is list else val } else:", "of configuration. \"\"\" return list(self.__annotations__.keys()) def update(self, *args: Any, **kwargs: Any) -> None:", "setattr(self, key, val) def asdict(self) -> Dict[str, Any]: \"\"\" Return dict of ConfigBase", "apart instances in the same window. Returns ------- PySimpleGUI.Frame \"\"\" pass ConfigType =", "for key, val in kwargs.items(): if key in self.__annotations__: setattr(self, key, val) def", "Dict, List, Any, TypeVar import PySimpleGUI as sg class ConfigBase(ABC): \"\"\" Base class", "class of config class. All config class must inherit this class. \"\"\" def", "-> List[str]: \"\"\" Fields of configuration. \"\"\" return list(self.__annotations__.keys()) def update(self, *args: Any,", "\"\"\" Set class variables as instance variables. \"\"\" for field in self.fields: setattr(self,", "-> Dict[str, Any]: \"\"\" Return dict of ConfigBase class. \"\"\" return {key:getattr(self, key)", "\"\"\" def __init__(self) -> None: \"\"\" Set class variables as instance variables. \"\"\"", "@property def fields(self) -> List[str]: \"\"\" Fields of configuration. \"\"\" return list(self.__annotations__.keys()) def", "Parameters ---------- parent : str keyword to tell apart instances in the same", "instances in the same window. Returns ------- PySimpleGUI.Frame \"\"\" pass ConfigType = TypeVar('ConfigType',", "fields(self) -> List[str]: \"\"\" Fields of configuration. 
\"\"\" return list(self.__annotations__.keys()) def update(self, *args:", "= config.fields for field in fields: if field not in d: continue val", "field in fields: if field not in d: continue val = d[field] if", "\"\"\" GUI. Parameters ---------- parent : str keyword to tell apart instances in", "**kwargs: Any) -> None: \"\"\" Update instance variables with kwargs. \"\"\" for key,", "-> sg.Frame: \"\"\" GUI. Parameters ---------- parent : str keyword to tell apart", "\"\"\" Base class of config class. All config class must inherit this class.", "def update(self, *args: Any, **kwargs: Any) -> None: \"\"\" Update instance variables with", "fields: if field not in d: continue val = d[field] if type(val) is", "field)) @property def fields(self) -> List[str]: \"\"\" Fields of configuration. \"\"\" return list(self.__annotations__.keys())", "update(self, *args: Any, **kwargs: Any) -> None: \"\"\" Update instance variables with kwargs.", "self.fields} @abstractmethod def GUI(self, parent: str='') -> sg.Frame: \"\"\" GUI. Parameters ---------- parent", "to tell apart instances in the same window. Returns ------- PySimpleGUI.Frame \"\"\" pass", "= TypeVar('ConfigType', bound=ConfigBase) def load(config: ConfigType, d: Dict[str, Any]) -> ConfigType: fields =", "sg.Frame: \"\"\" GUI. Parameters ---------- parent : str keyword to tell apart instances", "None: \"\"\" Update instance variables with kwargs. \"\"\" for key, val in kwargs.items():", "ABC, abstractmethod from typing import Dict, List, Any, TypeVar import PySimpleGUI as sg", "TypeVar import PySimpleGUI as sg class ConfigBase(ABC): \"\"\" Base class of config class.", "ConfigType, d: Dict[str, Any]) -> ConfigType: fields = config.fields for field in fields:", "@abstractmethod def GUI(self, parent: str='') -> sg.Frame: \"\"\" GUI. Parameters ---------- parent :", "-> None: \"\"\" Set class variables as instance variables. \"\"\" for field in", "as instance variables. 
\"\"\" for field in self.fields: setattr(self, field, getattr(self, field)) @property", "list(self.__annotations__.keys()) def update(self, *args: Any, **kwargs: Any) -> None: \"\"\" Update instance variables", "\"\"\" return {key:getattr(self, key) for key in self.fields} @abstractmethod def GUI(self, parent: str='')", "\"\"\" for field in self.fields: setattr(self, field, getattr(self, field)) @property def fields(self) ->", "parent : str keyword to tell apart instances in the same window. Returns", "TypeVar('ConfigType', bound=ConfigBase) def load(config: ConfigType, d: Dict[str, Any]) -> ConfigType: fields = config.fields", "class ConfigBase(ABC): \"\"\" Base class of config class. All config class must inherit", "= d[field] if type(val) is dict: kwargs = { field: load(getattr(config, field), val,)", "is list else val } else: kwargs = { field: val } config.update(**kwargs)", "__init__(self) -> None: \"\"\" Set class variables as instance variables. \"\"\" for field", "def fields(self) -> List[str]: \"\"\" Fields of configuration. \"\"\" return list(self.__annotations__.keys()) def update(self,", "pass ConfigType = TypeVar('ConfigType', bound=ConfigBase) def load(config: ConfigType, d: Dict[str, Any]) -> ConfigType:", "if type(val) is list else val } else: kwargs = { field: val", "kwargs.items(): if key in self.__annotations__: setattr(self, key, val) def asdict(self) -> Dict[str, Any]:", "-> None: \"\"\" Update instance variables with kwargs. \"\"\" for key, val in", "{ field: tuple(val[:3]) if type(val) is list else val } else: kwargs =", "key in self.fields} @abstractmethod def GUI(self, parent: str='') -> sg.Frame: \"\"\" GUI. Parameters", "------- PySimpleGUI.Frame \"\"\" pass ConfigType = TypeVar('ConfigType', bound=ConfigBase) def load(config: ConfigType, d: Dict[str,", "fields = config.fields for field in fields: if field not in d: continue", "this class. 
\"\"\" def __init__(self) -> None: \"\"\" Set class variables as instance", "from abc import ABC, abstractmethod from typing import Dict, List, Any, TypeVar import", "in d: continue val = d[field] if type(val) is dict: kwargs = {", "\"\"\" Update instance variables with kwargs. \"\"\" for key, val in kwargs.items(): if", "Any, **kwargs: Any) -> None: \"\"\" Update instance variables with kwargs. \"\"\" for", "Any]) -> ConfigType: fields = config.fields for field in fields: if field not", "{ field: load(getattr(config, field), val,) } elif field in {'color', 'stroke_color'}: kwargs =", "the same window. Returns ------- PySimpleGUI.Frame \"\"\" pass ConfigType = TypeVar('ConfigType', bound=ConfigBase) def", "parent: str='') -> sg.Frame: \"\"\" GUI. Parameters ---------- parent : str keyword to", "bound=ConfigBase) def load(config: ConfigType, d: Dict[str, Any]) -> ConfigType: fields = config.fields for", "Return dict of ConfigBase class. \"\"\" return {key:getattr(self, key) for key in self.fields}", "from typing import Dict, List, Any, TypeVar import PySimpleGUI as sg class ConfigBase(ABC):", "Dict[str, Any]: \"\"\" Return dict of ConfigBase class. \"\"\" return {key:getattr(self, key) for", "= { field: tuple(val[:3]) if type(val) is list else val } else: kwargs", "abc import ABC, abstractmethod from typing import Dict, List, Any, TypeVar import PySimpleGUI", "class. \"\"\" return {key:getattr(self, key) for key in self.fields} @abstractmethod def GUI(self, parent:", "in fields: if field not in d: continue val = d[field] if type(val)", "class. \"\"\" def __init__(self) -> None: \"\"\" Set class variables as instance variables.", "must inherit this class. \"\"\" def __init__(self) -> None: \"\"\" Set class variables", "same window. Returns ------- PySimpleGUI.Frame \"\"\" pass ConfigType = TypeVar('ConfigType', bound=ConfigBase) def load(config:", "ConfigBase class. 
\"\"\" return {key:getattr(self, key) for key in self.fields} @abstractmethod def GUI(self,", "return list(self.__annotations__.keys()) def update(self, *args: Any, **kwargs: Any) -> None: \"\"\" Update instance", "getattr(self, field)) @property def fields(self) -> List[str]: \"\"\" Fields of configuration. \"\"\" return", "Any]: \"\"\" Return dict of ConfigBase class. \"\"\" return {key:getattr(self, key) for key", "tell apart instances in the same window. Returns ------- PySimpleGUI.Frame \"\"\" pass ConfigType", "list else val } else: kwargs = { field: val } config.update(**kwargs) return", "d: Dict[str, Any]) -> ConfigType: fields = config.fields for field in fields: if", "\"\"\" Fields of configuration. \"\"\" return list(self.__annotations__.keys()) def update(self, *args: Any, **kwargs: Any)", "key, val) def asdict(self) -> Dict[str, Any]: \"\"\" Return dict of ConfigBase class.", "List[str]: \"\"\" Fields of configuration. \"\"\" return list(self.__annotations__.keys()) def update(self, *args: Any, **kwargs:", "kwargs. \"\"\" for key, val in kwargs.items(): if key in self.__annotations__: setattr(self, key,", "of config class. All config class must inherit this class. \"\"\" def __init__(self)", "val = d[field] if type(val) is dict: kwargs = { field: load(getattr(config, field),", "val) def asdict(self) -> Dict[str, Any]: \"\"\" Return dict of ConfigBase class. \"\"\"", "continue val = d[field] if type(val) is dict: kwargs = { field: load(getattr(config,", "d[field] if type(val) is dict: kwargs = { field: load(getattr(config, field), val,) }", "str='') -> sg.Frame: \"\"\" GUI. Parameters ---------- parent : str keyword to tell", "in self.fields: setattr(self, field, getattr(self, field)) @property def fields(self) -> List[str]: \"\"\" Fields", "return {key:getattr(self, key) for key in self.fields} @abstractmethod def GUI(self, parent: str='') ->", "Set class variables as instance variables. 
\"\"\" for field in self.fields: setattr(self, field,", "def __init__(self) -> None: \"\"\" Set class variables as instance variables. \"\"\" for", "import ABC, abstractmethod from typing import Dict, List, Any, TypeVar import PySimpleGUI as", "dict: kwargs = { field: load(getattr(config, field), val,) } elif field in {'color',", "PySimpleGUI.Frame \"\"\" pass ConfigType = TypeVar('ConfigType', bound=ConfigBase) def load(config: ConfigType, d: Dict[str, Any])", "List, Any, TypeVar import PySimpleGUI as sg class ConfigBase(ABC): \"\"\" Base class of", "class must inherit this class. \"\"\" def __init__(self) -> None: \"\"\" Set class", "asdict(self) -> Dict[str, Any]: \"\"\" Return dict of ConfigBase class. \"\"\" return {key:getattr(self,", "field: tuple(val[:3]) if type(val) is list else val } else: kwargs = {", "*args: Any, **kwargs: Any) -> None: \"\"\" Update instance variables with kwargs. \"\"\"", "def asdict(self) -> Dict[str, Any]: \"\"\" Return dict of ConfigBase class. \"\"\" return", "'stroke_color'}: kwargs = { field: tuple(val[:3]) if type(val) is list else val }", "ConfigBase(ABC): \"\"\" Base class of config class. All config class must inherit this", "field), val,) } elif field in {'color', 'stroke_color'}: kwargs = { field: tuple(val[:3])", "abstractmethod from typing import Dict, List, Any, TypeVar import PySimpleGUI as sg class", "of ConfigBase class. \"\"\" return {key:getattr(self, key) for key in self.fields} @abstractmethod def", "<gh_stars>0 from abc import ABC, abstractmethod from typing import Dict, List, Any, TypeVar", "Any) -> None: \"\"\" Update instance variables with kwargs. \"\"\" for key, val", "if type(val) is dict: kwargs = { field: load(getattr(config, field), val,) } elif", "key in self.__annotations__: setattr(self, key, val) def asdict(self) -> Dict[str, Any]: \"\"\" Return", "config.fields for field in fields: if field not in d: continue val =" ]
[ "not in method_dispatcher.keys(): # flag = False raise Exception('method must be in one", "2: result[\"status_code\"] = \"0001\" result[\"result_info\"] = \"Expect two wordString for getSimBetween\" return result", "len(word_list) > 1: result[\"status_code\"] = \"0001\" result[\"result_info\"] = \"Expect one wordString for getSim\"", "\"wordString\" not in params.keys(): raise Exception('\"method\" and \"wordString\" are expected as the Query", "a single word string :return: the sim words list of the given word", "must be in one of ' + str(list(method_dispatcher.keys()))) def lambda_handler(event, context): result =", "lambda word_list,: get_similarity_between(word_list) } def validate_event(event): \"\"\" This function will validate the event", "words \"\"\" result = {\"status_code\": \"0000\"} if len(word_list) != 2: result[\"status_code\"] = \"0001\"", "> 1: result[\"status_code\"] = \"0001\" result[\"result_info\"] = \"Expect one wordString for getSim\" return", "of two given words :param word_list: list of two words A B for", "B for similarity calculation :return: cosine similarity of the two given words \"\"\"", "= model.similar_by_word(word) result[\"sim_words\"] = sim_words except Exception as e: result[\"status_code\"] = \"0001\" result[\"result_info\"]", "cosine similarity of the two given words \"\"\" result = {\"status_code\": \"0000\"} if", "= params[\"wordString\"] result = method_dispatcher[method](word_list) result[\"request_info\"] = event[\"multiValueQueryStringParameters\"] result[\"model_version\"] = MODEL_VERSION response[\"body\"] =", "getSimBetween\" return result try: word_a = word_list[0] word_b = word_list[1] similarity = model.similarity(word_a,", "= model.get_vector(word) result[\"vec\"] = str(np.array(vec).tolist()) except Exception as e: result[\"status_code\"] = \"0001\" result[\"result_info\"]", "single word string :return: the vector list of this word \"\"\" result =", "of the given word \"\"\" result = {\"status_code\": \"0000\"} 
if len(word_list) > 1:", "result def get_sim_by_word(word_list): \"\"\" This method will return a list of the similar", "# flag = False method = params.get(\"method\") if len(method) != 1: # flag", "import json import numpy as np MODEL_VERSION = \"glove-wiki-gigaword-300\" model = models.KeyedVectors.load_word2vec_format(MODEL_VERSION) def", "if exists :param event: :return: \"\"\" params = event[\"multiValueQueryStringParameters\"] if \"method\" not in", "'statusCode': 200, 'body': \"\" } try: validate_event(event) except Exception as e: result[\"status_code\"] =", "wordString for getVec\" return result word = word_list[0] try: vec = model.get_vector(word) result[\"vec\"]", "word_list[0] try: sim_words = model.similar_by_word(word) result[\"sim_words\"] = sim_words except Exception as e: result[\"status_code\"]", "\"Expect two wordString for getSimBetween\" return result try: word_a = word_list[0] word_b =", "in method_dispatcher.keys(): # flag = False raise Exception('method must be in one of", "similarity of the two given words \"\"\" result = {\"status_code\": \"0000\"} if len(word_list)", "result method_dispatcher = { \"getVec\": lambda word_list,: get_word_vec(word_list), \"getSim\": lambda word_list,: get_sim_by_word(word_list), \"getSimBetween\":", "models import json import numpy as np MODEL_VERSION = \"glove-wiki-gigaword-300\" model = models.KeyedVectors.load_word2vec_format(MODEL_VERSION)", "params = event[\"multiValueQueryStringParameters\"] if \"method\" not in params.keys() or \"wordString\" not in params.keys():", "Exception as e: result[\"status_code\"] = \"0001\" result[\"result_info\"] = str(e) result[\"request_info\"] = event[\"multiValueQueryStringParameters\"] result[\"model_version\"]", "Exception('method must be in one of ' + str(list(method_dispatcher.keys()))) def lambda_handler(event, context): result", "one of ' + str(list(method_dispatcher.keys()))) def lambda_handler(event, context): result = {} response =", "as e: 
result[\"status_code\"] = \"0001\" result[\"result_info\"] = str(e) return result def get_similarity_between(word_list): \"\"\"", "= str(e) return result def get_similarity_between(word_list): \"\"\" This method will get the similarity", "result[\"status_code\"] = \"0001\" result[\"result_info\"] = str(e) result[\"request_info\"] = event[\"multiValueQueryStringParameters\"] result[\"model_version\"] = MODEL_VERSION response[\"body\"]", "This method will get the similarity of two given words :param word_list: list", "words by the given word :param word_list: list of a single word string", "\"Expect one wordString for getVec\" return result word = word_list[0] try: vec =", "except Exception as e: result[\"status_code\"] = \"0001\" result[\"result_info\"] = str(e) return result method_dispatcher", "getVec\" return result word = word_list[0] try: vec = model.get_vector(word) result[\"vec\"] = str(np.array(vec).tolist())", "try: word_a = word_list[0] word_b = word_list[1] similarity = model.similarity(word_a, word_b) result[\"similarity\"] =", "in params.keys() or \"wordString\" not in params.keys(): raise Exception('\"method\" and \"wordString\" are expected", "e: result[\"status_code\"] = \"0001\" result[\"result_info\"] = str(e) return result method_dispatcher = { \"getVec\":", "\"\"\" result = {\"status_code\": \"0000\"} if len(word_list) != 2: result[\"status_code\"] = \"0001\" result[\"result_info\"]", "result[\"similarity\"] = str(similarity) except Exception as e: result[\"status_code\"] = \"0001\" result[\"result_info\"] = str(e)", "if __name__ == \"__main__\": f = open('mock_event.json') mock_event = json.load(f) f.close() print(lambda_handler(mock_event, context=\"\"))", "numpy as np MODEL_VERSION = \"glove-wiki-gigaword-300\" model = models.KeyedVectors.load_word2vec_format(MODEL_VERSION) def get_word_vec(word_list): \"\"\" This", "not in params.keys(): raise Exception('\"method\" and \"wordString\" are expected as the Query Params')", "of this word \"\"\" 
result = {\"status_code\": \"0000\"} if len(word_list) > 1: result[\"status_code\"]", "raise Exception('method must be in one of ' + str(list(method_dispatcher.keys()))) def lambda_handler(event, context):", "getSim\" return result word = word_list[0] try: sim_words = model.similar_by_word(word) result[\"sim_words\"] = sim_words", "word_list[0] try: vec = model.get_vector(word) result[\"vec\"] = str(np.array(vec).tolist()) except Exception as e: result[\"status_code\"]", "1: result[\"status_code\"] = \"0001\" result[\"result_info\"] = \"Expect one wordString for getVec\" return result", "return result try: word_a = word_list[0] word_b = word_list[1] similarity = model.similarity(word_a, word_b)", "word_list,: get_sim_by_word(word_list), \"getSimBetween\": lambda word_list,: get_similarity_between(word_list) } def validate_event(event): \"\"\" This function will", "Exception as e: result[\"status_code\"] = \"0001\" result[\"result_info\"] = str(e) return result method_dispatcher =", "\"\"\" This method will get the vector of the given word :param word_list:", "method will return a list of the similar words by the given word", "single word string :return: the sim words list of the given word \"\"\"", "get_sim_by_word(word_list): \"\"\" This method will return a list of the similar words by", "list of the similar words by the given word :param word_list: list of", "exists :param event: :return: \"\"\" params = event[\"multiValueQueryStringParameters\"] if \"method\" not in params.keys()", "from gensim import models import json import numpy as np MODEL_VERSION = \"glove-wiki-gigaword-300\"", "return result def get_similarity_between(word_list): \"\"\" This method will get the similarity of two", "result[\"model_version\"] = MODEL_VERSION response[\"body\"] = json.dumps(result) return response params = event[\"multiValueQueryStringParameters\"] method =", "\"0000\"} if len(word_list) > 1: result[\"status_code\"] = \"0001\" result[\"result_info\"] = \"Expect one 
wordString", "= \"0001\" result[\"result_info\"] = str(e) return result def get_sim_by_word(word_list): \"\"\" This method will", "result[\"status_code\"] = \"0001\" result[\"result_info\"] = str(e) return result def get_similarity_between(word_list): \"\"\" This method", "or \"wordString\" not in params.keys(): raise Exception('\"method\" and \"wordString\" are expected as the", "= \"0001\" result[\"result_info\"] = str(e) return result def get_similarity_between(word_list): \"\"\" This method will", "param') method = method[0] if method not in method_dispatcher.keys(): # flag = False", "event send from API gateway to Lambda and raise exception if exists :param", "result[\"request_info\"] = event[\"multiValueQueryStringParameters\"] result[\"model_version\"] = MODEL_VERSION response[\"body\"] = json.dumps(result) return response params =", "result[\"request_info\"] = event[\"multiValueQueryStringParameters\"] result[\"model_version\"] = MODEL_VERSION response[\"body\"] = json.dumps(result) print(response) return response if", "event[\"multiValueQueryStringParameters\"] if \"method\" not in params.keys() or \"wordString\" not in params.keys(): raise Exception('\"method\"", "word_list,: get_similarity_between(word_list) } def validate_event(event): \"\"\" This function will validate the event send", "list of two words A B for similarity calculation :return: cosine similarity of", "# flag = False raise Exception('Expect one value for method param') method =", "= {\"status_code\": \"0000\"} if len(word_list) != 2: result[\"status_code\"] = \"0001\" result[\"result_info\"] = \"Expect", "a list of the similar words by the given word :param word_list: list", "False method = params.get(\"method\") if len(method) != 1: # flag = False raise", "method = params.get(\"method\") if len(method) != 1: # flag = False raise Exception('Expect", "words list of the given word \"\"\" result = {\"status_code\": \"0000\"} if len(word_list)", "flag = False raise Exception('Expect one 
value for method param') method = method[0]", "get_word_vec(word_list), \"getSim\": lambda word_list,: get_sim_by_word(word_list), \"getSimBetween\": lambda word_list,: get_similarity_between(word_list) } def validate_event(event): \"\"\"", "model.similarity(word_a, word_b) result[\"similarity\"] = str(similarity) except Exception as e: result[\"status_code\"] = \"0001\" result[\"result_info\"]", "This method will return a list of the similar words by the given", "result = {\"status_code\": \"0000\"} if len(word_list) > 1: result[\"status_code\"] = \"0001\" result[\"result_info\"] =", "response[\"body\"] = json.dumps(result) print(response) return response if __name__ == \"__main__\": f = open('mock_event.json')", "params.keys() or \"wordString\" not in params.keys(): raise Exception('\"method\" and \"wordString\" are expected as", "Lambda and raise exception if exists :param event: :return: \"\"\" params = event[\"multiValueQueryStringParameters\"]", "of a single word string :return: the sim words list of the given", "= {} response = { 'statusCode': 200, 'body': \"\" } try: validate_event(event) except", "\"0001\" result[\"result_info\"] = str(e) return result def get_similarity_between(word_list): \"\"\" This method will get", "A B for similarity calculation :return: cosine similarity of the two given words", "\"\"\" params = event[\"multiValueQueryStringParameters\"] if \"method\" not in params.keys() or \"wordString\" not in", "def get_word_vec(word_list): \"\"\" This method will get the vector of the given word", "\"\"\" This method will return a list of the similar words by the", "for similarity calculation :return: cosine similarity of the two given words \"\"\" result", "except Exception as e: result[\"status_code\"] = \"0001\" result[\"result_info\"] = str(e) result[\"request_info\"] = event[\"multiValueQueryStringParameters\"]", "False raise Exception('method must be in one of ' + str(list(method_dispatcher.keys()))) def lambda_handler(event,", "result = 
method_dispatcher[method](word_list) result[\"request_info\"] = event[\"multiValueQueryStringParameters\"] result[\"model_version\"] = MODEL_VERSION response[\"body\"] = json.dumps(result) print(response)", "= params.get(\"method\") if len(method) != 1: # flag = False raise Exception('Expect one", "method param') method = method[0] if method not in method_dispatcher.keys(): # flag =", "= json.dumps(result) return response params = event[\"multiValueQueryStringParameters\"] method = params[\"method\"][0] word_list = params[\"wordString\"]", "word string :return: the sim words list of the given word \"\"\" result", "len(word_list) != 2: result[\"status_code\"] = \"0001\" result[\"result_info\"] = \"Expect two wordString for getSimBetween\"", "validate_event(event): \"\"\" This function will validate the event send from API gateway to", "event: :return: \"\"\" params = event[\"multiValueQueryStringParameters\"] if \"method\" not in params.keys() or \"wordString\"", "method = params[\"method\"][0] word_list = params[\"wordString\"] result = method_dispatcher[method](word_list) result[\"request_info\"] = event[\"multiValueQueryStringParameters\"] result[\"model_version\"]", "len(word_list) > 1: result[\"status_code\"] = \"0001\" result[\"result_info\"] = \"Expect one wordString for getVec\"", "response = { 'statusCode': 200, 'body': \"\" } try: validate_event(event) except Exception as", "result[\"result_info\"] = \"Expect one wordString for getSim\" return result word = word_list[0] try:", "API gateway to Lambda and raise exception if exists :param event: :return: \"\"\"", "result[\"status_code\"] = \"0001\" result[\"result_info\"] = \"Expect one wordString for getSim\" return result word", "result[\"result_info\"] = str(e) return result def get_similarity_between(word_list): \"\"\" This method will get the", "= {\"status_code\": \"0000\"} if len(word_list) > 1: result[\"status_code\"] = \"0001\" result[\"result_info\"] = \"Expect", "list of a single word string 
:return: the sim words list of the", "if \"method\" not in params.keys() or \"wordString\" not in params.keys(): raise Exception('\"method\" and", "try: validate_event(event) except Exception as e: result[\"status_code\"] = \"0001\" result[\"result_info\"] = str(e) result[\"request_info\"]", ":return: cosine similarity of the two given words \"\"\" result = {\"status_code\": \"0000\"}", "word_list,: get_word_vec(word_list), \"getSim\": lambda word_list,: get_sim_by_word(word_list), \"getSimBetween\": lambda word_list,: get_similarity_between(word_list) } def validate_event(event):", "json.dumps(result) print(response) return response if __name__ == \"__main__\": f = open('mock_event.json') mock_event =", "params.keys(): raise Exception('\"method\" and \"wordString\" are expected as the Query Params') # flag", "def get_sim_by_word(word_list): \"\"\" This method will return a list of the similar words", "method not in method_dispatcher.keys(): # flag = False raise Exception('method must be in", "word_b) result[\"similarity\"] = str(similarity) except Exception as e: result[\"status_code\"] = \"0001\" result[\"result_info\"] =", "string :return: the vector list of this word \"\"\" result = {\"status_code\": \"0000\"}", "lambda word_list,: get_sim_by_word(word_list), \"getSimBetween\": lambda word_list,: get_similarity_between(word_list) } def validate_event(event): \"\"\" This function", "Exception as e: result[\"status_code\"] = \"0001\" result[\"result_info\"] = str(e) return result def get_similarity_between(word_list):", "will validate the event send from API gateway to Lambda and raise exception", "Exception as e: result[\"status_code\"] = \"0001\" result[\"result_info\"] = str(e) return result def get_sim_by_word(word_list):", "str(list(method_dispatcher.keys()))) def lambda_handler(event, context): result = {} response = { 'statusCode': 200, 'body':", "= \"glove-wiki-gigaword-300\" model = models.KeyedVectors.load_word2vec_format(MODEL_VERSION) def 
get_word_vec(word_list): \"\"\" This method will get the", "\"glove-wiki-gigaword-300\" model = models.KeyedVectors.load_word2vec_format(MODEL_VERSION) def get_word_vec(word_list): \"\"\" This method will get the vector", "event[\"multiValueQueryStringParameters\"] result[\"model_version\"] = MODEL_VERSION response[\"body\"] = json.dumps(result) return response params = event[\"multiValueQueryStringParameters\"] method", "{\"status_code\": \"0000\"} if len(word_list) != 2: result[\"status_code\"] = \"0001\" result[\"result_info\"] = \"Expect two", "result word = word_list[0] try: sim_words = model.similar_by_word(word) result[\"sim_words\"] = sim_words except Exception", "\"0000\"} if len(word_list) != 2: result[\"status_code\"] = \"0001\" result[\"result_info\"] = \"Expect two wordString", "= \"Expect one wordString for getSim\" return result word = word_list[0] try: sim_words", "= event[\"multiValueQueryStringParameters\"] if \"method\" not in params.keys() or \"wordString\" not in params.keys(): raise", "len(method) != 1: # flag = False raise Exception('Expect one value for method", "str(e) return result def get_sim_by_word(word_list): \"\"\" This method will return a list of", "value for method param') method = method[0] if method not in method_dispatcher.keys(): #", "\"\"\" result = {\"status_code\": \"0000\"} if len(word_list) > 1: result[\"status_code\"] = \"0001\" result[\"result_info\"]", "word_a = word_list[0] word_b = word_list[1] similarity = model.similarity(word_a, word_b) result[\"similarity\"] = str(similarity)", "return a list of the similar words by the given word :param word_list:", "the two given words \"\"\" result = {\"status_code\": \"0000\"} if len(word_list) != 2:", "\"0001\" result[\"result_info\"] = \"Expect two wordString for getSimBetween\" return result try: word_a =", "' + str(list(method_dispatcher.keys()))) def lambda_handler(event, context): result = {} response = { 'statusCode':", "words :param word_list: list of two words A B for 
similarity calculation :return:", "get_word_vec(word_list): \"\"\" This method will get the vector of the given word :param", "will return a list of the similar words by the given word :param", "def get_similarity_between(word_list): \"\"\" This method will get the similarity of two given words", "result[\"model_version\"] = MODEL_VERSION response[\"body\"] = json.dumps(result) print(response) return response if __name__ == \"__main__\":", "calculation :return: cosine similarity of the two given words \"\"\" result = {\"status_code\":", "print(response) return response if __name__ == \"__main__\": f = open('mock_event.json') mock_event = json.load(f)", "the sim words list of the given word \"\"\" result = {\"status_code\": \"0000\"}", "word_list[0] word_b = word_list[1] similarity = model.similarity(word_a, word_b) result[\"similarity\"] = str(similarity) except Exception", "from API gateway to Lambda and raise exception if exists :param event: :return:", "result[\"result_info\"] = str(e) return result def get_sim_by_word(word_list): \"\"\" This method will return a", "method_dispatcher = { \"getVec\": lambda word_list,: get_word_vec(word_list), \"getSim\": lambda word_list,: get_sim_by_word(word_list), \"getSimBetween\": lambda", "{ 'statusCode': 200, 'body': \"\" } try: validate_event(event) except Exception as e: result[\"status_code\"]", "result def get_similarity_between(word_list): \"\"\" This method will get the similarity of two given", "get the similarity of two given words :param word_list: list of two words", ":param word_list: list of two words A B for similarity calculation :return: cosine", "json.dumps(result) return response params = event[\"multiValueQueryStringParameters\"] method = params[\"method\"][0] word_list = params[\"wordString\"] result", "str(similarity) except Exception as e: result[\"status_code\"] = \"0001\" result[\"result_info\"] = str(e) return result", "MODEL_VERSION = \"glove-wiki-gigaword-300\" model = 
models.KeyedVectors.load_word2vec_format(MODEL_VERSION) def get_word_vec(word_list): \"\"\" This method will get", "word string :return: the vector list of this word \"\"\" result = {\"status_code\":", "False raise Exception('Expect one value for method param') method = method[0] if method", "try: sim_words = model.similar_by_word(word) result[\"sim_words\"] = sim_words except Exception as e: result[\"status_code\"] =", "list of a single word string :return: the vector list of this word", "the similarity of two given words :param word_list: list of two words A", "\"getSimBetween\": lambda word_list,: get_similarity_between(word_list) } def validate_event(event): \"\"\" This function will validate the", "model = models.KeyedVectors.load_word2vec_format(MODEL_VERSION) def get_word_vec(word_list): \"\"\" This method will get the vector of", "\"0001\" result[\"result_info\"] = str(e) result[\"request_info\"] = event[\"multiValueQueryStringParameters\"] result[\"model_version\"] = MODEL_VERSION response[\"body\"] = json.dumps(result)", "wordString for getSim\" return result word = word_list[0] try: sim_words = model.similar_by_word(word) result[\"sim_words\"]", ":param event: :return: \"\"\" params = event[\"multiValueQueryStringParameters\"] if \"method\" not in params.keys() or", "MODEL_VERSION response[\"body\"] = json.dumps(result) print(response) return response if __name__ == \"__main__\": f =", "{} response = { 'statusCode': 200, 'body': \"\" } try: validate_event(event) except Exception", "validate the event send from API gateway to Lambda and raise exception if", "result[\"status_code\"] = \"0001\" result[\"result_info\"] = \"Expect one wordString for getVec\" return result word", "return result word = word_list[0] try: vec = model.get_vector(word) result[\"vec\"] = str(np.array(vec).tolist()) except", "model.get_vector(word) result[\"vec\"] = str(np.array(vec).tolist()) except Exception as e: result[\"status_code\"] = \"0001\" result[\"result_info\"] =", 
"response if __name__ == \"__main__\": f = open('mock_event.json') mock_event = json.load(f) f.close() print(lambda_handler(mock_event,", "of the similar words by the given word :param word_list: list of a", "= str(similarity) except Exception as e: result[\"status_code\"] = \"0001\" result[\"result_info\"] = str(e) return", "the vector list of this word \"\"\" result = {\"status_code\": \"0000\"} if len(word_list)", "= model.similarity(word_a, word_b) result[\"similarity\"] = str(similarity) except Exception as e: result[\"status_code\"] = \"0001\"", "method_dispatcher[method](word_list) result[\"request_info\"] = event[\"multiValueQueryStringParameters\"] result[\"model_version\"] = MODEL_VERSION response[\"body\"] = json.dumps(result) print(response) return response", "raise Exception('\"method\" and \"wordString\" are expected as the Query Params') # flag =", "flag = False method = params.get(\"method\") if len(method) != 1: # flag =", "method[0] if method not in method_dispatcher.keys(): # flag = False raise Exception('method must", "the given word :param word_list: list of a single word string :return: the", "str(e) result[\"request_info\"] = event[\"multiValueQueryStringParameters\"] result[\"model_version\"] = MODEL_VERSION response[\"body\"] = json.dumps(result) return response params", "as e: result[\"status_code\"] = \"0001\" result[\"result_info\"] = str(e) return result def get_sim_by_word(word_list): \"\"\"", "word = word_list[0] try: sim_words = model.similar_by_word(word) result[\"sim_words\"] = sim_words except Exception as", "method will get the vector of the given word :param word_list: list of", "= False raise Exception('method must be in one of ' + str(list(method_dispatcher.keys()))) def", "result = {\"status_code\": \"0000\"} if len(word_list) != 2: result[\"status_code\"] = \"0001\" result[\"result_info\"] =", "1: result[\"status_code\"] = \"0001\" result[\"result_info\"] = \"Expect one wordString for getSim\" return result", "the given word 
\"\"\" result = {\"status_code\": \"0000\"} if len(word_list) > 1: result[\"status_code\"]", "This function will validate the event send from API gateway to Lambda and", "!= 1: # flag = False raise Exception('Expect one value for method param')", "= str(e) result[\"request_info\"] = event[\"multiValueQueryStringParameters\"] result[\"model_version\"] = MODEL_VERSION response[\"body\"] = json.dumps(result) return response", "word :param word_list: list of a single word string :return: the vector list", "def validate_event(event): \"\"\" This function will validate the event send from API gateway", "= \"Expect two wordString for getSimBetween\" return result try: word_a = word_list[0] word_b", "result word = word_list[0] try: vec = model.get_vector(word) result[\"vec\"] = str(np.array(vec).tolist()) except Exception", "flag = False raise Exception('method must be in one of ' + str(list(method_dispatcher.keys())))", "two wordString for getSimBetween\" return result try: word_a = word_list[0] word_b = word_list[1]", "two given words :param word_list: list of two words A B for similarity", "context): result = {} response = { 'statusCode': 200, 'body': \"\" } try:", "sim_words = model.similar_by_word(word) result[\"sim_words\"] = sim_words except Exception as e: result[\"status_code\"] = \"0001\"", "this word \"\"\" result = {\"status_code\": \"0000\"} if len(word_list) > 1: result[\"status_code\"] =", "if len(method) != 1: # flag = False raise Exception('Expect one value for", "= params[\"method\"][0] word_list = params[\"wordString\"] result = method_dispatcher[method](word_list) result[\"request_info\"] = event[\"multiValueQueryStringParameters\"] result[\"model_version\"] =", "return result word = word_list[0] try: sim_words = model.similar_by_word(word) result[\"sim_words\"] = sim_words except", "str(np.array(vec).tolist()) except Exception as e: result[\"status_code\"] = \"0001\" result[\"result_info\"] = str(e) return result", "result[\"status_code\"] = \"0001\" 
result[\"result_info\"] = str(e) return result def get_sim_by_word(word_list): \"\"\" This method", "of ' + str(list(method_dispatcher.keys()))) def lambda_handler(event, context): result = {} response = {", "= \"Expect one wordString for getVec\" return result word = word_list[0] try: vec", "as e: result[\"status_code\"] = \"0001\" result[\"result_info\"] = str(e) return result method_dispatcher = {", "get_sim_by_word(word_list), \"getSimBetween\": lambda word_list,: get_similarity_between(word_list) } def validate_event(event): \"\"\" This function will validate", "word = word_list[0] try: vec = model.get_vector(word) result[\"vec\"] = str(np.array(vec).tolist()) except Exception as", "Query Params') # flag = False method = params.get(\"method\") if len(method) != 1:", "the similar words by the given word :param word_list: list of a single", "\"0001\" result[\"result_info\"] = \"Expect one wordString for getSim\" return result word = word_list[0]", "= event[\"multiValueQueryStringParameters\"] result[\"model_version\"] = MODEL_VERSION response[\"body\"] = json.dumps(result) print(response) return response if __name__", "expected as the Query Params') # flag = False method = params.get(\"method\") if", "= event[\"multiValueQueryStringParameters\"] method = params[\"method\"][0] word_list = params[\"wordString\"] result = method_dispatcher[method](word_list) result[\"request_info\"] =", "= json.dumps(result) print(response) return response if __name__ == \"__main__\": f = open('mock_event.json') mock_event", "This method will get the vector of the given word :param word_list: list", "= \"0001\" result[\"result_info\"] = str(e) return result method_dispatcher = { \"getVec\": lambda word_list,:", "two given words \"\"\" result = {\"status_code\": \"0000\"} if len(word_list) != 2: result[\"status_code\"]", "of two words A B for similarity calculation :return: cosine similarity of the", "gensim import models import json import numpy as np MODEL_VERSION = 
\"glove-wiki-gigaword-300\" model", "get_similarity_between(word_list): \"\"\" This method will get the similarity of two given words :param", "np MODEL_VERSION = \"glove-wiki-gigaword-300\" model = models.KeyedVectors.load_word2vec_format(MODEL_VERSION) def get_word_vec(word_list): \"\"\" This method will", "= \"0001\" result[\"result_info\"] = \"Expect one wordString for getSim\" return result word =", "raise exception if exists :param event: :return: \"\"\" params = event[\"multiValueQueryStringParameters\"] if \"method\"", "+ str(list(method_dispatcher.keys()))) def lambda_handler(event, context): result = {} response = { 'statusCode': 200,", "Exception('Expect one value for method param') method = method[0] if method not in", "\"getVec\": lambda word_list,: get_word_vec(word_list), \"getSim\": lambda word_list,: get_sim_by_word(word_list), \"getSimBetween\": lambda word_list,: get_similarity_between(word_list) }", "'body': \"\" } try: validate_event(event) except Exception as e: result[\"status_code\"] = \"0001\" result[\"result_info\"]", "1: # flag = False raise Exception('Expect one value for method param') method", "= { 'statusCode': 200, 'body': \"\" } try: validate_event(event) except Exception as e:", "str(e) return result method_dispatcher = { \"getVec\": lambda word_list,: get_word_vec(word_list), \"getSim\": lambda word_list,:", "return response params = event[\"multiValueQueryStringParameters\"] method = params[\"method\"][0] word_list = params[\"wordString\"] result =", "similar words by the given word :param word_list: list of a single word", "\"method\" not in params.keys() or \"wordString\" not in params.keys(): raise Exception('\"method\" and \"wordString\"", "are expected as the Query Params') # flag = False method = params.get(\"method\")", "the vector of the given word :param word_list: list of a single word", "Exception('\"method\" and \"wordString\" are expected as the Query Params') # flag = False", "= \"0001\" result[\"result_info\"] = 
str(e) result[\"request_info\"] = event[\"multiValueQueryStringParameters\"] result[\"model_version\"] = MODEL_VERSION response[\"body\"] =", "for getSimBetween\" return result try: word_a = word_list[0] word_b = word_list[1] similarity =", "will get the similarity of two given words :param word_list: list of two", "result[\"sim_words\"] = sim_words except Exception as e: result[\"status_code\"] = \"0001\" result[\"result_info\"] = str(e)", ":return: the vector list of this word \"\"\" result = {\"status_code\": \"0000\"} if", "\"0001\" result[\"result_info\"] = str(e) return result method_dispatcher = { \"getVec\": lambda word_list,: get_word_vec(word_list),", "} def validate_event(event): \"\"\" This function will validate the event send from API", "vec = model.get_vector(word) result[\"vec\"] = str(np.array(vec).tolist()) except Exception as e: result[\"status_code\"] = \"0001\"", "vector list of this word \"\"\" result = {\"status_code\": \"0000\"} if len(word_list) >", "\"\"\" This method will get the similarity of two given words :param word_list:", "try: vec = model.get_vector(word) result[\"vec\"] = str(np.array(vec).tolist()) except Exception as e: result[\"status_code\"] =", "send from API gateway to Lambda and raise exception if exists :param event:", "as e: result[\"status_code\"] = \"0001\" result[\"result_info\"] = str(e) result[\"request_info\"] = event[\"multiValueQueryStringParameters\"] result[\"model_version\"] =", "{\"status_code\": \"0000\"} if len(word_list) > 1: result[\"status_code\"] = \"0001\" result[\"result_info\"] = \"Expect one", "lambda word_list,: get_word_vec(word_list), \"getSim\": lambda word_list,: get_sim_by_word(word_list), \"getSimBetween\": lambda word_list,: get_similarity_between(word_list) } def", "json import numpy as np MODEL_VERSION = \"glove-wiki-gigaword-300\" model = models.KeyedVectors.load_word2vec_format(MODEL_VERSION) def get_word_vec(word_list):", "if len(word_list) != 2: result[\"status_code\"] = \"0001\" 
result[\"result_info\"] = \"Expect two wordString for", ":return: the sim words list of the given word \"\"\" result = {\"status_code\":", "of a single word string :return: the vector list of this word \"\"\"", "wordString for getSimBetween\" return result try: word_a = word_list[0] word_b = word_list[1] similarity", "by the given word :param word_list: list of a single word string :return:", "word :param word_list: list of a single word string :return: the sim words", "} try: validate_event(event) except Exception as e: result[\"status_code\"] = \"0001\" result[\"result_info\"] = str(e)", "result[\"status_code\"] = \"0001\" result[\"result_info\"] = str(e) return result method_dispatcher = { \"getVec\": lambda", "except Exception as e: result[\"status_code\"] = \"0001\" result[\"result_info\"] = str(e) return result def", "= MODEL_VERSION response[\"body\"] = json.dumps(result) print(response) return response if __name__ == \"__main__\": f", "= str(np.array(vec).tolist()) except Exception as e: result[\"status_code\"] = \"0001\" result[\"result_info\"] = str(e) return", "method will get the similarity of two given words :param word_list: list of", "result try: word_a = word_list[0] word_b = word_list[1] similarity = model.similarity(word_a, word_b) result[\"similarity\"]", "= word_list[1] similarity = model.similarity(word_a, word_b) result[\"similarity\"] = str(similarity) except Exception as e:", "in one of ' + str(list(method_dispatcher.keys()))) def lambda_handler(event, context): result = {} response", "method = method[0] if method not in method_dispatcher.keys(): # flag = False raise", "get the vector of the given word :param word_list: list of a single", ":param word_list: list of a single word string :return: the vector list of", "= word_list[0] word_b = word_list[1] similarity = model.similarity(word_a, word_b) result[\"similarity\"] = str(similarity) except", "given word \"\"\" result = {\"status_code\": \"0000\"} if len(word_list) > 1: 
result[\"status_code\"] =", "e: result[\"status_code\"] = \"0001\" result[\"result_info\"] = str(e) return result def get_similarity_between(word_list): \"\"\" This", "\"\" } try: validate_event(event) except Exception as e: result[\"status_code\"] = \"0001\" result[\"result_info\"] =", "word \"\"\" result = {\"status_code\": \"0000\"} if len(word_list) > 1: result[\"status_code\"] = \"0001\"", "\"\"\" This function will validate the event send from API gateway to Lambda", "of the two given words \"\"\" result = {\"status_code\": \"0000\"} if len(word_list) !=", "= method[0] if method not in method_dispatcher.keys(): # flag = False raise Exception('method", "a single word string :return: the vector list of this word \"\"\" result", "list of the given word \"\"\" result = {\"status_code\": \"0000\"} if len(word_list) >", "will get the vector of the given word :param word_list: list of a", "= sim_words except Exception as e: result[\"status_code\"] = \"0001\" result[\"result_info\"] = str(e) return", "to Lambda and raise exception if exists :param event: :return: \"\"\" params =", "= event[\"multiValueQueryStringParameters\"] result[\"model_version\"] = MODEL_VERSION response[\"body\"] = json.dumps(result) return response params = event[\"multiValueQueryStringParameters\"]", "= { \"getVec\": lambda word_list,: get_word_vec(word_list), \"getSim\": lambda word_list,: get_sim_by_word(word_list), \"getSimBetween\": lambda word_list,:", "gateway to Lambda and raise exception if exists :param event: :return: \"\"\" params", "for getSim\" return result word = word_list[0] try: sim_words = model.similar_by_word(word) result[\"sim_words\"] =", "= \"0001\" result[\"result_info\"] = \"Expect two wordString for getSimBetween\" return result try: word_a", "sim_words except Exception as e: result[\"status_code\"] = \"0001\" result[\"result_info\"] = str(e) return result", "200, 'body': \"\" } try: validate_event(event) except Exception as e: result[\"status_code\"] = \"0001\"", "> 
1: result[\"status_code\"] = \"0001\" result[\"result_info\"] = \"Expect one wordString for getVec\" return", "be in one of ' + str(list(method_dispatcher.keys()))) def lambda_handler(event, context): result = {}", "lambda_handler(event, context): result = {} response = { 'statusCode': 200, 'body': \"\" }", "params[\"method\"][0] word_list = params[\"wordString\"] result = method_dispatcher[method](word_list) result[\"request_info\"] = event[\"multiValueQueryStringParameters\"] result[\"model_version\"] = MODEL_VERSION", "word_list: list of two words A B for similarity calculation :return: cosine similarity", "word_b = word_list[1] similarity = model.similarity(word_a, word_b) result[\"similarity\"] = str(similarity) except Exception as", "= method_dispatcher[method](word_list) result[\"request_info\"] = event[\"multiValueQueryStringParameters\"] result[\"model_version\"] = MODEL_VERSION response[\"body\"] = json.dumps(result) print(response) return", "as np MODEL_VERSION = \"glove-wiki-gigaword-300\" model = models.KeyedVectors.load_word2vec_format(MODEL_VERSION) def get_word_vec(word_list): \"\"\" This method", "one wordString for getVec\" return result word = word_list[0] try: vec = model.get_vector(word)", "word_list: list of a single word string :return: the sim words list of", "MODEL_VERSION response[\"body\"] = json.dumps(result) return response params = event[\"multiValueQueryStringParameters\"] method = params[\"method\"][0] word_list", "e: result[\"status_code\"] = \"0001\" result[\"result_info\"] = str(e) result[\"request_info\"] = event[\"multiValueQueryStringParameters\"] result[\"model_version\"] = MODEL_VERSION", "return result def get_sim_by_word(word_list): \"\"\" This method will return a list of the", "str(e) return result def get_similarity_between(word_list): \"\"\" This method will get the similarity of", "params = event[\"multiValueQueryStringParameters\"] method = params[\"method\"][0] word_list = params[\"wordString\"] result = 
method_dispatcher[method](word_list) result[\"request_info\"]", "= \"0001\" result[\"result_info\"] = \"Expect one wordString for getVec\" return result word =", "if method not in method_dispatcher.keys(): # flag = False raise Exception('method must be", "e: result[\"status_code\"] = \"0001\" result[\"result_info\"] = str(e) return result def get_sim_by_word(word_list): \"\"\" This", "import numpy as np MODEL_VERSION = \"glove-wiki-gigaword-300\" model = models.KeyedVectors.load_word2vec_format(MODEL_VERSION) def get_word_vec(word_list): \"\"\"", "method_dispatcher.keys(): # flag = False raise Exception('method must be in one of '", "params[\"wordString\"] result = method_dispatcher[method](word_list) result[\"request_info\"] = event[\"multiValueQueryStringParameters\"] result[\"model_version\"] = MODEL_VERSION response[\"body\"] = json.dumps(result)", "result[\"status_code\"] = \"0001\" result[\"result_info\"] = \"Expect two wordString for getSimBetween\" return result try:", "get_similarity_between(word_list) } def validate_event(event): \"\"\" This function will validate the event send from", "exception if exists :param event: :return: \"\"\" params = event[\"multiValueQueryStringParameters\"] if \"method\" not", "= str(e) return result method_dispatcher = { \"getVec\": lambda word_list,: get_word_vec(word_list), \"getSim\": lambda", "event[\"multiValueQueryStringParameters\"] result[\"model_version\"] = MODEL_VERSION response[\"body\"] = json.dumps(result) print(response) return response if __name__ ==", "list of this word \"\"\" result = {\"status_code\": \"0000\"} if len(word_list) > 1:", "similarity calculation :return: cosine similarity of the two given words \"\"\" result =", "of the given word :param word_list: list of a single word string :return:", "= False method = params.get(\"method\") if len(method) != 1: # flag = False", "result[\"result_info\"] = str(e) return result method_dispatcher = { \"getVec\": lambda word_list,: get_word_vec(word_list), 
\"getSim\":", "{ \"getVec\": lambda word_list,: get_word_vec(word_list), \"getSim\": lambda word_list,: get_sim_by_word(word_list), \"getSimBetween\": lambda word_list,: get_similarity_between(word_list)", "words A B for similarity calculation :return: cosine similarity of the two given", "string :return: the sim words list of the given word \"\"\" result =", "the event send from API gateway to Lambda and raise exception if exists", "given word :param word_list: list of a single word string :return: the vector", "!= 2: result[\"status_code\"] = \"0001\" result[\"result_info\"] = \"Expect two wordString for getSimBetween\" return", "word_list: list of a single word string :return: the vector list of this", "for getVec\" return result word = word_list[0] try: vec = model.get_vector(word) result[\"vec\"] =", "similarity of two given words :param word_list: list of two words A B", "word_list[1] similarity = model.similarity(word_a, word_b) result[\"similarity\"] = str(similarity) except Exception as e: result[\"status_code\"]", "\"Expect one wordString for getSim\" return result word = word_list[0] try: sim_words =", "response[\"body\"] = json.dumps(result) return response params = event[\"multiValueQueryStringParameters\"] method = params[\"method\"][0] word_list =", "= MODEL_VERSION response[\"body\"] = json.dumps(result) return response params = event[\"multiValueQueryStringParameters\"] method = params[\"method\"][0]", "models.KeyedVectors.load_word2vec_format(MODEL_VERSION) def get_word_vec(word_list): \"\"\" This method will get the vector of the given", "the Query Params') # flag = False method = params.get(\"method\") if len(method) !=", "one value for method param') method = method[0] if method not in method_dispatcher.keys():", "two words A B for similarity calculation :return: cosine similarity of the two", "not in params.keys() or \"wordString\" not in params.keys(): raise Exception('\"method\" and \"wordString\" are", "function will validate the event send 
from API gateway to Lambda and raise", "response params = event[\"multiValueQueryStringParameters\"] method = params[\"method\"][0] word_list = params[\"wordString\"] result = method_dispatcher[method](word_list)", "if len(word_list) > 1: result[\"status_code\"] = \"0001\" result[\"result_info\"] = \"Expect one wordString for", "result[\"result_info\"] = \"Expect two wordString for getSimBetween\" return result try: word_a = word_list[0]", "one wordString for getSim\" return result word = word_list[0] try: sim_words = model.similar_by_word(word)", "similarity = model.similarity(word_a, word_b) result[\"similarity\"] = str(similarity) except Exception as e: result[\"status_code\"] =", "def lambda_handler(event, context): result = {} response = { 'statusCode': 200, 'body': \"\"", "result[\"vec\"] = str(np.array(vec).tolist()) except Exception as e: result[\"status_code\"] = \"0001\" result[\"result_info\"] = str(e)", "# flag = False raise Exception('method must be in one of ' +", "= word_list[0] try: vec = model.get_vector(word) result[\"vec\"] = str(np.array(vec).tolist()) except Exception as e:", ":param word_list: list of a single word string :return: the sim words list", "vector of the given word :param word_list: list of a single word string", "\"0001\" result[\"result_info\"] = \"Expect one wordString for getVec\" return result word = word_list[0]", "result[\"result_info\"] = str(e) result[\"request_info\"] = event[\"multiValueQueryStringParameters\"] result[\"model_version\"] = MODEL_VERSION response[\"body\"] = json.dumps(result) return", "= word_list[0] try: sim_words = model.similar_by_word(word) result[\"sim_words\"] = sim_words except Exception as e:", "as the Query Params') # flag = False method = params.get(\"method\") if len(method)", "sim words list of the given word \"\"\" result = {\"status_code\": \"0000\"} if", "given words \"\"\" result = {\"status_code\": \"0000\"} if len(word_list) != 2: result[\"status_code\"] =", ":return: \"\"\" params = 
event[\"multiValueQueryStringParameters\"] if \"method\" not in params.keys() or \"wordString\" not", "\"getSim\": lambda word_list,: get_sim_by_word(word_list), \"getSimBetween\": lambda word_list,: get_similarity_between(word_list) } def validate_event(event): \"\"\" This", "result[\"result_info\"] = \"Expect one wordString for getVec\" return result word = word_list[0] try:", "given word :param word_list: list of a single word string :return: the sim", "result = {} response = { 'statusCode': 200, 'body': \"\" } try: validate_event(event)", "model.similar_by_word(word) result[\"sim_words\"] = sim_words except Exception as e: result[\"status_code\"] = \"0001\" result[\"result_info\"] =", "validate_event(event) except Exception as e: result[\"status_code\"] = \"0001\" result[\"result_info\"] = str(e) result[\"request_info\"] =", "in params.keys(): raise Exception('\"method\" and \"wordString\" are expected as the Query Params') #", "for method param') method = method[0] if method not in method_dispatcher.keys(): # flag", "import models import json import numpy as np MODEL_VERSION = \"glove-wiki-gigaword-300\" model =", "Params') # flag = False method = params.get(\"method\") if len(method) != 1: #", "= False raise Exception('Expect one value for method param') method = method[0] if", "return response if __name__ == \"__main__\": f = open('mock_event.json') mock_event = json.load(f) f.close()", "raise Exception('Expect one value for method param') method = method[0] if method not", "and \"wordString\" are expected as the Query Params') # flag = False method", "params.get(\"method\") if len(method) != 1: # flag = False raise Exception('Expect one value", "= str(e) return result def get_sim_by_word(word_list): \"\"\" This method will return a list", "return result method_dispatcher = { \"getVec\": lambda word_list,: get_word_vec(word_list), \"getSim\": lambda word_list,: get_sim_by_word(word_list),", "\"0001\" result[\"result_info\"] = str(e) return result def 
get_sim_by_word(word_list): \"\"\" This method will return", "and raise exception if exists :param event: :return: \"\"\" params = event[\"multiValueQueryStringParameters\"] if", "given words :param word_list: list of two words A B for similarity calculation", "\"wordString\" are expected as the Query Params') # flag = False method =", "= models.KeyedVectors.load_word2vec_format(MODEL_VERSION) def get_word_vec(word_list): \"\"\" This method will get the vector of the", "event[\"multiValueQueryStringParameters\"] method = params[\"method\"][0] word_list = params[\"wordString\"] result = method_dispatcher[method](word_list) result[\"request_info\"] = event[\"multiValueQueryStringParameters\"]", "word_list = params[\"wordString\"] result = method_dispatcher[method](word_list) result[\"request_info\"] = event[\"multiValueQueryStringParameters\"] result[\"model_version\"] = MODEL_VERSION response[\"body\"]" ]
[ "else '1' for x in reversed(output)] output = output[0:35651584] while len(output) % 2", "['0'] + ['0' if x == '1' else '1' for x in reversed(output)]", "= output[0:35651584] while len(output) % 2 == 0: output = ['1' if output[i]", "list(inp) while len(output) < 35651584: output += ['0'] + ['0' if x ==", "output = ['1' if output[i] == output[i + 1] else '0' for i", "len(output) % 2 == 0: output = ['1' if output[i] == output[i +", "output[0:35651584] while len(output) % 2 == 0: output = ['1' if output[i] ==", "output[i] == output[i + 1] else '0' for i in range(0, len(output), 2)]", "output = output[0:35651584] while len(output) % 2 == 0: output = ['1' if", "35651584: output += ['0'] + ['0' if x == '1' else '1' for", "'1' else '1' for x in reversed(output)] output = output[0:35651584] while len(output) %", "+ ['0' if x == '1' else '1' for x in reversed(output)] output", "== '1' else '1' for x in reversed(output)] output = output[0:35651584] while len(output)", "['1' if output[i] == output[i + 1] else '0' for i in range(0,", "< 35651584: output += ['0'] + ['0' if x == '1' else '1'", "if x == '1' else '1' for x in reversed(output)] output = output[0:35651584]", "= list(inp) while len(output) < 35651584: output += ['0'] + ['0' if x", "['0' if x == '1' else '1' for x in reversed(output)] output =", "0: output = ['1' if output[i] == output[i + 1] else '0' for", "len(output) < 35651584: output += ['0'] + ['0' if x == '1' else", "= ['1' if output[i] == output[i + 1] else '0' for i in", "== 0: output = ['1' if output[i] == output[i + 1] else '0'", "% 2 == 0: output = ['1' if output[i] == output[i + 1]", "for x in reversed(output)] output = output[0:35651584] while len(output) % 2 == 0:", "output[i + 1] else '0' for i in range(0, len(output), 2)] print(''.join(output)) input()", "2 == 0: output = ['1' if output[i] == output[i + 1] else", "output += ['0'] + ['0' if x == '1' else '1' for x", "= '11101000110010100' output = list(inp) while len(output) < 35651584: output += 
['0'] +", "reversed(output)] output = output[0:35651584] while len(output) % 2 == 0: output = ['1'", "if output[i] == output[i + 1] else '0' for i in range(0, len(output),", "x == '1' else '1' for x in reversed(output)] output = output[0:35651584] while", "inp = '11101000110010100' output = list(inp) while len(output) < 35651584: output += ['0']", "'1' for x in reversed(output)] output = output[0:35651584] while len(output) % 2 ==", "in reversed(output)] output = output[0:35651584] while len(output) % 2 == 0: output =", "== output[i + 1] else '0' for i in range(0, len(output), 2)] print(''.join(output))", "while len(output) < 35651584: output += ['0'] + ['0' if x == '1'", "output = list(inp) while len(output) < 35651584: output += ['0'] + ['0' if", "'11101000110010100' output = list(inp) while len(output) < 35651584: output += ['0'] + ['0'", "x in reversed(output)] output = output[0:35651584] while len(output) % 2 == 0: output", "+= ['0'] + ['0' if x == '1' else '1' for x in", "while len(output) % 2 == 0: output = ['1' if output[i] == output[i" ]
[ "= portal_name self._prepared_statement_name = prepared_statement_name self._parameter_values = parameter_values def read_bytes(self): bytes_ = pack('!{0}sx{1}sxHH'.format(", "__init__(self, portal_name, prepared_statement_name, parameter_values): BulkFrontendMessage.__init__(self) self._portal_name = portal_name self._prepared_statement_name = prepared_statement_name self._parameter_values =", "+= pack('!I', [-1]) else: bytes_ += pack('!I{0}s'.format(len(val)), len(val), val) bytes_ += pack('!H', [0])", "BulkFrontendMessage.__init__(self) self._portal_name = portal_name self._prepared_statement_name = prepared_statement_name self._parameter_values = parameter_values def read_bytes(self): bytes_", "import pack from ..message import BulkFrontendMessage class Bind(BulkFrontendMessage): message_id = b'B' def __init__(self,", "is None: bytes_ += pack('!I', [-1]) else: bytes_ += pack('!I{0}s'.format(len(val)), len(val), val) bytes_", "pack('!I', [-1]) else: bytes_ += pack('!I{0}s'.format(len(val)), len(val), val) bytes_ += pack('!H', [0]) return", "parameter_values def read_bytes(self): bytes_ = pack('!{0}sx{1}sxHH'.format( len(self._portal_name), len(self._prepared_statement_name)), self._portal_name, self._prepared_statement_name, 0, len(self._parameter_values)) for", "= prepared_statement_name self._parameter_values = parameter_values def read_bytes(self): bytes_ = pack('!{0}sx{1}sxHH'.format( len(self._portal_name), len(self._prepared_statement_name)), self._portal_name,", "bytes_ = pack('!{0}sx{1}sxHH'.format( len(self._portal_name), len(self._prepared_statement_name)), self._portal_name, self._prepared_statement_name, 0, len(self._parameter_values)) for val in self._parameter_values.values():", "def read_bytes(self): bytes_ = pack('!{0}sx{1}sxHH'.format( len(self._portal_name), len(self._prepared_statement_name)), self._portal_name, self._prepared_statement_name, 0, len(self._parameter_values)) for val", "self._prepared_statement_name = prepared_statement_name 
self._parameter_values = parameter_values def read_bytes(self): bytes_ = pack('!{0}sx{1}sxHH'.format( len(self._portal_name), len(self._prepared_statement_name)),", "__future__ import print_function, division, absolute_import from struct import pack from ..message import BulkFrontendMessage", "prepared_statement_name, parameter_values): BulkFrontendMessage.__init__(self) self._portal_name = portal_name self._prepared_statement_name = prepared_statement_name self._parameter_values = parameter_values def", "len(self._portal_name), len(self._prepared_statement_name)), self._portal_name, self._prepared_statement_name, 0, len(self._parameter_values)) for val in self._parameter_values.values(): if val is", "pack from ..message import BulkFrontendMessage class Bind(BulkFrontendMessage): message_id = b'B' def __init__(self, portal_name,", "val is None: bytes_ += pack('!I', [-1]) else: bytes_ += pack('!I{0}s'.format(len(val)), len(val), val)", "from __future__ import print_function, division, absolute_import from struct import pack from ..message import", "from ..message import BulkFrontendMessage class Bind(BulkFrontendMessage): message_id = b'B' def __init__(self, portal_name, prepared_statement_name,", "..message import BulkFrontendMessage class Bind(BulkFrontendMessage): message_id = b'B' def __init__(self, portal_name, prepared_statement_name, parameter_values):", "print_function, division, absolute_import from struct import pack from ..message import BulkFrontendMessage class Bind(BulkFrontendMessage):", "BulkFrontendMessage class Bind(BulkFrontendMessage): message_id = b'B' def __init__(self, portal_name, prepared_statement_name, parameter_values): BulkFrontendMessage.__init__(self) self._portal_name", "len(self._parameter_values)) for val in self._parameter_values.values(): if val is None: bytes_ += pack('!I', [-1])", "self._parameter_values.values(): if val is None: bytes_ += pack('!I', [-1]) else: bytes_ += pack('!I{0}s'.format(len(val)),", "from struct import 
pack from ..message import BulkFrontendMessage class Bind(BulkFrontendMessage): message_id = b'B'", "portal_name, prepared_statement_name, parameter_values): BulkFrontendMessage.__init__(self) self._portal_name = portal_name self._prepared_statement_name = prepared_statement_name self._parameter_values = parameter_values", "class Bind(BulkFrontendMessage): message_id = b'B' def __init__(self, portal_name, prepared_statement_name, parameter_values): BulkFrontendMessage.__init__(self) self._portal_name =", "self._parameter_values = parameter_values def read_bytes(self): bytes_ = pack('!{0}sx{1}sxHH'.format( len(self._portal_name), len(self._prepared_statement_name)), self._portal_name, self._prepared_statement_name, 0,", "None: bytes_ += pack('!I', [-1]) else: bytes_ += pack('!I{0}s'.format(len(val)), len(val), val) bytes_ +=", "def __init__(self, portal_name, prepared_statement_name, parameter_values): BulkFrontendMessage.__init__(self) self._portal_name = portal_name self._prepared_statement_name = prepared_statement_name self._parameter_values", "message_id = b'B' def __init__(self, portal_name, prepared_statement_name, parameter_values): BulkFrontendMessage.__init__(self) self._portal_name = portal_name self._prepared_statement_name", "division, absolute_import from struct import pack from ..message import BulkFrontendMessage class Bind(BulkFrontendMessage): message_id", "= parameter_values def read_bytes(self): bytes_ = pack('!{0}sx{1}sxHH'.format( len(self._portal_name), len(self._prepared_statement_name)), self._portal_name, self._prepared_statement_name, 0, len(self._parameter_values))", "parameter_values): BulkFrontendMessage.__init__(self) self._portal_name = portal_name self._prepared_statement_name = prepared_statement_name self._parameter_values = parameter_values def read_bytes(self):", "import BulkFrontendMessage class Bind(BulkFrontendMessage): message_id = b'B' def __init__(self, portal_name, prepared_statement_name, parameter_values): 
BulkFrontendMessage.__init__(self)", "pack('!{0}sx{1}sxHH'.format( len(self._portal_name), len(self._prepared_statement_name)), self._portal_name, self._prepared_statement_name, 0, len(self._parameter_values)) for val in self._parameter_values.values(): if val", "self._portal_name, self._prepared_statement_name, 0, len(self._parameter_values)) for val in self._parameter_values.values(): if val is None: bytes_", "len(self._prepared_statement_name)), self._portal_name, self._prepared_statement_name, 0, len(self._parameter_values)) for val in self._parameter_values.values(): if val is None:", "portal_name self._prepared_statement_name = prepared_statement_name self._parameter_values = parameter_values def read_bytes(self): bytes_ = pack('!{0}sx{1}sxHH'.format( len(self._portal_name),", "Bind(BulkFrontendMessage): message_id = b'B' def __init__(self, portal_name, prepared_statement_name, parameter_values): BulkFrontendMessage.__init__(self) self._portal_name = portal_name", "self._prepared_statement_name, 0, len(self._parameter_values)) for val in self._parameter_values.values(): if val is None: bytes_ +=", "= b'B' def __init__(self, portal_name, prepared_statement_name, parameter_values): BulkFrontendMessage.__init__(self) self._portal_name = portal_name self._prepared_statement_name =", "[-1]) else: bytes_ += pack('!I{0}s'.format(len(val)), len(val), val) bytes_ += pack('!H', [0]) return bytes_", "<filename>vertica_python/vertica/messages/frontend_messages/bind.py from __future__ import print_function, division, absolute_import from struct import pack from ..message", "bytes_ += pack('!I', [-1]) else: bytes_ += pack('!I{0}s'.format(len(val)), len(val), val) bytes_ += pack('!H',", "struct import pack from ..message import BulkFrontendMessage class Bind(BulkFrontendMessage): message_id = b'B' def", "absolute_import from struct import pack from ..message import BulkFrontendMessage class Bind(BulkFrontendMessage): message_id =", "b'B' def __init__(self, portal_name, 
prepared_statement_name, parameter_values): BulkFrontendMessage.__init__(self) self._portal_name = portal_name self._prepared_statement_name = prepared_statement_name", "for val in self._parameter_values.values(): if val is None: bytes_ += pack('!I', [-1]) else:", "prepared_statement_name self._parameter_values = parameter_values def read_bytes(self): bytes_ = pack('!{0}sx{1}sxHH'.format( len(self._portal_name), len(self._prepared_statement_name)), self._portal_name, self._prepared_statement_name,", "self._portal_name = portal_name self._prepared_statement_name = prepared_statement_name self._parameter_values = parameter_values def read_bytes(self): bytes_ =", "= pack('!{0}sx{1}sxHH'.format( len(self._portal_name), len(self._prepared_statement_name)), self._portal_name, self._prepared_statement_name, 0, len(self._parameter_values)) for val in self._parameter_values.values(): if", "if val is None: bytes_ += pack('!I', [-1]) else: bytes_ += pack('!I{0}s'.format(len(val)), len(val),", "val in self._parameter_values.values(): if val is None: bytes_ += pack('!I', [-1]) else: bytes_", "read_bytes(self): bytes_ = pack('!{0}sx{1}sxHH'.format( len(self._portal_name), len(self._prepared_statement_name)), self._portal_name, self._prepared_statement_name, 0, len(self._parameter_values)) for val in", "import print_function, division, absolute_import from struct import pack from ..message import BulkFrontendMessage class", "in self._parameter_values.values(): if val is None: bytes_ += pack('!I', [-1]) else: bytes_ +=", "0, len(self._parameter_values)) for val in self._parameter_values.values(): if val is None: bytes_ += pack('!I'," ]
[ "the table as a matrix close = np.asmatrix(close) # Print the GOODS correlation", "want to analyze basicTickers = ['XOM','RDS-B','PTR','CVX','BP'] # Define the data source data_source =", "in different groups for i in range(len(AllTickers)): if i < len(financeTickers): nodes.append({\"id\":AllTickers[i], \"group\":", "consumerTickers = ['AAPL','PG','BUD','KO','TM'] # Define the data source data_source = 'yahoo' # Define", "a - 1 # In[8]: ## ALL INDUSTRIES # Call the stocks I", "len(financeTickers) + len(techTickers): nodes.append({\"id\":AllTickers[i], \"group\": 2}) elif i < len(financeTickers) + len(techTickers) +", "len(servicesTickers): nodes.append({\"id\":AllTickers[i], \"group\": 3}) elif i < len(financeTickers) + len(techTickers) + len(servicesTickers) +", "basicTickers = ['XOM','RDS-B','PTR','CVX','BP'] # Define the data source data_source = 'yahoo' # Define", "groups for i in range(len(AllTickers)): if i < len(financeTickers): nodes.append({\"id\":AllTickers[i], \"group\": 1}) elif", "len(financeTickers): nodes.append({\"id\":AllTickers[i], \"group\": 1}) elif i < len(financeTickers) + len(techTickers): nodes.append({\"id\":AllTickers[i], \"group\": 2})", "In[4]: ## SERVICES # Call the stocks I want to analyze servicesTickers =", "print a - 1 # In[8]: ## ALL INDUSTRIES # Call the stocks", "2}) elif i < len(financeTickers) + len(techTickers) + len(servicesTickers): nodes.append({\"id\":AllTickers[i], \"group\": 3}) elif", "i in range(len(AllTickers)): if i < len(financeTickers): nodes.append({\"id\":AllTickers[i], \"group\": 1}) elif i <", "\"group\": 3}) elif i < len(financeTickers) + len(techTickers) + len(servicesTickers) + len(basicTickers): nodes.append({\"id\":AllTickers[i],", "the proper format for the network nodes = [] # define the different", "connections with eachother to the correlation matrix. 
for i in range(len(AllTickers)): for j", "data panel_data = data.DataReader(servicesTickers, data_source, start_date, end_date) # Getting just the adjusted closing", "i < len(financeTickers) + len(techTickers): nodes.append({\"id\":AllTickers[i], \"group\": 2}) elif i < len(financeTickers) +", "nodes.append({\"id\":AllTickers[i], \"group\": 1}) elif i < len(financeTickers) + len(techTickers): nodes.append({\"id\":AllTickers[i], \"group\": 2}) elif", "freq='B') # How do we align the existing prices in adj_close with our", "# How do we align the existing prices in adj_close with our new", "dates? # All we need to do is reindex close using all_weekdays as", "panel_data. close = panel_data.loc['Close'] # Getting all weekdays between 01/01/2000 and 12/31/2016 all_weekdays", "matrix print np.corrcoef(close, rowvar=False) # In[45]: ## ALL INDUSTRIES # Call the stocks", "of the companies wasn't public yet close = close.dropna(axis=0, how='any') # normalize de", "matrix close = np.asmatrix(close) # Print the GOODS correlation matrix print np.corrcoef(close, rowvar=False)", "is the major index of the panel_data. 
close = panel_data.loc['Close'] # Getting all", "I want to analyze basicTickers = ['XOM','RDS-B','PTR','CVX','BP'] # Define the data source data_source", "the table as a matrix close = np.asmatrix(close) # Print the MATERIALS correlation", "how='any') # Define the table as a matrix close = np.matrix(close) # Print", "close using all_weekdays as the new index close = close.reindex(all_weekdays) # Drop the", "# All we need to do is reindex close using all_weekdays as the", "matplotlib.pyplot as plt import pandas as pd import numpy as np import json", "In[2]: ## FINANCIAL INDUSTRY # Call the stocks I want to analyze financeTickers", "public yet close = close.dropna(axis=0, how='any') # normalize de data by defining relative", "\"links\": links } network = json.dumps(json_data) # copied this print into a downloaded", "the dates where one of the companies wasn't public yet close = close.dropna(axis=0,", "= ['JPM','BAC','WFC','V','C','GOOGL','MSFT','FB','T','VZ','AMZN','BABA','WMT','HD','CMCSA','XOM','RDS-B','PTR','CVX','BP','AAPL','PG','BUD','KO','TM'] # Define the data source data_source = 'yahoo' # Define the", "Define and print the correlation matrix in absolute values close = np.corrcoef(close, rowvar=False)", "the companies wasn't public yet close = close.dropna(axis=0, how='any') # normalize de data", "# Define the data source data_source = 'yahoo' # Define the time-scale start_date", "correlation matrix print np.corrcoef(close, rowvar=False) # In[6]: ## CONSUMER GOODS # Call the", "the table as a matrix close = np.asmatrix(close) # Print the TECH correlation", "end_date = '2018-01-01' # Get the data panel_data = data.DataReader(financeTickers, data_source, start_date, end_date)", "# Print the TECH correlation matrix print np.corrcoef(close, rowvar=False) # In[4]: ## SERVICES", "table as a matrix close = np.asmatrix(close) # Print the GOODS correlation matrix", "of the companies wasn't public yet close = close.dropna(axis=0, how='any') # Define the", "matrix close 
= np.asmatrix(close) # Print the TECH correlation matrix print np.corrcoef(close, rowvar=False)", "to analyze consumerTickers = ['AAPL','PG','BUD','KO','TM'] # Define the data source data_source = 'yahoo'", "['GOOGL','MSFT','FB','T','VZ'] # Define the data source data_source = 'yahoo' # Define the time-scale", "yet close = close.dropna(axis=0, how='any') # normalize de data by defining relative gain.", "the data panel_data = data.DataReader(servicesTickers, data_source, start_date, end_date) # Getting just the adjusted", "analyze basicTickers = ['XOM','RDS-B','PTR','CVX','BP'] # Define the data source data_source = 'yahoo' #", "links = [] # Go through the stocks and link the stocks and", "Getting all weekdays between 01/01/2000 and 12/31/2016 all_weekdays = pd.date_range(start=start_date, end=end_date, freq='B') #", "len(techTickers) + len(servicesTickers) + len(basicTickers): nodes.append({\"id\":AllTickers[i], \"group\": 4}) else: nodes.append({\"id\":AllTickers[i], \"group\": 5}) links", "of the panel_data. close = panel_data.loc['Close'] # Getting all weekdays between 01/01/2000 and", "# copied this print into a downloaded json file. print network # In[29]:", "01/01/2000 and 12/31/2016 all_weekdays = pd.date_range(start=start_date, end=end_date, freq='B') # How do we align", "import io # In[2]: ## FINANCIAL INDUSTRY # Call the stocks I want", "= np.asmatrix(close) # Print the MATERIALS correlation matrix print np.corrcoef(close, rowvar=False) # In[6]:", "as a matrix close = np.matrix(close) # Print the correlation matrix c =", "import json import io # In[2]: ## FINANCIAL INDUSTRY # Call the stocks", "the stocks I want to analyze financeTickers = ['JPM','BAC','WFC','V','C'] techTickers = ['GOOGL','MSFT','FB','T','VZ'] servicesTickers", "Getting just the adjusted closing prices. 
This will return a Pandas DataFrame #", "yet close = close.dropna(axis=0, how='any') # Define the table as a matrix close", "matrix print np.corrcoef(close, rowvar=False) # In[6]: ## CONSUMER GOODS # Call the stocks", "= ['AAPL','PG','BUD','KO','TM'] # Define the data source data_source = 'yahoo' # Define the", "print the correlation matrix in absolute values close = np.corrcoef(close, rowvar=False) a =", "table as a matrix close = np.asmatrix(close) # Print the TECH correlation matrix", "panel_data = data.DataReader(consumerTickers, data_source, start_date, end_date) # Getting just the adjusted closing prices.", "+ len(techTickers): nodes.append({\"id\":AllTickers[i], \"group\": 2}) elif i < len(financeTickers) + len(techTickers) + len(servicesTickers):", "data.DataReader(techTickers, data_source, start_date, end_date) # Getting just the adjusted closing prices. This will", "Define the table as a matrix close = np.asmatrix(close) # Print the TECH", "<gh_stars>0 # coding: utf-8 # In[1]: from pandas_datareader import data import matplotlib.pyplot as", "# Call the stocks I want to analyze techTickers = ['GOOGL','MSFT','FB','T','VZ'] # Define", "for j in range(1,len(AllTickers) - i): links.append({\"source\" : AllTickers[i],\"target\" : AllTickers[i + j],\"value\"", "correlation matrix print np.corrcoef(close, rowvar=False) # In[4]: ## SERVICES # Call the stocks", "one of the companies wasn't public yet close = close.dropna(axis=0, how='any') # Define", "new set of dates? 
# All we need to do is reindex close", "elif i < len(financeTickers) + len(techTickers) + len(servicesTickers): nodes.append({\"id\":AllTickers[i], \"group\": 3}) elif i", "between 01/01/2000 and 12/31/2016 all_weekdays = pd.date_range(start=start_date, end=end_date, freq='B') # How do we", "Print the correlation matrix c = np.corrcoef(close, rowvar=False) # manipulate the data so", "if i < len(financeTickers): nodes.append({\"id\":AllTickers[i], \"group\": 1}) elif i < len(financeTickers) + len(techTickers):", "['JPM','BAC','WFC','V','C'] techTickers = ['GOOGL','MSFT','FB','T','VZ'] servicesTickers = ['AMZN','BABA','WMT','HD','CMCSA'] basicTickers = ['XOM','RDS-B','PTR','CVX','BP'] consumerTickers = ['AAPL','PG','BUD','KO','TM']", "np.asmatrix(close) # Print the MATERIALS correlation matrix print np.corrcoef(close, rowvar=False) # In[6]: ##", "it in different groups for i in range(len(AllTickers)): if i < len(financeTickers): nodes.append({\"id\":AllTickers[i],", "major index of the panel_data. close = panel_data.loc['Close'] # Getting all weekdays between", "the stocks I want to analyze servicesTickers = ['AMZN','BABA','WMT','HD','CMCSA'] # Define the data", "INDUSTRY # Call the stocks I want to analyze techTickers = ['GOOGL','MSFT','FB','T','VZ'] #", "= json.dumps(json_data) # copied this print into a downloaded json file. 
print network", "In[1]: from pandas_datareader import data import matplotlib.pyplot as plt import pandas as pd", "DataFrame # The index in this DataFrame is the major index of the", "as a matrix close = np.matrix(close) # Define and print the correlation matrix", "as a matrix close = np.asmatrix(close) # Print the FINANCE correlation matrix print", "print network # In[29]: ## ALL INDUSTRIES # Call the stocks I want", "techTickers = ['GOOGL','MSFT','FB','T','VZ'] servicesTickers = ['AMZN','BABA','WMT','HD','CMCSA'] basicTickers = ['XOM','RDS-B','PTR','CVX','BP'] consumerTickers = ['AAPL','PG','BUD','KO','TM'] #", "np.asmatrix(close) # Print the FINANCE correlation matrix print np.corrcoef(close, rowvar=False) # In[3]: ##", "['JPM','BAC','WFC','V','C'] # Define the data source data_source = 'yahoo' # Define the time-scale", "AllTickers[i],\"target\" : AllTickers[i + j],\"value\" : c[i,i+j]}) # bring together the two dictionaries", "index close = close.reindex(all_weekdays) # Drop the dates where one of the companies", "# Get the data panel_data = data.DataReader(AllTickers, data_source, start_date, end_date) # Getting just", "table as a matrix close = np.asmatrix(close) # Print the MATERIALS correlation matrix", "and link the stocks and connections with eachother to the correlation matrix. for", "# Print the MATERIALS correlation matrix print np.corrcoef(close, rowvar=False) # In[6]: ## CONSUMER", "together the two dictionaries into one big dict json_data = { \"nodes\": nodes,", "the two dictionaries into one big dict json_data = { \"nodes\": nodes, \"links\":", "np.corrcoef(close, rowvar=False) # In[6]: ## CONSUMER GOODS # Call the stocks I want", "techTickers = ['GOOGL','MSFT','FB','T','VZ'] # Define the data source data_source = 'yahoo' # Define", "stocks I want to analyze financeTickers = ['JPM','BAC','WFC','V','C'] # Define the data source", "SERVICES correlation matrix print np.corrcoef(close, rowvar=False) # In[5]: ## BASIC MATERIALS # Call", "set of dates? 
# All we need to do is reindex close using", "1 # In[8]: ## ALL INDUSTRIES # Call the stocks I want to", "= ['XOM','RDS-B','PTR','CVX','BP'] consumerTickers = ['AAPL','PG','BUD','KO','TM'] # group all tickers together AllTickers = ['JPM','BAC','WFC','V','C','GOOGL','MSFT','FB','T','VZ','AMZN','BABA','WMT','HD','CMCSA','XOM','RDS-B','PTR','CVX','BP','AAPL','PG','BUD','KO','TM']", "# Define the table as a matrix close = np.asmatrix(close) # Print the", "as a matrix close = np.asmatrix(close) # Print the TECH correlation matrix print", "The index in this DataFrame is the major index of the panel_data. close", "the TECH correlation matrix print np.corrcoef(close, rowvar=False) # In[4]: ## SERVICES # Call", "Call the stocks I want to analyze financeTickers = ['JPM','BAC','WFC','V','C'] # Define the", "big dict json_data = { \"nodes\": nodes, \"links\": links } network = json.dumps(json_data)", "gain. starting at the first price 1.0 close = close/close.iloc[0, :] close.to_csv('price_relative_gain.csv', encoding='utf-8')", "= '2018-01-01' # Get the data panel_data = data.DataReader(financeTickers, data_source, start_date, end_date) #", "np.corrcoef(close, rowvar=False) # In[45]: ## ALL INDUSTRIES # Call the stocks I want", "bring together the two dictionaries into one big dict json_data = { \"nodes\":", "return a Pandas DataFrame # The index in this DataFrame is the major", "'2000-01-01' end_date = '2018-01-01' # Get the data panel_data = data.DataReader(basicTickers, data_source, start_date,", "# Go through the stocks and link the stocks and connections with eachother", "How do we align the existing prices in adj_close with our new set", "elif i < len(financeTickers) + len(techTickers) + len(servicesTickers) + len(basicTickers): nodes.append({\"id\":AllTickers[i], \"group\": 4})", "close = close.reindex(all_weekdays) # Drop the dates where one of the companies wasn't", "np.zeros(len(AllTickers)) for i in range(len(AllTickers)): a[i] = np.sum(abs(close[i])) print a - 1 
#", "MATERIALS correlation matrix print np.corrcoef(close, rowvar=False) # In[6]: ## CONSUMER GOODS # Call", "do we align the existing prices in adj_close with our new set of", "to do is reindex close using all_weekdays as the new index close =", "close.dropna(axis=0, how='any') # Define the table as a matrix close = np.asmatrix(close) #", "= data.DataReader(techTickers, data_source, start_date, end_date) # Getting just the adjusted closing prices. This", "end_date = '2018-01-01' # Get the data panel_data = data.DataReader(basicTickers, data_source, start_date, end_date)", "the data panel_data = data.DataReader(techTickers, data_source, start_date, end_date) # Getting just the adjusted", "the existing prices in adj_close with our new set of dates? # All", "DataFrame is the major index of the panel_data close = panel_data.loc['Close'] # Getting", "1}) elif i < len(financeTickers) + len(techTickers): nodes.append({\"id\":AllTickers[i], \"group\": 2}) elif i <", "closing prices. This will return a Pandas DataFrame # The index in this", "want to analyze techTickers = ['GOOGL','MSFT','FB','T','VZ'] # Define the data source data_source =", "np.corrcoef(close, rowvar=False) # manipulate the data so that I can output the proper", "where one of the companies wasn't public yet close = close.dropna(axis=0, how='any') #", "the network nodes = [] # define the different industries by seperating it", "panel_data.loc['Close'] # Getting all weekdays between 01/01/2000 and 12/31/2016 all_weekdays = pd.date_range(start=start_date, end=end_date,", "Print the FINANCE correlation matrix print np.corrcoef(close, rowvar=False) # In[3]: ## TECH INDUSTRY", "links } network = json.dumps(json_data) # copied this print into a downloaded json", "panel_data = data.DataReader(servicesTickers, data_source, start_date, end_date) # Getting just the adjusted closing prices.", "# In[29]: ## ALL INDUSTRIES # Call the stocks I want to analyze", "correlation matrix print np.corrcoef(close, rowvar=False) 
# In[45]: ## ALL INDUSTRIES # Call the", "Drop the dates where one of the companies wasn't public yet close =", "start_date = '2000-01-01' end_date = '2018-01-01' # Get the data panel_data = data.DataReader(servicesTickers,", "panel_data = data.DataReader(financeTickers, data_source, start_date, end_date) # Getting just the adjusted closing prices.", "Pandas DataFrame # The index in this DataFrame is the major index of", "= np.zeros(len(AllTickers)) for i in range(len(AllTickers)): a[i] = np.sum(abs(close[i])) print a - 1", "to analyze techTickers = ['GOOGL','MSFT','FB','T','VZ'] # Define the data source data_source = 'yahoo'", "= np.matrix(close) # Define and print the correlation matrix in absolute values close", "# coding: utf-8 # In[1]: from pandas_datareader import data import matplotlib.pyplot as plt", "= '2018-01-01' # Get the data panel_data = data.DataReader(AllTickers, data_source, start_date, end_date) #", "a[i] = np.sum(abs(close[i])) print a - 1 # In[8]: ## ALL INDUSTRIES #", "< len(financeTickers) + len(techTickers) + len(servicesTickers) + len(basicTickers): nodes.append({\"id\":AllTickers[i], \"group\": 4}) else: nodes.append({\"id\":AllTickers[i],", "# bring together the two dictionaries into one big dict json_data = {", "nodes.append({\"id\":AllTickers[i], \"group\": 4}) else: nodes.append({\"id\":AllTickers[i], \"group\": 5}) links = [] # Go through", "= '2000-01-01' end_date = '2018-01-01' # Get the data panel_data = data.DataReader(financeTickers, data_source,", "Define the time-scale start_date = '2000-01-01' end_date = '2018-01-01' # Get the data", "= close.dropna(axis=0, how='any') # Define the table as a matrix close = np.matrix(close)", "stocks I want to analyze basicTickers = ['XOM','RDS-B','PTR','CVX','BP'] # Define the data source", "align the existing prices in adj_close with our new set of dates? 
#", "the data panel_data = data.DataReader(AllTickers, data_source, start_date, end_date) # Getting just the adjusted", "that I can output the proper format for the network nodes = []", "print np.corrcoef(close, rowvar=False) # In[3]: ## TECH INDUSTRY # Call the stocks I", "np.matrix(close) # Define and print the correlation matrix in absolute values close =", "data.DataReader(basicTickers, data_source, start_date, end_date) # Getting just the adjusted closing prices. This will", "rowvar=False) # manipulate the data so that I can output the proper format", "= close.dropna(axis=0, how='any') # Define the table as a matrix close = np.asmatrix(close)", "# manipulate the data so that I can output the proper format for", "json file. print network # In[29]: ## ALL INDUSTRIES # Call the stocks", "# Print the FINANCE correlation matrix print np.corrcoef(close, rowvar=False) # In[3]: ## TECH", "industries by seperating it in different groups for i in range(len(AllTickers)): if i", "'2000-01-01' end_date = '2018-01-01' # Get the data panel_data = data.DataReader(techTickers, data_source, start_date,", "to the correlation matrix. for i in range(len(AllTickers)): for j in range(1,len(AllTickers) -", "# Print the SERVICES correlation matrix print np.corrcoef(close, rowvar=False) # In[5]: ## BASIC", "defining relative gain. 
starting at the first price 1.0 close = close/close.iloc[0, :]", "# Call the stocks I want to analyze servicesTickers = ['AMZN','BABA','WMT','HD','CMCSA'] # Define", "dates where one of the companies wasn't public yet close = close.dropna(axis=0, how='any')", "table as a matrix close = np.asmatrix(close) # Print the SERVICES correlation matrix", "a matrix close = np.asmatrix(close) # Print the GOODS correlation matrix print np.corrcoef(close,", "= ['GOOGL','MSFT','FB','T','VZ'] # Define the data source data_source = 'yahoo' # Define the", "I want to analyze consumerTickers = ['AAPL','PG','BUD','KO','TM'] # Define the data source data_source", "the table as a matrix close = np.asmatrix(close) # Print the SERVICES correlation", "close = np.asmatrix(close) # Print the SERVICES correlation matrix print np.corrcoef(close, rowvar=False) #", "want to analyze consumerTickers = ['AAPL','PG','BUD','KO','TM'] # Define the data source data_source =", "data panel_data = data.DataReader(techTickers, data_source, start_date, end_date) # Getting just the adjusted closing", "data panel_data = data.DataReader(consumerTickers, data_source, start_date, end_date) # Getting just the adjusted closing", "the stocks and link the stocks and connections with eachother to the correlation", "# normalize de data by defining relative gain. starting at the first price", "network = json.dumps(json_data) # copied this print into a downloaded json file. 
print", "# In[6]: ## CONSUMER GOODS # Call the stocks I want to analyze", "'2018-01-01' # Get the data panel_data = data.DataReader(consumerTickers, data_source, start_date, end_date) # Getting", "is the major index of the panel_data close = panel_data.loc['Close'] # Getting all", "pd import numpy as np import json import io # In[2]: ## FINANCIAL", "c[i,i+j]}) # bring together the two dictionaries into one big dict json_data =", "Go through the stocks and link the stocks and connections with eachother to", "matrix print np.corrcoef(close, rowvar=False) # In[3]: ## TECH INDUSTRY # Call the stocks", "j in range(1,len(AllTickers) - i): links.append({\"source\" : AllTickers[i],\"target\" : AllTickers[i + j],\"value\" :", "data_source, start_date, end_date) # Getting just the adjusted closing prices. This will return", "servicesTickers = ['AMZN','BABA','WMT','HD','CMCSA'] # Define the data source data_source = 'yahoo' # Define", "for i in range(len(AllTickers)): for j in range(1,len(AllTickers) - i): links.append({\"source\" : AllTickers[i],\"target\"", "This will return a Pandas DataFrame # The index in this DataFrame is", "np.corrcoef(close, rowvar=False) a = np.zeros(len(AllTickers)) for i in range(len(AllTickers)): a[i] = np.sum(abs(close[i])) print", "values close = np.corrcoef(close, rowvar=False) a = np.zeros(len(AllTickers)) for i in range(len(AllTickers)): a[i]", "range(len(AllTickers)): a[i] = np.sum(abs(close[i])) print a - 1 # In[8]: ## ALL INDUSTRIES", "i < len(financeTickers): nodes.append({\"id\":AllTickers[i], \"group\": 1}) elif i < len(financeTickers) + len(techTickers): nodes.append({\"id\":AllTickers[i],", "# Call the stocks I want to analyze basicTickers = ['XOM','RDS-B','PTR','CVX','BP'] # Define", "the correlation matrix in absolute values close = np.corrcoef(close, rowvar=False) a = np.zeros(len(AllTickers))", "correlation matrix. 
for i in range(len(AllTickers)): for j in range(1,len(AllTickers) - i): links.append({\"source\"", "the new index close = close.reindex(all_weekdays) # Drop the dates where one of", "companies wasn't public yet close = close.dropna(axis=0, how='any') # normalize de data by", "the data panel_data = data.DataReader(basicTickers, data_source, start_date, end_date) # Getting just the adjusted", "in range(1,len(AllTickers) - i): links.append({\"source\" : AllTickers[i],\"target\" : AllTickers[i + j],\"value\" : c[i,i+j]})", "as a matrix close = np.asmatrix(close) # Print the GOODS correlation matrix print", "['AAPL','PG','BUD','KO','TM'] # group all tickers together AllTickers = ['JPM','BAC','WFC','V','C','GOOGL','MSFT','FB','T','VZ','AMZN','BABA','WMT','HD','CMCSA','XOM','RDS-B','PTR','CVX','BP','AAPL','PG','BUD','KO','TM'] # Define the data", "FINANCIAL INDUSTRY # Call the stocks I want to analyze financeTickers = ['JPM','BAC','WFC','V','C']", "np.corrcoef(close, rowvar=False) # In[5]: ## BASIC MATERIALS # Call the stocks I want", "In[6]: ## CONSUMER GOODS # Call the stocks I want to analyze consumerTickers", "and connections with eachother to the correlation matrix. for i in range(len(AllTickers)): for", "major index of the panel_data close = panel_data.loc['Close'] # Getting all weekdays between", "# Getting just the adjusted closing prices. 
This will return a Pandas DataFrame", "end=end_date, freq='B') # How do we align the existing prices in adj_close with", "INDUSTRY # Call the stocks I want to analyze financeTickers = ['JPM','BAC','WFC','V','C'] #", "close = np.asmatrix(close) # Print the GOODS correlation matrix print np.corrcoef(close, rowvar=False) #", "rowvar=False) # In[3]: ## TECH INDUSTRY # Call the stocks I want to", "close.reindex(all_weekdays) # Drop the dates where one of the companies wasn't public yet", "the SERVICES correlation matrix print np.corrcoef(close, rowvar=False) # In[5]: ## BASIC MATERIALS #", "len(servicesTickers) + len(basicTickers): nodes.append({\"id\":AllTickers[i], \"group\": 4}) else: nodes.append({\"id\":AllTickers[i], \"group\": 5}) links = []", "correlation matrix c = np.corrcoef(close, rowvar=False) # manipulate the data so that I", "analyze techTickers = ['GOOGL','MSFT','FB','T','VZ'] # Define the data source data_source = 'yahoo' #", ": AllTickers[i],\"target\" : AllTickers[i + j],\"value\" : c[i,i+j]}) # bring together the two", "5}) links = [] # Go through the stocks and link the stocks", "j],\"value\" : c[i,i+j]}) # bring together the two dictionaries into one big dict", "# Get the data panel_data = data.DataReader(financeTickers, data_source, start_date, end_date) # Getting just", "- i): links.append({\"source\" : AllTickers[i],\"target\" : AllTickers[i + j],\"value\" : c[i,i+j]}) # bring", "+ len(basicTickers): nodes.append({\"id\":AllTickers[i], \"group\": 4}) else: nodes.append({\"id\":AllTickers[i], \"group\": 5}) links = [] #", "financeTickers = ['JPM','BAC','WFC','V','C'] # Define the data source data_source = 'yahoo' # Define", "dictionaries into one big dict json_data = { \"nodes\": nodes, \"links\": links }", "correlation matrix print np.corrcoef(close, rowvar=False) # In[3]: ## TECH INDUSTRY # Call the", "together AllTickers = 
['JPM','BAC','WFC','V','C','GOOGL','MSFT','FB','T','VZ','AMZN','BABA','WMT','HD','CMCSA','XOM','RDS-B','PTR','CVX','BP','AAPL','PG','BUD','KO','TM'] # Define the data source data_source = 'yahoo' #", "close.dropna(axis=0, how='any') # Define the table as a matrix close = np.matrix(close) #", "= '2018-01-01' # Get the data panel_data = data.DataReader(techTickers, data_source, start_date, end_date) #", "= data.DataReader(basicTickers, data_source, start_date, end_date) # Getting just the adjusted closing prices. This", "np import json import io # In[2]: ## FINANCIAL INDUSTRY # Call the", "FINANCE correlation matrix print np.corrcoef(close, rowvar=False) # In[3]: ## TECH INDUSTRY # Call", "output the proper format for the network nodes = [] # define the", "two dictionaries into one big dict json_data = { \"nodes\": nodes, \"links\": links", "# In[3]: ## TECH INDUSTRY # Call the stocks I want to analyze", "Define the table as a matrix close = np.asmatrix(close) # Print the FINANCE", "the companies wasn't public yet close = close.dropna(axis=0, how='any') # Define the table", "rowvar=False) # In[6]: ## CONSUMER GOODS # Call the stocks I want to", "['AMZN','BABA','WMT','HD','CMCSA'] basicTickers = ['XOM','RDS-B','PTR','CVX','BP'] consumerTickers = ['AAPL','PG','BUD','KO','TM'] # group all tickers together AllTickers", "start_date = '2000-01-01' end_date = '2018-01-01' # Get the data panel_data = data.DataReader(AllTickers,", "through the stocks and link the stocks and connections with eachother to the", "the GOODS correlation matrix print np.corrcoef(close, rowvar=False) # In[45]: ## ALL INDUSTRIES #", "with eachother to the correlation matrix. 
for i in range(len(AllTickers)): for j in", "absolute values close = np.corrcoef(close, rowvar=False) a = np.zeros(len(AllTickers)) for i in range(len(AllTickers)):", "## SERVICES # Call the stocks I want to analyze servicesTickers = ['AMZN','BABA','WMT','HD','CMCSA']", "'2000-01-01' end_date = '2018-01-01' # Get the data panel_data = data.DataReader(servicesTickers, data_source, start_date,", "i in range(len(AllTickers)): for j in range(1,len(AllTickers) - i): links.append({\"source\" : AllTickers[i],\"target\" :", "the time-scale start_date = '2000-01-01' end_date = '2018-01-01' # Get the data panel_data", "I want to analyze financeTickers = ['JPM','BAC','WFC','V','C'] techTickers = ['GOOGL','MSFT','FB','T','VZ'] servicesTickers = ['AMZN','BABA','WMT','HD','CMCSA']", "= np.matrix(close) # Print the correlation matrix c = np.corrcoef(close, rowvar=False) # manipulate", "in this DataFrame is the major index of the panel_data close = panel_data.loc['Close']", "Define the table as a matrix close = np.matrix(close) # Define and print", "c = np.corrcoef(close, rowvar=False) # manipulate the data so that I can output", "into a downloaded json file. print network # In[29]: ## ALL INDUSTRIES #", "prices in adj_close with our new set of dates? # All we need", "reindex close using all_weekdays as the new index close = close.reindex(all_weekdays) # Drop", "= 'yahoo' # Define the time-scale start_date = '2000-01-01' end_date = '2018-01-01' #", "as np import json import io # In[2]: ## FINANCIAL INDUSTRY # Call", "data source data_source = 'yahoo' # Define the time-scale start_date = '2000-01-01' end_date", "wasn't public yet close = close.dropna(axis=0, how='any') # Define the table as a", "[] # Go through the stocks and link the stocks and connections with", "range(1,len(AllTickers) - i): links.append({\"source\" : AllTickers[i],\"target\" : AllTickers[i + j],\"value\" : c[i,i+j]}) #", "copied this print into a downloaded json file. 
print network # In[29]: ##", "np.matrix(close) # Print the correlation matrix c = np.corrcoef(close, rowvar=False) # manipulate the", "of dates? # All we need to do is reindex close using all_weekdays", "to analyze financeTickers = ['JPM','BAC','WFC','V','C'] techTickers = ['GOOGL','MSFT','FB','T','VZ'] servicesTickers = ['AMZN','BABA','WMT','HD','CMCSA'] basicTickers =", "pandas as pd import numpy as np import json import io # In[2]:", "panel_data = data.DataReader(techTickers, data_source, start_date, end_date) # Getting just the adjusted closing prices.", "the stocks and connections with eachother to the correlation matrix. for i in", "I can output the proper format for the network nodes = [] #", "data so that I can output the proper format for the network nodes", "I want to analyze financeTickers = ['JPM','BAC','WFC','V','C'] # Define the data source data_source", "print np.corrcoef(close, rowvar=False) # In[45]: ## ALL INDUSTRIES # Call the stocks I", "this print into a downloaded json file. print network # In[29]: ## ALL", "pd.date_range(start=start_date, end=end_date, freq='B') # How do we align the existing prices in adj_close", "i < len(financeTickers) + len(techTickers) + len(servicesTickers): nodes.append({\"id\":AllTickers[i], \"group\": 3}) elif i <", "different industries by seperating it in different groups for i in range(len(AllTickers)): if", "the data panel_data = data.DataReader(financeTickers, data_source, start_date, end_date) # Getting just the adjusted", "import numpy as np import json import io # In[2]: ## FINANCIAL INDUSTRY", "eachother to the correlation matrix. for i in range(len(AllTickers)): for j in range(1,len(AllTickers)", "All we need to do is reindex close using all_weekdays as the new", "np.asmatrix(close) # Print the SERVICES correlation matrix print np.corrcoef(close, rowvar=False) # In[5]: ##", "= close.dropna(axis=0, how='any') # normalize de data by defining relative gain. starting at", "the panel_data. 
close = panel_data.loc['Close'] # Getting all weekdays between 01/01/2000 and 12/31/2016", "the major index of the panel_data. close = panel_data.loc['Close'] # Getting all weekdays", "len(basicTickers): nodes.append({\"id\":AllTickers[i], \"group\": 4}) else: nodes.append({\"id\":AllTickers[i], \"group\": 5}) links = [] # Go", "\"group\": 5}) links = [] # Go through the stocks and link the", "from pandas_datareader import data import matplotlib.pyplot as plt import pandas as pd import", "Call the stocks I want to analyze consumerTickers = ['AAPL','PG','BUD','KO','TM'] # Define the", "len(financeTickers) + len(techTickers) + len(servicesTickers) + len(basicTickers): nodes.append({\"id\":AllTickers[i], \"group\": 4}) else: nodes.append({\"id\":AllTickers[i], \"group\":", "'2018-01-01' # Get the data panel_data = data.DataReader(AllTickers, data_source, start_date, end_date) # Getting", "matrix. for i in range(len(AllTickers)): for j in range(1,len(AllTickers) - i): links.append({\"source\" :", "a matrix close = np.asmatrix(close) # Print the FINANCE correlation matrix print np.corrcoef(close,", "+ len(techTickers) + len(servicesTickers): nodes.append({\"id\":AllTickers[i], \"group\": 3}) elif i < len(financeTickers) + len(techTickers)", "format for the network nodes = [] # define the different industries by", "# Get the data panel_data = data.DataReader(servicesTickers, data_source, start_date, end_date) # Getting just", "plt import pandas as pd import numpy as np import json import io", "'2000-01-01' end_date = '2018-01-01' # Get the data panel_data = data.DataReader(financeTickers, data_source, start_date,", ": c[i,i+j]}) # bring together the two dictionaries into one big dict json_data", "[] # define the different industries by seperating it in different groups for", "in range(len(AllTickers)): for j in range(1,len(AllTickers) - i): links.append({\"source\" : AllTickers[i],\"target\" : AllTickers[i", "analyze servicesTickers = ['AMZN','BABA','WMT','HD','CMCSA'] # 
Define the data source data_source = 'yahoo' #", "by seperating it in different groups for i in range(len(AllTickers)): if i <", "# Print the correlation matrix c = np.corrcoef(close, rowvar=False) # manipulate the data", "close = np.asmatrix(close) # Print the FINANCE correlation matrix print np.corrcoef(close, rowvar=False) #", "matrix close = np.asmatrix(close) # Print the SERVICES correlation matrix print np.corrcoef(close, rowvar=False)", "just the adjusted closing prices. This will return a Pandas DataFrame # The", "Call the stocks I want to analyze basicTickers = ['XOM','RDS-B','PTR','CVX','BP'] # Define the", "I want to analyze servicesTickers = ['AMZN','BABA','WMT','HD','CMCSA'] # Define the data source data_source", "# Getting all weekdays between 01/01/2000 and 12/31/2016 all_weekdays = pd.date_range(start=start_date, end=end_date, freq='B')", "analyze consumerTickers = ['AAPL','PG','BUD','KO','TM'] # Define the data source data_source = 'yahoo' #", "data.DataReader(AllTickers, data_source, start_date, end_date) # Getting just the adjusted closing prices. This will", "stocks and connections with eachother to the correlation matrix. for i in range(len(AllTickers)):", "Get the data panel_data = data.DataReader(basicTickers, data_source, start_date, end_date) # Getting just the", "define the different industries by seperating it in different groups for i in", "['GOOGL','MSFT','FB','T','VZ'] servicesTickers = ['AMZN','BABA','WMT','HD','CMCSA'] basicTickers = ['XOM','RDS-B','PTR','CVX','BP'] consumerTickers = ['AAPL','PG','BUD','KO','TM'] # group all", "weekdays between 01/01/2000 and 12/31/2016 all_weekdays = pd.date_range(start=start_date, end=end_date, freq='B') # How do", "BASIC MATERIALS # Call the stocks I want to analyze basicTickers = ['XOM','RDS-B','PTR','CVX','BP']", "numpy as np import json import io # In[2]: ## FINANCIAL INDUSTRY #", "existing prices in adj_close with our new set of dates? 
# All we", "nodes = [] # define the different industries by seperating it in different", "this DataFrame is the major index of the panel_data close = panel_data.loc['Close'] #", "as pd import numpy as np import json import io # In[2]: ##", "Call the stocks I want to analyze financeTickers = ['JPM','BAC','WFC','V','C'] techTickers = ['GOOGL','MSFT','FB','T','VZ']", "['XOM','RDS-B','PTR','CVX','BP'] consumerTickers = ['AAPL','PG','BUD','KO','TM'] # group all tickers together AllTickers = ['JPM','BAC','WFC','V','C','GOOGL','MSFT','FB','T','VZ','AMZN','BABA','WMT','HD','CMCSA','XOM','RDS-B','PTR','CVX','BP','AAPL','PG','BUD','KO','TM'] #", "MATERIALS # Call the stocks I want to analyze basicTickers = ['XOM','RDS-B','PTR','CVX','BP'] #", "# In[1]: from pandas_datareader import data import matplotlib.pyplot as plt import pandas as", "data.DataReader(financeTickers, data_source, start_date, end_date) # Getting just the adjusted closing prices. This will", "i in range(len(AllTickers)): a[i] = np.sum(abs(close[i])) print a - 1 # In[8]: ##", "wasn't public yet close = close.dropna(axis=0, how='any') # normalize de data by defining", "'2018-01-01' # Get the data panel_data = data.DataReader(servicesTickers, data_source, start_date, end_date) # Getting", "start_date = '2000-01-01' end_date = '2018-01-01' # Get the data panel_data = data.DataReader(financeTickers,", "# Call the stocks I want to analyze financeTickers = ['JPM','BAC','WFC','V','C'] # Define", "io # In[2]: ## FINANCIAL INDUSTRY # Call the stocks I want to", "< len(financeTickers) + len(techTickers): nodes.append({\"id\":AllTickers[i], \"group\": 2}) elif i < len(financeTickers) + len(techTickers)", "'2000-01-01' end_date = '2018-01-01' # Get the data panel_data = data.DataReader(consumerTickers, data_source, start_date,", "'2018-01-01' # Get the data panel_data = data.DataReader(financeTickers, data_source, start_date, end_date) # Getting", "want to analyze financeTickers = ['JPM','BAC','WFC','V','C'] techTickers 
= ['GOOGL','MSFT','FB','T','VZ'] servicesTickers = ['AMZN','BABA','WMT','HD','CMCSA'] basicTickers", "one of the companies wasn't public yet close = close.dropna(axis=0, how='any') # normalize", "= panel_data.loc['Close'] # Getting all weekdays between 01/01/2000 and 12/31/2016 all_weekdays = pd.date_range(start=start_date,", "# Define the table as a matrix close = np.matrix(close) # Print the", "= ['XOM','RDS-B','PTR','CVX','BP'] # Define the data source data_source = 'yahoo' # Define the", "start_date = '2000-01-01' end_date = '2018-01-01' # Get the data panel_data = data.DataReader(basicTickers,", "data import matplotlib.pyplot as plt import pandas as pd import numpy as np", "the data panel_data = data.DataReader(consumerTickers, data_source, start_date, end_date) # Getting just the adjusted", "start_date = '2000-01-01' end_date = '2018-01-01' # Get the data panel_data = data.DataReader(techTickers,", "+ j],\"value\" : c[i,i+j]}) # bring together the two dictionaries into one big", "one big dict json_data = { \"nodes\": nodes, \"links\": links } network =", "np.asmatrix(close) # Print the TECH correlation matrix print np.corrcoef(close, rowvar=False) # In[4]: ##", "end_date = '2018-01-01' # Get the data panel_data = data.DataReader(AllTickers, data_source, start_date, end_date)", "end_date = '2018-01-01' # Get the data panel_data = data.DataReader(servicesTickers, data_source, start_date, end_date)", "## TECH INDUSTRY # Call the stocks I want to analyze techTickers =", "relative gain. 
starting at the first price 1.0 close = close/close.iloc[0, :] close.to_csv('price_relative_gain.csv',", "## BASIC MATERIALS # Call the stocks I want to analyze basicTickers =", "Get the data panel_data = data.DataReader(techTickers, data_source, start_date, end_date) # Getting just the", "utf-8 # In[1]: from pandas_datareader import data import matplotlib.pyplot as plt import pandas", "matrix close = np.asmatrix(close) # Print the MATERIALS correlation matrix print np.corrcoef(close, rowvar=False)", "table as a matrix close = np.matrix(close) # Define and print the correlation", "all weekdays between 01/01/2000 and 12/31/2016 all_weekdays = pd.date_range(start=start_date, end=end_date, freq='B') # How", "all_weekdays = pd.date_range(start=start_date, end=end_date, freq='B') # How do we align the existing prices", "= np.asmatrix(close) # Print the FINANCE correlation matrix print np.corrcoef(close, rowvar=False) # In[3]:", "all tickers together AllTickers = ['JPM','BAC','WFC','V','C','GOOGL','MSFT','FB','T','VZ','AMZN','BABA','WMT','HD','CMCSA','XOM','RDS-B','PTR','CVX','BP','AAPL','PG','BUD','KO','TM'] # Define the data source data_source =", "< len(financeTickers) + len(techTickers) + len(servicesTickers): nodes.append({\"id\":AllTickers[i], \"group\": 3}) elif i < len(financeTickers)", "4}) else: nodes.append({\"id\":AllTickers[i], \"group\": 5}) links = [] # Go through the stocks", "## FINANCIAL INDUSTRY # Call the stocks I want to analyze financeTickers =", "# The index in this DataFrame is the major index of the panel_data.", "panel_data = data.DataReader(basicTickers, data_source, start_date, end_date) # Getting just the adjusted closing prices.", "GOODS # Call the stocks I want to analyze consumerTickers = ['AAPL','PG','BUD','KO','TM'] #", "\"group\": 1}) elif i < len(financeTickers) + len(techTickers): nodes.append({\"id\":AllTickers[i], \"group\": 2}) elif i", "close = np.corrcoef(close, rowvar=False) a = np.zeros(len(AllTickers)) for i in 
range(len(AllTickers)): a[i] =", "= ['JPM','BAC','WFC','V','C'] # Define the data source data_source = 'yahoo' # Define the", "time-scale start_date = '2000-01-01' end_date = '2018-01-01' # Get the data panel_data =", "['AAPL','PG','BUD','KO','TM'] # Define the data source data_source = 'yahoo' # Define the time-scale", "< len(financeTickers): nodes.append({\"id\":AllTickers[i], \"group\": 1}) elif i < len(financeTickers) + len(techTickers): nodes.append({\"id\":AllTickers[i], \"group\":", "= data.DataReader(financeTickers, data_source, start_date, end_date) # Getting just the adjusted closing prices. This", "for i in range(len(AllTickers)): if i < len(financeTickers): nodes.append({\"id\":AllTickers[i], \"group\": 1}) elif i", "# Define the time-scale start_date = '2000-01-01' end_date = '2018-01-01' # Get the", "in range(len(AllTickers)): a[i] = np.sum(abs(close[i])) print a - 1 # In[8]: ## ALL", "pandas_datareader import data import matplotlib.pyplot as plt import pandas as pd import numpy", "# In[8]: ## ALL INDUSTRIES # Call the stocks I want to analyze", "# In[2]: ## FINANCIAL INDUSTRY # Call the stocks I want to analyze", "the correlation matrix. for i in range(len(AllTickers)): for j in range(1,len(AllTickers) - i):", "= np.asmatrix(close) # Print the SERVICES correlation matrix print np.corrcoef(close, rowvar=False) # In[5]:", "stocks and link the stocks and connections with eachother to the correlation matrix.", "the table as a matrix close = np.matrix(close) # Print the correlation matrix", "in absolute values close = np.corrcoef(close, rowvar=False) a = np.zeros(len(AllTickers)) for i in", "= '2000-01-01' end_date = '2018-01-01' # Get the data panel_data = data.DataReader(basicTickers, data_source,", "# group all tickers together AllTickers = ['JPM','BAC','WFC','V','C','GOOGL','MSFT','FB','T','VZ','AMZN','BABA','WMT','HD','CMCSA','XOM','RDS-B','PTR','CVX','BP','AAPL','PG','BUD','KO','TM'] # Define the data source", "prices. 
This will return a Pandas DataFrame # The index in this DataFrame", "= { \"nodes\": nodes, \"links\": links } network = json.dumps(json_data) # copied this", "= ['AMZN','BABA','WMT','HD','CMCSA'] # Define the data source data_source = 'yahoo' # Define the", "np.sum(abs(close[i])) print a - 1 # In[8]: ## ALL INDUSTRIES # Call the", "we need to do is reindex close using all_weekdays as the new index", "the FINANCE correlation matrix print np.corrcoef(close, rowvar=False) # In[3]: ## TECH INDUSTRY #", "close.dropna(axis=0, how='any') # normalize de data by defining relative gain. starting at the", "as the new index close = close.reindex(all_weekdays) # Drop the dates where one", "Print the GOODS correlation matrix print np.corrcoef(close, rowvar=False) # In[45]: ## ALL INDUSTRIES", "3}) elif i < len(financeTickers) + len(techTickers) + len(servicesTickers) + len(basicTickers): nodes.append({\"id\":AllTickers[i], \"group\":", "data by defining relative gain. starting at the first price 1.0 close =", "financeTickers = ['JPM','BAC','WFC','V','C'] techTickers = ['GOOGL','MSFT','FB','T','VZ'] servicesTickers = ['AMZN','BABA','WMT','HD','CMCSA'] basicTickers = ['XOM','RDS-B','PTR','CVX','BP'] consumerTickers", "+ len(servicesTickers) + len(basicTickers): nodes.append({\"id\":AllTickers[i], \"group\": 4}) else: nodes.append({\"id\":AllTickers[i], \"group\": 5}) links =", "print np.corrcoef(close, rowvar=False) # In[5]: ## BASIC MATERIALS # Call the stocks I", "i): links.append({\"source\" : AllTickers[i],\"target\" : AllTickers[i + j],\"value\" : c[i,i+j]}) # bring together", "matrix close = np.matrix(close) # Define and print the correlation matrix in absolute", "= '2000-01-01' end_date = '2018-01-01' # Get the data panel_data = data.DataReader(servicesTickers, data_source,", "to analyze financeTickers = ['JPM','BAC','WFC','V','C'] # Define the data source data_source = 'yahoo'", "new index close = close.reindex(all_weekdays) # Drop the dates where one of the", "de data by 
defining relative gain. starting at the first price 1.0 close", "adjusted closing prices. This will return a Pandas DataFrame # The index in", "how='any') # Define the table as a matrix close = np.matrix(close) # Define", "our new set of dates? # All we need to do is reindex", "the correlation matrix c = np.corrcoef(close, rowvar=False) # manipulate the data so that", "can output the proper format for the network nodes = [] # define", "correlation matrix print np.corrcoef(close, rowvar=False) # In[5]: ## BASIC MATERIALS # Call the", "stocks I want to analyze consumerTickers = ['AAPL','PG','BUD','KO','TM'] # Define the data source", "np.corrcoef(close, rowvar=False) # In[3]: ## TECH INDUSTRY # Call the stocks I want", "data_source = 'yahoo' # Define the time-scale start_date = '2000-01-01' end_date = '2018-01-01'", "close = np.matrix(close) # Print the correlation matrix c = np.corrcoef(close, rowvar=False) #", "json import io # In[2]: ## FINANCIAL INDUSTRY # Call the stocks I", "in this DataFrame is the major index of the panel_data. 
close = panel_data.loc['Close']", "In[8]: ## ALL INDUSTRIES # Call the stocks I want to analyze financeTickers", "= '2018-01-01' # Get the data panel_data = data.DataReader(servicesTickers, data_source, start_date, end_date) #", "nodes, \"links\": links } network = json.dumps(json_data) # copied this print into a", "= close.reindex(all_weekdays) # Drop the dates where one of the companies wasn't public", "AllTickers[i + j],\"value\" : c[i,i+j]}) # bring together the two dictionaries into one", "INDUSTRIES # Call the stocks I want to analyze financeTickers = ['JPM','BAC','WFC','V','C'] techTickers", "close = np.asmatrix(close) # Print the TECH correlation matrix print np.corrcoef(close, rowvar=False) #", "table as a matrix close = np.matrix(close) # Print the correlation matrix c", "stocks I want to analyze techTickers = ['GOOGL','MSFT','FB','T','VZ'] # Define the data source", "In[29]: ## ALL INDUSTRIES # Call the stocks I want to analyze financeTickers", "a matrix close = np.asmatrix(close) # Print the TECH correlation matrix print np.corrcoef(close,", "into one big dict json_data = { \"nodes\": nodes, \"links\": links } network", "elif i < len(financeTickers) + len(techTickers): nodes.append({\"id\":AllTickers[i], \"group\": 2}) elif i < len(financeTickers)", "basicTickers = ['XOM','RDS-B','PTR','CVX','BP'] consumerTickers = ['AAPL','PG','BUD','KO','TM'] # group all tickers together AllTickers =", "by defining relative gain. starting at the first price 1.0 close = close/close.iloc[0,", "data.DataReader(servicesTickers, data_source, start_date, end_date) # Getting just the adjusted closing prices. 
This will", "the panel_data close = panel_data.loc['Close'] # Getting all weekdays between 01/01/2000 and 12/31/2016", "need to do is reindex close using all_weekdays as the new index close", "a = np.zeros(len(AllTickers)) for i in range(len(AllTickers)): a[i] = np.sum(abs(close[i])) print a -", "print np.corrcoef(close, rowvar=False) # In[6]: ## CONSUMER GOODS # Call the stocks I", "- 1 # In[8]: ## ALL INDUSTRIES # Call the stocks I want", "'2000-01-01' end_date = '2018-01-01' # Get the data panel_data = data.DataReader(AllTickers, data_source, start_date,", "rowvar=False) # In[45]: ## ALL INDUSTRIES # Call the stocks I want to", "and print the correlation matrix in absolute values close = np.corrcoef(close, rowvar=False) a", "# The index in this DataFrame is the major index of the panel_data", "a Pandas DataFrame # The index in this DataFrame is the major index", "the table as a matrix close = np.asmatrix(close) # Print the FINANCE correlation", "The index in this DataFrame is the major index of the panel_data close", "Define the table as a matrix close = np.asmatrix(close) # Print the SERVICES", "proper format for the network nodes = [] # define the different industries", "matrix in absolute values close = np.corrcoef(close, rowvar=False) a = np.zeros(len(AllTickers)) for i", "= '2000-01-01' end_date = '2018-01-01' # Get the data panel_data = data.DataReader(AllTickers, data_source,", "matrix c = np.corrcoef(close, rowvar=False) # manipulate the data so that I can", "of the panel_data close = panel_data.loc['Close'] # Getting all weekdays between 01/01/2000 and", "we align the existing prices in adj_close with our new set of dates?", "a matrix close = np.matrix(close) # Print the correlation matrix c = np.corrcoef(close,", "will return a Pandas DataFrame # The index in this DataFrame is the", "using all_weekdays as the new index close = close.reindex(all_weekdays) # Drop the dates", "# Print the GOODS correlation matrix print np.corrcoef(close, rowvar=False) # 
In[45]: ## ALL", "else: nodes.append({\"id\":AllTickers[i], \"group\": 5}) links = [] # Go through the stocks and", "Get the data panel_data = data.DataReader(financeTickers, data_source, start_date, end_date) # Getting just the", "json_data = { \"nodes\": nodes, \"links\": links } network = json.dumps(json_data) # copied", "the stocks I want to analyze consumerTickers = ['AAPL','PG','BUD','KO','TM'] # Define the data", "= '2018-01-01' # Get the data panel_data = data.DataReader(basicTickers, data_source, start_date, end_date) #", "index of the panel_data close = panel_data.loc['Close'] # Getting all weekdays between 01/01/2000", "index in this DataFrame is the major index of the panel_data close =", "Call the stocks I want to analyze servicesTickers = ['AMZN','BABA','WMT','HD','CMCSA'] # Define the", "= data.DataReader(servicesTickers, data_source, start_date, end_date) # Getting just the adjusted closing prices. This", "range(len(AllTickers)): for j in range(1,len(AllTickers) - i): links.append({\"source\" : AllTickers[i],\"target\" : AllTickers[i +", "in range(len(AllTickers)): if i < len(financeTickers): nodes.append({\"id\":AllTickers[i], \"group\": 1}) elif i < len(financeTickers)", "matrix close = np.asmatrix(close) # Print the FINANCE correlation matrix print np.corrcoef(close, rowvar=False)", "this DataFrame is the major index of the panel_data. 
close = panel_data.loc['Close'] #", "import pandas as pd import numpy as np import json import io #", "GOODS correlation matrix print np.corrcoef(close, rowvar=False) # In[45]: ## ALL INDUSTRIES # Call", "'2018-01-01' # Get the data panel_data = data.DataReader(techTickers, data_source, start_date, end_date) # Getting", "the MATERIALS correlation matrix print np.corrcoef(close, rowvar=False) # In[6]: ## CONSUMER GOODS #", "= np.corrcoef(close, rowvar=False) a = np.zeros(len(AllTickers)) for i in range(len(AllTickers)): a[i] = np.sum(abs(close[i]))", "nodes.append({\"id\":AllTickers[i], \"group\": 5}) links = [] # Go through the stocks and link", "## ALL INDUSTRIES # Call the stocks I want to analyze financeTickers =", "a downloaded json file. print network # In[29]: ## ALL INDUSTRIES # Call", "} network = json.dumps(json_data) # copied this print into a downloaded json file.", "TECH INDUSTRY # Call the stocks I want to analyze techTickers = ['GOOGL','MSFT','FB','T','VZ']", "analyze financeTickers = ['JPM','BAC','WFC','V','C'] techTickers = ['GOOGL','MSFT','FB','T','VZ'] servicesTickers = ['AMZN','BABA','WMT','HD','CMCSA'] basicTickers = ['XOM','RDS-B','PTR','CVX','BP']", "nodes.append({\"id\":AllTickers[i], \"group\": 3}) elif i < len(financeTickers) + len(techTickers) + len(servicesTickers) + len(basicTickers):", "+ len(servicesTickers): nodes.append({\"id\":AllTickers[i], \"group\": 3}) elif i < len(financeTickers) + len(techTickers) + len(servicesTickers)", "json.dumps(json_data) # copied this print into a downloaded json file. 
print network #", "np.asmatrix(close) # Print the GOODS correlation matrix print np.corrcoef(close, rowvar=False) # In[45]: ##", "Print the SERVICES correlation matrix print np.corrcoef(close, rowvar=False) # In[5]: ## BASIC MATERIALS", "\"nodes\": nodes, \"links\": links } network = json.dumps(json_data) # copied this print into", "Define the data source data_source = 'yahoo' # Define the time-scale start_date =", "12/31/2016 all_weekdays = pd.date_range(start=start_date, end=end_date, freq='B') # How do we align the existing", "servicesTickers = ['AMZN','BABA','WMT','HD','CMCSA'] basicTickers = ['XOM','RDS-B','PTR','CVX','BP'] consumerTickers = ['AAPL','PG','BUD','KO','TM'] # group all tickers", "= ['AAPL','PG','BUD','KO','TM'] # group all tickers together AllTickers = ['JPM','BAC','WFC','V','C','GOOGL','MSFT','FB','T','VZ','AMZN','BABA','WMT','HD','CMCSA','XOM','RDS-B','PTR','CVX','BP','AAPL','PG','BUD','KO','TM'] # Define the", "the data so that I can output the proper format for the network", "different groups for i in range(len(AllTickers)): if i < len(financeTickers): nodes.append({\"id\":AllTickers[i], \"group\": 1})", "want to analyze servicesTickers = ['AMZN','BABA','WMT','HD','CMCSA'] # Define the data source data_source =", "as a matrix close = np.asmatrix(close) # Print the SERVICES correlation matrix print", "consumerTickers = ['AAPL','PG','BUD','KO','TM'] # group all tickers together AllTickers = ['JPM','BAC','WFC','V','C','GOOGL','MSFT','FB','T','VZ','AMZN','BABA','WMT','HD','CMCSA','XOM','RDS-B','PTR','CVX','BP','AAPL','PG','BUD','KO','TM'] # Define", "end_date = '2018-01-01' # Get the data panel_data = data.DataReader(consumerTickers, data_source, start_date, end_date)", "Call the stocks I want to analyze techTickers = ['GOOGL','MSFT','FB','T','VZ'] # Define the", "data panel_data = data.DataReader(AllTickers, data_source, start_date, end_date) # Getting just the adjusted closing", "len(financeTickers) + len(techTickers) + len(servicesTickers): 
nodes.append({\"id\":AllTickers[i], \"group\": 3}) elif i < len(financeTickers) +", "= pd.date_range(start=start_date, end=end_date, freq='B') # How do we align the existing prices in", "end_date) # Getting just the adjusted closing prices. This will return a Pandas", "is reindex close using all_weekdays as the new index close = close.reindex(all_weekdays) #", "data panel_data = data.DataReader(basicTickers, data_source, start_date, end_date) # Getting just the adjusted closing", "and 12/31/2016 all_weekdays = pd.date_range(start=start_date, end=end_date, freq='B') # How do we align the", "rowvar=False) # In[4]: ## SERVICES # Call the stocks I want to analyze", "close = panel_data.loc['Close'] # Getting all weekdays between 01/01/2000 and 12/31/2016 all_weekdays =", "import matplotlib.pyplot as plt import pandas as pd import numpy as np import", "TECH correlation matrix print np.corrcoef(close, rowvar=False) # In[4]: ## SERVICES # Call the", "want to analyze financeTickers = ['JPM','BAC','WFC','V','C'] # Define the data source data_source =", "Define the table as a matrix close = np.matrix(close) # Print the correlation", "with our new set of dates? # All we need to do is", "np.corrcoef(close, rowvar=False) # In[4]: ## SERVICES # Call the stocks I want to", "# In[4]: ## SERVICES # Call the stocks I want to analyze servicesTickers", "{ \"nodes\": nodes, \"links\": links } network = json.dumps(json_data) # copied this print", "a matrix close = np.asmatrix(close) # Print the SERVICES correlation matrix print np.corrcoef(close,", "'yahoo' # Define the time-scale start_date = '2000-01-01' end_date = '2018-01-01' # Get", "file. print network # In[29]: ## ALL INDUSTRIES # Call the stocks I", "close = close.dropna(axis=0, how='any') # Define the table as a matrix close =", "close = close.dropna(axis=0, how='any') # normalize de data by defining relative gain. 
starting", "+ len(techTickers) + len(servicesTickers) + len(basicTickers): nodes.append({\"id\":AllTickers[i], \"group\": 4}) else: nodes.append({\"id\":AllTickers[i], \"group\": 5})", "start_date, end_date) # Getting just the adjusted closing prices. This will return a", "= '2000-01-01' end_date = '2018-01-01' # Get the data panel_data = data.DataReader(techTickers, data_source,", "## CONSUMER GOODS # Call the stocks I want to analyze consumerTickers =", "rowvar=False) # In[5]: ## BASIC MATERIALS # Call the stocks I want to", "In[5]: ## BASIC MATERIALS # Call the stocks I want to analyze basicTickers", "the data source data_source = 'yahoo' # Define the time-scale start_date = '2000-01-01'", "a matrix close = np.matrix(close) # Define and print the correlation matrix in", "In[45]: ## ALL INDUSTRIES # Call the stocks I want to analyze financeTickers", "In[3]: ## TECH INDUSTRY # Call the stocks I want to analyze techTickers", "so that I can output the proper format for the network nodes =", "# Drop the dates where one of the companies wasn't public yet close", "to analyze servicesTickers = ['AMZN','BABA','WMT','HD','CMCSA'] # Define the data source data_source = 'yahoo'", "a matrix close = np.asmatrix(close) # Print the MATERIALS correlation matrix print np.corrcoef(close,", "= np.corrcoef(close, rowvar=False) # manipulate the data so that I can output the", "rowvar=False) a = np.zeros(len(AllTickers)) for i in range(len(AllTickers)): a[i] = np.sum(abs(close[i])) print a", "= np.sum(abs(close[i])) print a - 1 # In[8]: ## ALL INDUSTRIES # Call", "for the network nodes = [] # define the different industries by seperating", "links.append({\"source\" : AllTickers[i],\"target\" : AllTickers[i + j],\"value\" : c[i,i+j]}) # bring together the", "to analyze basicTickers = ['XOM','RDS-B','PTR','CVX','BP'] # Define the data source data_source = 'yahoo'", "DataFrame is the major index of the panel_data. 
close = panel_data.loc['Close'] # Getting", "for i in range(len(AllTickers)): a[i] = np.sum(abs(close[i])) print a - 1 # In[8]:", "seperating it in different groups for i in range(len(AllTickers)): if i < len(financeTickers):", "Print the MATERIALS correlation matrix print np.corrcoef(close, rowvar=False) # In[6]: ## CONSUMER GOODS", "print into a downloaded json file. print network # In[29]: ## ALL INDUSTRIES", "= data.DataReader(consumerTickers, data_source, start_date, end_date) # Getting just the adjusted closing prices. This", "companies wasn't public yet close = close.dropna(axis=0, how='any') # Define the table as", "matrix print np.corrcoef(close, rowvar=False) # In[4]: ## SERVICES # Call the stocks I", "'2018-01-01' # Get the data panel_data = data.DataReader(basicTickers, data_source, start_date, end_date) # Getting", "Define the table as a matrix close = np.asmatrix(close) # Print the MATERIALS", "table as a matrix close = np.asmatrix(close) # Print the FINANCE correlation matrix", "= '2018-01-01' # Get the data panel_data = data.DataReader(consumerTickers, data_source, start_date, end_date) #", "the different industries by seperating it in different groups for i in range(len(AllTickers)):", "correlation matrix in absolute values close = np.corrcoef(close, rowvar=False) a = np.zeros(len(AllTickers)) for", "= [] # Go through the stocks and link the stocks and connections", ": AllTickers[i + j],\"value\" : c[i,i+j]}) # bring together the two dictionaries into", "Get the data panel_data = data.DataReader(consumerTickers, data_source, start_date, end_date) # Getting just the", "ALL INDUSTRIES # Call the stocks I want to analyze financeTickers = ['JPM','BAC','WFC','V','C']", "['JPM','BAC','WFC','V','C','GOOGL','MSFT','FB','T','VZ','AMZN','BABA','WMT','HD','CMCSA','XOM','RDS-B','PTR','CVX','BP','AAPL','PG','BUD','KO','TM'] # Define the data source data_source = 'yahoo' # Define the time-scale", "len(techTickers): nodes.append({\"id\":AllTickers[i], \"group\": 
2}) elif i < len(financeTickers) + len(techTickers) + len(servicesTickers): nodes.append({\"id\":AllTickers[i],", "adj_close with our new set of dates? # All we need to do", "start_date = '2000-01-01' end_date = '2018-01-01' # Get the data panel_data = data.DataReader(consumerTickers,", "analyze financeTickers = ['JPM','BAC','WFC','V','C'] # Define the data source data_source = 'yahoo' #", "how='any') # normalize de data by defining relative gain. starting at the first", "= data.DataReader(AllTickers, data_source, start_date, end_date) # Getting just the adjusted closing prices. This", "end_date = '2018-01-01' # Get the data panel_data = data.DataReader(techTickers, data_source, start_date, end_date)", "nodes.append({\"id\":AllTickers[i], \"group\": 2}) elif i < len(financeTickers) + len(techTickers) + len(servicesTickers): nodes.append({\"id\":AllTickers[i], \"group\":", "the adjusted closing prices. This will return a Pandas DataFrame # The index", "network nodes = [] # define the different industries by seperating it in", "# Get the data panel_data = data.DataReader(consumerTickers, data_source, start_date, end_date) # Getting just", "i < len(financeTickers) + len(techTickers) + len(servicesTickers) + len(basicTickers): nodes.append({\"id\":AllTickers[i], \"group\": 4}) else:", "\"group\": 4}) else: nodes.append({\"id\":AllTickers[i], \"group\": 5}) links = [] # Go through the", "as a matrix close = np.asmatrix(close) # Print the MATERIALS correlation matrix print", "# define the different industries by seperating it in different groups for i", "as plt import pandas as pd import numpy as np import json import", "\"group\": 2}) elif i < len(financeTickers) + len(techTickers) + len(servicesTickers): nodes.append({\"id\":AllTickers[i], \"group\": 3})", "tickers together AllTickers = ['JPM','BAC','WFC','V','C','GOOGL','MSFT','FB','T','VZ','AMZN','BABA','WMT','HD','CMCSA','XOM','RDS-B','PTR','CVX','BP','AAPL','PG','BUD','KO','TM'] # Define the data source data_source 
= 'yahoo'", "= ['GOOGL','MSFT','FB','T','VZ'] servicesTickers = ['AMZN','BABA','WMT','HD','CMCSA'] basicTickers = ['XOM','RDS-B','PTR','CVX','BP'] consumerTickers = ['AAPL','PG','BUD','KO','TM'] # group", "network # In[29]: ## ALL INDUSTRIES # Call the stocks I want to", "group all tickers together AllTickers = ['JPM','BAC','WFC','V','C','GOOGL','MSFT','FB','T','VZ','AMZN','BABA','WMT','HD','CMCSA','XOM','RDS-B','PTR','CVX','BP','AAPL','PG','BUD','KO','TM'] # Define the data source data_source", "the stocks I want to analyze financeTickers = ['JPM','BAC','WFC','V','C'] # Define the data", "data.DataReader(consumerTickers, data_source, start_date, end_date) # Getting just the adjusted closing prices. This will", "do is reindex close using all_weekdays as the new index close = close.reindex(all_weekdays)", "Define the table as a matrix close = np.asmatrix(close) # Print the GOODS", "dict json_data = { \"nodes\": nodes, \"links\": links } network = json.dumps(json_data) #", "len(techTickers) + len(servicesTickers): nodes.append({\"id\":AllTickers[i], \"group\": 3}) elif i < len(financeTickers) + len(techTickers) +", "index in this DataFrame is the major index of the panel_data. 
close =", "panel_data = data.DataReader(AllTickers, data_source, start_date, end_date) # Getting just the adjusted closing prices.", "matrix close = np.matrix(close) # Print the correlation matrix c = np.corrcoef(close, rowvar=False)", "AllTickers = ['JPM','BAC','WFC','V','C','GOOGL','MSFT','FB','T','VZ','AMZN','BABA','WMT','HD','CMCSA','XOM','RDS-B','PTR','CVX','BP','AAPL','PG','BUD','KO','TM'] # Define the data source data_source = 'yahoo' # Define", "= np.asmatrix(close) # Print the GOODS correlation matrix print np.corrcoef(close, rowvar=False) # In[45]:", "Get the data panel_data = data.DataReader(AllTickers, data_source, start_date, end_date) # Getting just the", "close = np.matrix(close) # Define and print the correlation matrix in absolute values", "SERVICES # Call the stocks I want to analyze servicesTickers = ['AMZN','BABA','WMT','HD','CMCSA'] #", "data panel_data = data.DataReader(financeTickers, data_source, start_date, end_date) # Getting just the adjusted closing", "# Get the data panel_data = data.DataReader(techTickers, data_source, start_date, end_date) # Getting just", "print np.corrcoef(close, rowvar=False) # In[4]: ## SERVICES # Call the stocks I want", "['XOM','RDS-B','PTR','CVX','BP'] # Define the data source data_source = 'yahoo' # Define the time-scale", "= ['JPM','BAC','WFC','V','C'] techTickers = ['GOOGL','MSFT','FB','T','VZ'] servicesTickers = ['AMZN','BABA','WMT','HD','CMCSA'] basicTickers = ['XOM','RDS-B','PTR','CVX','BP'] consumerTickers =", "CONSUMER GOODS # Call the stocks I want to analyze consumerTickers = ['AAPL','PG','BUD','KO','TM']", "= np.asmatrix(close) # Print the TECH correlation matrix print np.corrcoef(close, rowvar=False) # In[4]:", "matrix print np.corrcoef(close, rowvar=False) # In[5]: ## BASIC MATERIALS # Call the stocks", "# Define and print the correlation matrix in absolute values close = np.corrcoef(close,", "the stocks I want to analyze basicTickers = ['XOM','RDS-B','PTR','CVX','BP'] # Define the data", "= 
['AMZN','BABA','WMT','HD','CMCSA'] basicTickers = ['XOM','RDS-B','PTR','CVX','BP'] consumerTickers = ['AAPL','PG','BUD','KO','TM'] # group all tickers together", "Get the data panel_data = data.DataReader(servicesTickers, data_source, start_date, end_date) # Getting just the", "the stocks I want to analyze techTickers = ['GOOGL','MSFT','FB','T','VZ'] # Define the data", "the major index of the panel_data close = panel_data.loc['Close'] # Getting all weekdays", "# In[45]: ## ALL INDUSTRIES # Call the stocks I want to analyze", "['AMZN','BABA','WMT','HD','CMCSA'] # Define the data source data_source = 'yahoo' # Define the time-scale", "# Call the stocks I want to analyze financeTickers = ['JPM','BAC','WFC','V','C'] techTickers =", "# Call the stocks I want to analyze consumerTickers = ['AAPL','PG','BUD','KO','TM'] # Define", "how='any') # Define the table as a matrix close = np.asmatrix(close) # Print", "stocks I want to analyze servicesTickers = ['AMZN','BABA','WMT','HD','CMCSA'] # Define the data source", "# Define the table as a matrix close = np.matrix(close) # Define and", "range(len(AllTickers)): if i < len(financeTickers): nodes.append({\"id\":AllTickers[i], \"group\": 1}) elif i < len(financeTickers) +", "panel_data close = panel_data.loc['Close'] # Getting all weekdays between 01/01/2000 and 12/31/2016 all_weekdays", "all_weekdays as the new index close = close.reindex(all_weekdays) # Drop the dates where", "= [] # define the different industries by seperating it in different groups", "source data_source = 'yahoo' # Define the time-scale start_date = '2000-01-01' end_date =", "coding: utf-8 # In[1]: from pandas_datareader import data import matplotlib.pyplot as plt import", "public yet close = close.dropna(axis=0, how='any') # Define the table as a matrix", "downloaded json file. 
print network # In[29]: ## ALL INDUSTRIES # Call the", "manipulate the data so that I can output the proper format for the", "I want to analyze techTickers = ['GOOGL','MSFT','FB','T','VZ'] # Define the data source data_source", "in adj_close with our new set of dates? # All we need to", "= '2000-01-01' end_date = '2018-01-01' # Get the data panel_data = data.DataReader(consumerTickers, data_source,", "the table as a matrix close = np.matrix(close) # Define and print the", "import data import matplotlib.pyplot as plt import pandas as pd import numpy as", "# In[5]: ## BASIC MATERIALS # Call the stocks I want to analyze", "# Get the data panel_data = data.DataReader(basicTickers, data_source, start_date, end_date) # Getting just", "normalize de data by defining relative gain. starting at the first price 1.0", "link the stocks and connections with eachother to the correlation matrix. for i", "stocks I want to analyze financeTickers = ['JPM','BAC','WFC','V','C'] techTickers = ['GOOGL','MSFT','FB','T','VZ'] servicesTickers =", "index of the panel_data. close = panel_data.loc['Close'] # Getting all weekdays between 01/01/2000", "close = np.asmatrix(close) # Print the MATERIALS correlation matrix print np.corrcoef(close, rowvar=False) #", "Print the TECH correlation matrix print np.corrcoef(close, rowvar=False) # In[4]: ## SERVICES #" ]
[ "18-5-15 下午5:32 # @Author : <NAME> # @File : classifier.py # @Software: tfwrapper", "# -*- coding: utf-8 -*- # @Time : 18-5-15 下午5:32 # @Author :", "keras.layers import Dense from keras.models import Sequential from keras.datasets import mnist (x_train,y_train),(x_test,y_test) =", "# @File : classifier.py # @Software: tfwrapper from keras.layers import Dense from keras.models", "-*- # @Time : 18-5-15 下午5:32 # @Author : <NAME> # @File :", "print('Origin train data shape',x_train.shape(),y_train.shape()) x_train = x_train / 255 x_test = x_test /255", "# @Author : <NAME> # @File : classifier.py # @Software: tfwrapper from keras.layers", "@Time : 18-5-15 下午5:32 # @Author : <NAME> # @File : classifier.py #", "import Sequential from keras.datasets import mnist (x_train,y_train),(x_test,y_test) = mnist.load_data() print('Origin train data shape',x_train.shape(),y_train.shape())", "mnist (x_train,y_train),(x_test,y_test) = mnist.load_data() print('Origin train data shape',x_train.shape(),y_train.shape()) x_train = x_train / 255", ": <NAME> # @File : classifier.py # @Software: tfwrapper from keras.layers import Dense", "keras.datasets import mnist (x_train,y_train),(x_test,y_test) = mnist.load_data() print('Origin train data shape',x_train.shape(),y_train.shape()) x_train = x_train", ": 18-5-15 下午5:32 # @Author : <NAME> # @File : classifier.py # @Software:", "classifier.py # @Software: tfwrapper from keras.layers import Dense from keras.models import Sequential from", "@File : classifier.py # @Software: tfwrapper from keras.layers import Dense from keras.models import", "coding: utf-8 -*- # @Time : 18-5-15 下午5:32 # @Author : <NAME> #", "python3 # -*- coding: utf-8 -*- # @Time : 18-5-15 下午5:32 # @Author", "= mnist.load_data() print('Origin train data shape',x_train.shape(),y_train.shape()) x_train = x_train / 255 x_test =", "from keras.datasets import mnist (x_train,y_train),(x_test,y_test) = mnist.load_data() print('Origin train data 
shape',x_train.shape(),y_train.shape()) x_train =", "@Software: tfwrapper from keras.layers import Dense from keras.models import Sequential from keras.datasets import", "mnist.load_data() print('Origin train data shape',x_train.shape(),y_train.shape()) x_train = x_train / 255 x_test = x_test", "下午5:32 # @Author : <NAME> # @File : classifier.py # @Software: tfwrapper from", ": classifier.py # @Software: tfwrapper from keras.layers import Dense from keras.models import Sequential", "from keras.models import Sequential from keras.datasets import mnist (x_train,y_train),(x_test,y_test) = mnist.load_data() print('Origin train", "keras.models import Sequential from keras.datasets import mnist (x_train,y_train),(x_test,y_test) = mnist.load_data() print('Origin train data", "<filename>ml/classifier.py #!/usr/bin/env python3 # -*- coding: utf-8 -*- # @Time : 18-5-15 下午5:32", "tfwrapper from keras.layers import Dense from keras.models import Sequential from keras.datasets import mnist", "import Dense from keras.models import Sequential from keras.datasets import mnist (x_train,y_train),(x_test,y_test) = mnist.load_data()", "Dense from keras.models import Sequential from keras.datasets import mnist (x_train,y_train),(x_test,y_test) = mnist.load_data() print('Origin", "# @Software: tfwrapper from keras.layers import Dense from keras.models import Sequential from keras.datasets", "Sequential from keras.datasets import mnist (x_train,y_train),(x_test,y_test) = mnist.load_data() print('Origin train data shape',x_train.shape(),y_train.shape()) x_train", "import mnist (x_train,y_train),(x_test,y_test) = mnist.load_data() print('Origin train data shape',x_train.shape(),y_train.shape()) x_train = x_train /", "@Author : <NAME> # @File : classifier.py # @Software: tfwrapper from keras.layers import", "-*- coding: utf-8 -*- # @Time : 18-5-15 下午5:32 # @Author : <NAME>", "#!/usr/bin/env python3 # -*- coding: utf-8 -*- # @Time : 18-5-15 下午5:32 #", "from keras.layers import Dense from 
keras.models import Sequential from keras.datasets import mnist (x_train,y_train),(x_test,y_test)", "utf-8 -*- # @Time : 18-5-15 下午5:32 # @Author : <NAME> # @File", "<NAME> # @File : classifier.py # @Software: tfwrapper from keras.layers import Dense from", "# @Time : 18-5-15 下午5:32 # @Author : <NAME> # @File : classifier.py", "(x_train,y_train),(x_test,y_test) = mnist.load_data() print('Origin train data shape',x_train.shape(),y_train.shape()) x_train = x_train / 255 x_test" ]
[ "channels['channels'] if channel['is_member']] # print(\"available channels:\") # pprint(channels) print(\"I am member of {}", "bot is \"\"\" # print(\"DEBUG: my channels: {}\".format(g_member_channel)) for event in events: #", "returns None \"\"\" matches = search(self.MENTION_REGEX, message_text) # the first group contains the", "of self.g_member_channel = [] # context: in which thread the link was already", "where the bot is member if event[\"type\"] != \"message\" or \"subtype\" in event", "= [] for i in finditer(pattern, message): value = i.group(1) if value not", "get_list_of_channels(self): \"\"\" print the list of available channels \"\"\" channels = self.slack_client.api_call( \"channels.list\",", "None) def analyse_message(self, message): \"\"\" find matching sub string in the message and", "a list of formatted links \"\"\" pattern = self.MATCH_PATTERN matchs = [] for", "thread_ts) if not analysed_message_no_repeat: return Message(None, None, None) return Message(event[\"channel\"], thread_ts, analysed_message_no_repeat, self.LINK_URL)", "bot is member of a given channel \"\"\" return channel in [channel['id'] for", "finditer from copy import copy # from pprint import pprint from slackclient import", "subtype which are posted in channel where the bot is \"\"\" # print(\"DEBUG:", "\"\"\" # pprint(self.message_context) no_repeat_messages = copy(analysed_messages) for message in analysed_messages: if thread_ts in", "reading from RTM self.MENTION_REGEX = \"^<@(|[WU].+?)>(.*)\" self.LINK_URL = conf.link_url self.MATCH_PATTERN = conf.match_pattern #", "returns the user ID which was mentioned. If there is no direct mention,", "# starterbot's user ID in Slack: value is assigned # after the bot", "contains the username, # the second group contains the remaining message return (matches.group(1),", "self.bot_loop() else: print(\"Connection failed. 
Exception traceback printed above.\") def bot_loop(self): # Read bot's", "sub string in the message and returns a list of formatted links \"\"\"", "in self.g_member_channel]))) def check_if_member(self, channel): \"\"\" checking if the bot is member of", "print(\"DEBUG: my channels: {}\".format(g_member_channel)) for event in events: # pprint(event) # Parsing only", "self.parse_events_in_channel(self.slack_client.rtm_read()) if bot_message.channel: self.respond_in_thread(bot_message) sleep(self.RTM_READ_DELAY) def parse_direct_mention(self, message_text): \"\"\" Finds a direct mention", "def parse_events_in_channel(self, events): \"\"\" Selecting events of type message with no subtype which", "string in the message and returns a list of formatted links \"\"\" pattern", "by calling Web API method `auth.test` self.slack_client.api_call(\"auth.test\")[\"user_id\"] while True: bot_message = self.parse_events_in_channel(self.slack_client.rtm_read()) if", "running!\") self.get_list_of_channels() self.bot_loop() else: print(\"Connection failed. 
Exception traceback printed above.\") def bot_loop(self): #", "# analyse message to see if we can suggest some links analysed_message =", "matches else (None, None) def get_list_of_channels(self): \"\"\" print the list of available channels", "def get_list_of_channels(self): \"\"\" print the list of available channels \"\"\" channels = self.slack_client.api_call(", "first group contains the username, # the second group contains the remaining message", "for channel in channels['channels'] if channel['is_member']] # print(\"available channels:\") # pprint(channels) print(\"I am", "was already provided self.message_context = {} def chat(self): if self.slack_client.rtm_connect(with_team_state=False): print(\"Starter Bot connected", "import copy # from pprint import pprint from slackclient import SlackClient from bot.message", "# pprint(event) # Parsing only messages in the channels where the bot is", "matchs def dont_repeat_in_thread(self, analysed_messages, thread_ts): \"\"\" Remove message from analysed message if it", "which thread the link was already provided self.message_context = {} def chat(self): if", "the bot is member of a given channel \"\"\" return channel in [channel['id']", "message to see if we can suggest some links analysed_message = self.analyse_message(event['text']) thread_ts", "True: bot_message = self.parse_events_in_channel(self.slack_client.rtm_read()) if bot_message.channel: self.respond_in_thread(bot_message) sleep(self.RTM_READ_DELAY) def parse_direct_mention(self, message_text): \"\"\" Finds", "\"\"\" # Add message to the message context to avoid # repeating same", "matches = search(self.MENTION_REGEX, message_text) # the first group contains the username, # the", "pprint import pprint from slackclient import SlackClient from bot.message import Message class Bot:", "in finditer(pattern, message): value = i.group(1) if value not in matchs: matchs.append(value) if", "= None # constants self.RTM_READ_DELAY = 2 # 1 second delay between 
reading", "events): \"\"\" Selecting events of type message with no subtype which are posted", "find matching sub string in the message and returns a list of formatted", "self.starterbot_id = None # constants self.RTM_READ_DELAY = 2 # 1 second delay between", "in self.g_member_channel] def parse_events_in_channel(self, events): \"\"\" Selecting events of type message with no", "in Slack: value is assigned # after the bot starts up self.starterbot_id =", "is member of self.g_member_channel = [] # context: in which thread the link", "for me: type:{}\".format(event)) continue # analyse message to see if we can suggest", "bot_message.channel: self.respond_in_thread(bot_message) sleep(self.RTM_READ_DELAY) def parse_direct_mention(self, message_text): \"\"\" Finds a direct mention (a mention", "\"\"\" find matching sub string in the message and returns a list of", "continue # analyse message to see if we can suggest some links analysed_message", "def check_if_member(self, channel): \"\"\" checking if the bot is member of a given", "the message and returns a list of formatted links \"\"\" pattern = self.MATCH_PATTERN", "parse_events_in_channel(self, events): \"\"\" Selecting events of type message with no subtype which are", "channel în a thread \"\"\" # Add message to the message context to", "respond_in_thread(self, bot_message): \"\"\"Sends the response back to the channel în a thread \"\"\"", "= {} def chat(self): if self.slack_client.rtm_connect(with_team_state=False): print(\"Starter Bot connected and running!\") self.get_list_of_channels() self.bot_loop()", "given channel \"\"\" return channel in [channel['id'] for channel in self.g_member_channel] def parse_events_in_channel(self,", "\"\"\" checking if the bot is member of a given channel \"\"\" return", "in self.message_context.keys(): if message in self.message_context[thread_ts]: no_repeat_messages.remove(message) return no_repeat_messages def respond_in_thread(self, bot_message): \"\"\"Sends", "if message in 
self.message_context[thread_ts]: no_repeat_messages.remove(message) return no_repeat_messages def respond_in_thread(self, bot_message): \"\"\"Sends the response", "= self.slack_client.api_call( \"channels.list\", exclude_archived=1 ) self.g_member_channel = [channel for channel in channels['channels'] if", "If there is no direct mention, returns None \"\"\" matches = search(self.MENTION_REGEX, message_text)", "am member of {} channels: {}\" .format(len(self.g_member_channel), \",\".join([c['name'] for c in self.g_member_channel]))) def", "list of formatted links \"\"\" pattern = self.MATCH_PATTERN matchs = [] for i", "self.message_context = {} def chat(self): if self.slack_client.rtm_connect(with_team_state=False): print(\"Starter Bot connected and running!\") self.get_list_of_channels()", "conf.match_pattern # list of channel the bot is member of self.g_member_channel = []", "response back to the channel în a thread \"\"\" # Add message to", "if channel['is_member']] # print(\"available channels:\") # pprint(channels) print(\"I am member of {} channels:", "= [channel for channel in channels['channels'] if channel['is_member']] # print(\"available channels:\") # pprint(channels)", "print the list of available channels \"\"\" channels = self.slack_client.api_call( \"channels.list\", exclude_archived=1 )", "= self.analyse_message(event['text']) thread_ts = event['ts'] if 'thread_ts' in event.keys(): thread_ts = event['thread_ts'] if", "returns a list of formatted links \"\"\" pattern = self.MATCH_PATTERN matchs = []", "# repeating same message in a thread if bot_message.thread_ts not in self.message_context.keys(): self.message_context[bot_message.thread_ts]", "def dont_repeat_in_thread(self, analysed_messages, thread_ts): \"\"\" Remove message from analysed message if it was", "message if it was already sent in the same message thread. 
\"\"\" #", "event['ts'] if 'thread_ts' in event.keys(): thread_ts = event['thread_ts'] if not analysed_message: return Message(None,", "value not in matchs: matchs.append(value) if not len(matchs): return return matchs def dont_repeat_in_thread(self,", "no_repeat_messages = copy(analysed_messages) for message in analysed_messages: if thread_ts in self.message_context.keys(): if message", "channel the bot is member of self.g_member_channel = [] # context: in which", "me: type:{}\".format(event)) continue # analyse message to see if we can suggest some", "which was mentioned. If there is no direct mention, returns None \"\"\" matches", "# the first group contains the username, # the second group contains the", "and returns a list of formatted links \"\"\" pattern = self.MATCH_PATTERN matchs =", "message_text) # the first group contains the username, # the second group contains", "not analysed_message: return Message(None, None, None) analysed_message_no_repeat = self.dont_repeat_in_thread(analysed_message, thread_ts) if not analysed_message_no_repeat:", "message_text): \"\"\" Finds a direct mention (a mention that is at the beginning)", "is at the beginning) in message text and returns the user ID which", "channel \"\"\" return channel in [channel['id'] for channel in self.g_member_channel] def parse_events_in_channel(self, events):", "only messages in the channels where the bot is member if event[\"type\"] !=", "of {} channels: {}\" .format(len(self.g_member_channel), \",\".join([c['name'] for c in self.g_member_channel]))) def check_if_member(self, channel):", "sent in the same message thread. 
\"\"\" # pprint(self.message_context) no_repeat_messages = copy(analysed_messages) for", "analysed_message_no_repeat, self.LINK_URL) return Message(None, None, None) def analyse_message(self, message): \"\"\" find matching sub", "method `auth.test` self.slack_client.api_call(\"auth.test\")[\"user_id\"] while True: bot_message = self.parse_events_in_channel(self.slack_client.rtm_read()) if bot_message.channel: self.respond_in_thread(bot_message) sleep(self.RTM_READ_DELAY) def", "in event.keys(): thread_ts = event['thread_ts'] if not analysed_message: return Message(None, None, None) analysed_message_no_repeat", "links \"\"\" pattern = self.MATCH_PATTERN matchs = [] for i in finditer(pattern, message):", "c in self.g_member_channel]))) def check_if_member(self, channel): \"\"\" checking if the bot is member", "message): value = i.group(1) if value not in matchs: matchs.append(value) if not len(matchs):", "where the bot is \"\"\" # print(\"DEBUG: my channels: {}\".format(g_member_channel)) for event in", "username, # the second group contains the remaining message return (matches.group(1), matches.group(2).strip()) if", "i in finditer(pattern, message): value = i.group(1) if value not in matchs: matchs.append(value)", "(None, None) def get_list_of_channels(self): \"\"\" print the list of available channels \"\"\" channels", "parse_direct_mention(self, message_text): \"\"\" Finds a direct mention (a mention that is at the", "client self.slack_client = SlackClient(conf.slack_bot_token) # starterbot's user ID in Slack: value is assigned", "not self.check_if_member(event[\"channel\"]): # print(\"not for me: type:{}\".format(event)) continue # analyse message to see", "bot_message): \"\"\"Sends the response back to the channel în a thread \"\"\" #", "posted in channel where the bot is \"\"\" # print(\"DEBUG: my channels: {}\".format(g_member_channel))", "mention, returns None \"\"\" matches = search(self.MENTION_REGEX, message_text) # the first group contains", "context: 
in which thread the link was already provided self.message_context = {} def", "channel['is_member']] # print(\"available channels:\") # pprint(channels) print(\"I am member of {} channels: {}\"", "checking if the bot is member of a given channel \"\"\" return channel", "provided self.message_context = {} def chat(self): if self.slack_client.rtm_connect(with_team_state=False): print(\"Starter Bot connected and running!\")", "2 # 1 second delay between reading from RTM self.MENTION_REGEX = \"^<@(|[WU].+?)>(.*)\" self.LINK_URL", "= self.MATCH_PATTERN matchs = [] for i in finditer(pattern, message): value = i.group(1)", "def respond_in_thread(self, bot_message): \"\"\"Sends the response back to the channel în a thread", "member of self.g_member_channel = [] # context: in which thread the link was", "channel): \"\"\" checking if the bot is member of a given channel \"\"\"", "self.g_member_channel] def parse_events_in_channel(self, events): \"\"\" Selecting events of type message with no subtype", "of channel the bot is member of self.g_member_channel = [] # context: in", "conf): # instantiate Slack client self.slack_client = SlackClient(conf.slack_bot_token) # starterbot's user ID in", "contains the remaining message return (matches.group(1), matches.group(2).strip()) if matches else (None, None) def", "the username, # the second group contains the remaining message return (matches.group(1), matches.group(2).strip())", "\"\"\" matches = search(self.MENTION_REGEX, message_text) # the first group contains the username, #", "(matches.group(1), matches.group(2).strip()) if matches else (None, None) def get_list_of_channels(self): \"\"\" print the list", "import sleep from re import search, finditer from copy import copy # from", "ID which was mentioned. 
If there is no direct mention, returns None \"\"\"", "= self.dont_repeat_in_thread(analysed_message, thread_ts) if not analysed_message_no_repeat: return Message(None, None, None) return Message(event[\"channel\"], thread_ts,", "pprint(self.message_context) no_repeat_messages = copy(analysed_messages) for message in analysed_messages: if thread_ts in self.message_context.keys(): if", "Message(event[\"channel\"], thread_ts, analysed_message_no_repeat, self.LINK_URL) return Message(None, None, None) def analyse_message(self, message): \"\"\" find", "self.dont_repeat_in_thread(analysed_message, thread_ts) if not analysed_message_no_repeat: return Message(None, None, None) return Message(event[\"channel\"], thread_ts, analysed_message_no_repeat,", "the response back to the channel în a thread \"\"\" # Add message", "second group contains the remaining message return (matches.group(1), matches.group(2).strip()) if matches else (None,", "message to the message context to avoid # repeating same message in a", "event or \\ not self.check_if_member(event[\"channel\"]): # print(\"not for me: type:{}\".format(event)) continue # analyse", "sleep(self.RTM_READ_DELAY) def parse_direct_mention(self, message_text): \"\"\" Finds a direct mention (a mention that is", "back to the channel în a thread \"\"\" # Add message to the", "from slackclient import SlackClient from bot.message import Message class Bot: def __init__(self, conf):", "`auth.test` self.slack_client.api_call(\"auth.test\")[\"user_id\"] while True: bot_message = self.parse_events_in_channel(self.slack_client.rtm_read()) if bot_message.channel: self.respond_in_thread(bot_message) sleep(self.RTM_READ_DELAY) def parse_direct_mention(self,", "matchs = [] for i in finditer(pattern, message): value = i.group(1) if value", "failed. 
Exception traceback printed above.\") def bot_loop(self): # Read bot's user ID by", "starts up self.starterbot_id = None # constants self.RTM_READ_DELAY = 2 # 1 second", "from re import search, finditer from copy import copy # from pprint import", "[channel for channel in channels['channels'] if channel['is_member']] # print(\"available channels:\") # pprint(channels) print(\"I", "!= \"message\" or \"subtype\" in event or \\ not self.check_if_member(event[\"channel\"]): # print(\"not for", "if value not in matchs: matchs.append(value) if not len(matchs): return return matchs def", "the message context to avoid # repeating same message in a thread if", "if not analysed_message: return Message(None, None, None) analysed_message_no_repeat = self.dont_repeat_in_thread(analysed_message, thread_ts) if not", "None) def get_list_of_channels(self): \"\"\" print the list of available channels \"\"\" channels =", "while True: bot_message = self.parse_events_in_channel(self.slack_client.rtm_read()) if bot_message.channel: self.respond_in_thread(bot_message) sleep(self.RTM_READ_DELAY) def parse_direct_mention(self, message_text): \"\"\"", "same message in a thread if bot_message.thread_ts not in self.message_context.keys(): self.message_context[bot_message.thread_ts] = []", "exclude_archived=1 ) self.g_member_channel = [channel for channel in channels['channels'] if channel['is_member']] # print(\"available", "return matchs def dont_repeat_in_thread(self, analysed_messages, thread_ts): \"\"\" Remove message from analysed message if", "if matches else (None, None) def get_list_of_channels(self): \"\"\" print the list of available", "some links analysed_message = self.analyse_message(event['text']) thread_ts = event['ts'] if 'thread_ts' in event.keys(): thread_ts", "in self.message_context[thread_ts]: no_repeat_messages.remove(message) return no_repeat_messages def respond_in_thread(self, bot_message): \"\"\"Sends the response back to", "that is at the beginning) in message text 
and returns the user ID", "channels: {}\".format(g_member_channel)) for event in events: # pprint(event) # Parsing only messages in", "SlackClient(conf.slack_bot_token) # starterbot's user ID in Slack: value is assigned # after the", "in channels['channels'] if channel['is_member']] # print(\"available channels:\") # pprint(channels) print(\"I am member of", "the same message thread. \"\"\" # pprint(self.message_context) no_repeat_messages = copy(analysed_messages) for message in", "thread the link was already provided self.message_context = {} def chat(self): if self.slack_client.rtm_connect(with_team_state=False):", "the link was already provided self.message_context = {} def chat(self): if self.slack_client.rtm_connect(with_team_state=False): print(\"Starter", "of formatted links \"\"\" pattern = self.MATCH_PATTERN matchs = [] for i in", "= event['ts'] if 'thread_ts' in event.keys(): thread_ts = event['thread_ts'] if not analysed_message: return", "the user ID which was mentioned. If there is no direct mention, returns", "thread_ts, analysed_message_no_repeat, self.LINK_URL) return Message(None, None, None) def analyse_message(self, message): \"\"\" find matching", "# instantiate Slack client self.slack_client = SlackClient(conf.slack_bot_token) # starterbot's user ID in Slack:", "in event or \\ not self.check_if_member(event[\"channel\"]): # print(\"not for me: type:{}\".format(event)) continue #", "import SlackClient from bot.message import Message class Bot: def __init__(self, conf): # instantiate", "of available channels \"\"\" channels = self.slack_client.api_call( \"channels.list\", exclude_archived=1 ) self.g_member_channel = [channel", "if self.slack_client.rtm_connect(with_team_state=False): print(\"Starter Bot connected and running!\") self.get_list_of_channels() self.bot_loop() else: print(\"Connection failed. 
Exception", "matchs.append(value) if not len(matchs): return return matchs def dont_repeat_in_thread(self, analysed_messages, thread_ts): \"\"\" Remove", "else (None, None) def get_list_of_channels(self): \"\"\" print the list of available channels \"\"\"", "None # constants self.RTM_READ_DELAY = 2 # 1 second delay between reading from", "time import sleep from re import search, finditer from copy import copy #", "\"\"\" print the list of available channels \"\"\" channels = self.slack_client.api_call( \"channels.list\", exclude_archived=1", "messages in the channels where the bot is member if event[\"type\"] != \"message\"", "the list of available channels \"\"\" channels = self.slack_client.api_call( \"channels.list\", exclude_archived=1 ) self.g_member_channel", "\"channels.list\", exclude_archived=1 ) self.g_member_channel = [channel for channel in channels['channels'] if channel['is_member']] #", "message thread. \"\"\" # pprint(self.message_context) no_repeat_messages = copy(analysed_messages) for message in analysed_messages: if", "event in events: # pprint(event) # Parsing only messages in the channels where", "analyse_message(self, message): \"\"\" find matching sub string in the message and returns a", "în a thread \"\"\" # Add message to the message context to avoid", "and returns the user ID which was mentioned. 
If there is no direct", "None \"\"\" matches = search(self.MENTION_REGEX, message_text) # the first group contains the username,", "is member if event[\"type\"] != \"message\" or \"subtype\" in event or \\ not", "\"\"\" channels = self.slack_client.api_call( \"channels.list\", exclude_archived=1 ) self.g_member_channel = [channel for channel in", "is \"\"\" # print(\"DEBUG: my channels: {}\".format(g_member_channel)) for event in events: # pprint(event)", "the bot starts up self.starterbot_id = None # constants self.RTM_READ_DELAY = 2 #", "the first group contains the username, # the second group contains the remaining", "self.respond_in_thread(bot_message) sleep(self.RTM_READ_DELAY) def parse_direct_mention(self, message_text): \"\"\" Finds a direct mention (a mention that", "bot_message = self.parse_events_in_channel(self.slack_client.rtm_read()) if bot_message.channel: self.respond_in_thread(bot_message) sleep(self.RTM_READ_DELAY) def parse_direct_mention(self, message_text): \"\"\" Finds a", "# print(\"not for me: type:{}\".format(event)) continue # analyse message to see if we", "value = i.group(1) if value not in matchs: matchs.append(value) if not len(matchs): return", "matchs: matchs.append(value) if not len(matchs): return return matchs def dont_repeat_in_thread(self, analysed_messages, thread_ts): \"\"\"", "self.slack_client.api_call( \"channels.list\", exclude_archived=1 ) self.g_member_channel = [channel for channel in channels['channels'] if channel['is_member']]", "copy import copy # from pprint import pprint from slackclient import SlackClient from", "self.MATCH_PATTERN = conf.match_pattern # list of channel the bot is member of self.g_member_channel", "message in analysed_messages: if thread_ts in self.message_context.keys(): if message in self.message_context[thread_ts]: no_repeat_messages.remove(message) return", "return Message(None, None, None) return Message(event[\"channel\"], thread_ts, analysed_message_no_repeat, self.LINK_URL) return 
Message(None, None, None)", "is member of a given channel \"\"\" return channel in [channel['id'] for channel", "to see if we can suggest some links analysed_message = self.analyse_message(event['text']) thread_ts =", "traceback printed above.\") def bot_loop(self): # Read bot's user ID by calling Web", "connected and running!\") self.get_list_of_channels() self.bot_loop() else: print(\"Connection failed. Exception traceback printed above.\") def", "with no subtype which are posted in channel where the bot is \"\"\"", "Web API method `auth.test` self.slack_client.api_call(\"auth.test\")[\"user_id\"] while True: bot_message = self.parse_events_in_channel(self.slack_client.rtm_read()) if bot_message.channel: self.respond_in_thread(bot_message)", "self.RTM_READ_DELAY = 2 # 1 second delay between reading from RTM self.MENTION_REGEX =", "self.LINK_URL = conf.link_url self.MATCH_PATTERN = conf.match_pattern # list of channel the bot is", "print(\"available channels:\") # pprint(channels) print(\"I am member of {} channels: {}\" .format(len(self.g_member_channel), \",\".join([c['name']", "links analysed_message = self.analyse_message(event['text']) thread_ts = event['ts'] if 'thread_ts' in event.keys(): thread_ts =", "not analysed_message_no_repeat: return Message(None, None, None) return Message(event[\"channel\"], thread_ts, analysed_message_no_repeat, self.LINK_URL) return Message(None,", "= copy(analysed_messages) for message in analysed_messages: if thread_ts in self.message_context.keys(): if message in", "# context: in which thread the link was already provided self.message_context = {}", "(a mention that is at the beginning) in message text and returns the", "no subtype which are posted in channel where the bot is \"\"\" #", "to avoid # repeating same message in a thread if bot_message.thread_ts not in", "user ID in Slack: value is assigned # after the bot starts up", "Parsing only messages in the channels where the bot is member if event[\"type\"]", "link was 
already provided self.message_context = {} def chat(self): if self.slack_client.rtm_connect(with_team_state=False): print(\"Starter Bot", "analysed message if it was already sent in the same message thread. \"\"\"", "from RTM self.MENTION_REGEX = \"^<@(|[WU].+?)>(.*)\" self.LINK_URL = conf.link_url self.MATCH_PATTERN = conf.match_pattern # list", "self.g_member_channel = [] # context: in which thread the link was already provided", "already sent in the same message thread. \"\"\" # pprint(self.message_context) no_repeat_messages = copy(analysed_messages)", "direct mention (a mention that is at the beginning) in message text and", "channel where the bot is \"\"\" # print(\"DEBUG: my channels: {}\".format(g_member_channel)) for event", "user ID by calling Web API method `auth.test` self.slack_client.api_call(\"auth.test\")[\"user_id\"] while True: bot_message =", "type message with no subtype which are posted in channel where the bot", "in message text and returns the user ID which was mentioned. 
If there", "if thread_ts in self.message_context.keys(): if message in self.message_context[thread_ts]: no_repeat_messages.remove(message) return no_repeat_messages def respond_in_thread(self,", "import pprint from slackclient import SlackClient from bot.message import Message class Bot: def", "[] for i in finditer(pattern, message): value = i.group(1) if value not in", "which are posted in channel where the bot is \"\"\" # print(\"DEBUG: my", "avoid # repeating same message in a thread if bot_message.thread_ts not in self.message_context.keys():", "sleep from re import search, finditer from copy import copy # from pprint", "self.slack_client = SlackClient(conf.slack_bot_token) # starterbot's user ID in Slack: value is assigned #", "if bot_message.thread_ts not in self.message_context.keys(): self.message_context[bot_message.thread_ts] = [] self.message_context[bot_message.thread_ts].extend(bot_message.raw_message) self.slack_client.api_call( \"chat.postMessage\", channel=bot_message.channel, thread_ts=bot_message.thread_ts,", "print(\"I am member of {} channels: {}\" .format(len(self.g_member_channel), \",\".join([c['name'] for c in self.g_member_channel])))", "we can suggest some links analysed_message = self.analyse_message(event['text']) thread_ts = event['ts'] if 'thread_ts'", "from copy import copy # from pprint import pprint from slackclient import SlackClient", "None, None) return Message(event[\"channel\"], thread_ts, analysed_message_no_repeat, self.LINK_URL) return Message(None, None, None) def analyse_message(self,", "dont_repeat_in_thread(self, analysed_messages, thread_ts): \"\"\" Remove message from analysed message if it was already", "already provided self.message_context = {} def chat(self): if self.slack_client.rtm_connect(with_team_state=False): print(\"Starter Bot connected and", "up self.starterbot_id = None # constants self.RTM_READ_DELAY = 2 # 1 second delay", "for channel in self.g_member_channel] def parse_events_in_channel(self, events): 
\"\"\" Selecting events of type message", "= conf.match_pattern # list of channel the bot is member of self.g_member_channel =", "is assigned # after the bot starts up self.starterbot_id = None # constants", "context to avoid # repeating same message in a thread if bot_message.thread_ts not", "pattern = self.MATCH_PATTERN matchs = [] for i in finditer(pattern, message): value =", "print(\"Starter Bot connected and running!\") self.get_list_of_channels() self.bot_loop() else: print(\"Connection failed. Exception traceback printed", "self.analyse_message(event['text']) thread_ts = event['ts'] if 'thread_ts' in event.keys(): thread_ts = event['thread_ts'] if not", "\"^<@(|[WU].+?)>(.*)\" self.LINK_URL = conf.link_url self.MATCH_PATTERN = conf.match_pattern # list of channel the bot", "to the message context to avoid # repeating same message in a thread", "None, None) def analyse_message(self, message): \"\"\" find matching sub string in the message", "after the bot starts up self.starterbot_id = None # constants self.RTM_READ_DELAY = 2", "between reading from RTM self.MENTION_REGEX = \"^<@(|[WU].+?)>(.*)\" self.LINK_URL = conf.link_url self.MATCH_PATTERN = conf.match_pattern", "for event in events: # pprint(event) # Parsing only messages in the channels", "def bot_loop(self): # Read bot's user ID by calling Web API method `auth.test`", "self.slack_client.api_call(\"auth.test\")[\"user_id\"] while True: bot_message = self.parse_events_in_channel(self.slack_client.rtm_read()) if bot_message.channel: self.respond_in_thread(bot_message) sleep(self.RTM_READ_DELAY) def parse_direct_mention(self, message_text):", "for i in finditer(pattern, message): value = i.group(1) if value not in matchs:", "for c in self.g_member_channel]))) def check_if_member(self, channel): \"\"\" checking if the bot is", "self.message_context[thread_ts]: no_repeat_messages.remove(message) return no_repeat_messages def respond_in_thread(self, bot_message): \"\"\"Sends the response back to the", "not 
in self.message_context.keys(): self.message_context[bot_message.thread_ts] = [] self.message_context[bot_message.thread_ts].extend(bot_message.raw_message) self.slack_client.api_call( \"chat.postMessage\", channel=bot_message.channel, thread_ts=bot_message.thread_ts, text=bot_message.formatted_message )", "assigned # after the bot starts up self.starterbot_id = None # constants self.RTM_READ_DELAY", "{}\".format(g_member_channel)) for event in events: # pprint(event) # Parsing only messages in the", "# print(\"DEBUG: my channels: {}\".format(g_member_channel)) for event in events: # pprint(event) # Parsing", "Message class Bot: def __init__(self, conf): # instantiate Slack client self.slack_client = SlackClient(conf.slack_bot_token)", "\"\"\" Remove message from analysed message if it was already sent in the", "# pprint(channels) print(\"I am member of {} channels: {}\" .format(len(self.g_member_channel), \",\".join([c['name'] for c", "slackclient import SlackClient from bot.message import Message class Bot: def __init__(self, conf): #", "# from pprint import pprint from slackclient import SlackClient from bot.message import Message", "Slack: value is assigned # after the bot starts up self.starterbot_id = None", "the bot is \"\"\" # print(\"DEBUG: my channels: {}\".format(g_member_channel)) for event in events:", "channels: {}\" .format(len(self.g_member_channel), \",\".join([c['name'] for c in self.g_member_channel]))) def check_if_member(self, channel): \"\"\" checking", "message return (matches.group(1), matches.group(2).strip()) if matches else (None, None) def get_list_of_channels(self): \"\"\" print", ") self.g_member_channel = [channel for channel in channels['channels'] if channel['is_member']] # print(\"available channels:\")", "channels = self.slack_client.api_call( \"channels.list\", exclude_archived=1 ) self.g_member_channel = [channel for channel in channels['channels']", "search, finditer from copy import copy # from pprint import pprint from 
slackclient", "in channel where the bot is \"\"\" # print(\"DEBUG: my channels: {}\".format(g_member_channel)) for", "\"\"\" pattern = self.MATCH_PATTERN matchs = [] for i in finditer(pattern, message): value", "self.g_member_channel = [channel for channel in channels['channels'] if channel['is_member']] # print(\"available channels:\") #", "analysed_message: return Message(None, None, None) analysed_message_no_repeat = self.dont_repeat_in_thread(analysed_message, thread_ts) if not analysed_message_no_repeat: return", "the bot is member if event[\"type\"] != \"message\" or \"subtype\" in event or", "return Message(event[\"channel\"], thread_ts, analysed_message_no_repeat, self.LINK_URL) return Message(None, None, None) def analyse_message(self, message): \"\"\"", "finditer(pattern, message): value = i.group(1) if value not in matchs: matchs.append(value) if not", "Message(None, None, None) def analyse_message(self, message): \"\"\" find matching sub string in the", "delay between reading from RTM self.MENTION_REGEX = \"^<@(|[WU].+?)>(.*)\" self.LINK_URL = conf.link_url self.MATCH_PATTERN =", "{}\" .format(len(self.g_member_channel), \",\".join([c['name'] for c in self.g_member_channel]))) def check_if_member(self, channel): \"\"\" checking if", "to the channel în a thread \"\"\" # Add message to the message", "thread_ts = event['ts'] if 'thread_ts' in event.keys(): thread_ts = event['thread_ts'] if not analysed_message:", "return Message(None, None, None) def analyse_message(self, message): \"\"\" find matching sub string in", "# Read bot's user ID by calling Web API method `auth.test` self.slack_client.api_call(\"auth.test\")[\"user_id\"] while", "if 'thread_ts' in event.keys(): thread_ts = event['thread_ts'] if not analysed_message: return Message(None, None,", "\"\"\" Selecting events of type message with no subtype which are posted in", "def parse_direct_mention(self, message_text): \"\"\" Finds a direct mention (a mention that is at", "re import search, 
finditer from copy import copy # from pprint import pprint", "Exception traceback printed above.\") def bot_loop(self): # Read bot's user ID by calling", "event[\"type\"] != \"message\" or \"subtype\" in event or \\ not self.check_if_member(event[\"channel\"]): # print(\"not", "self.check_if_member(event[\"channel\"]): # print(\"not for me: type:{}\".format(event)) continue # analyse message to see if", "__init__(self, conf): # instantiate Slack client self.slack_client = SlackClient(conf.slack_bot_token) # starterbot's user ID", "Message(None, None, None) return Message(event[\"channel\"], thread_ts, analysed_message_no_repeat, self.LINK_URL) return Message(None, None, None) def", "print(\"Connection failed. Exception traceback printed above.\") def bot_loop(self): # Read bot's user ID", "matches.group(2).strip()) if matches else (None, None) def get_list_of_channels(self): \"\"\" print the list of", "import search, finditer from copy import copy # from pprint import pprint from", "formatted links \"\"\" pattern = self.MATCH_PATTERN matchs = [] for i in finditer(pattern,", "thread_ts = event['thread_ts'] if not analysed_message: return Message(None, None, None) analysed_message_no_repeat = self.dont_repeat_in_thread(analysed_message,", "no_repeat_messages.remove(message) return no_repeat_messages def respond_in_thread(self, bot_message): \"\"\"Sends the response back to the channel", "group contains the remaining message return (matches.group(1), matches.group(2).strip()) if matches else (None, None)", "analysed_message_no_repeat: return Message(None, None, None) return Message(event[\"channel\"], thread_ts, analysed_message_no_repeat, self.LINK_URL) return Message(None, None,", "thread_ts): \"\"\" Remove message from analysed message if it was already sent in", "it was already sent in the same message thread. 
\"\"\" # pprint(self.message_context) no_repeat_messages", "thread \"\"\" # Add message to the message context to avoid # repeating", "of a given channel \"\"\" return channel in [channel['id'] for channel in self.g_member_channel]", "search(self.MENTION_REGEX, message_text) # the first group contains the username, # the second group", "in [channel['id'] for channel in self.g_member_channel] def parse_events_in_channel(self, events): \"\"\" Selecting events of", "no direct mention, returns None \"\"\" matches = search(self.MENTION_REGEX, message_text) # the first", "user ID which was mentioned. If there is no direct mention, returns None", "= 2 # 1 second delay between reading from RTM self.MENTION_REGEX = \"^<@(|[WU].+?)>(.*)\"", "[] # context: in which thread the link was already provided self.message_context =", "type:{}\".format(event)) continue # analyse message to see if we can suggest some links", "second delay between reading from RTM self.MENTION_REGEX = \"^<@(|[WU].+?)>(.*)\" self.LINK_URL = conf.link_url self.MATCH_PATTERN", "message in a thread if bot_message.thread_ts not in self.message_context.keys(): self.message_context[bot_message.thread_ts] = [] self.message_context[bot_message.thread_ts].extend(bot_message.raw_message)", "{} channels: {}\" .format(len(self.g_member_channel), \",\".join([c['name'] for c in self.g_member_channel]))) def check_if_member(self, channel): \"\"\"", "SlackClient from bot.message import Message class Bot: def __init__(self, conf): # instantiate Slack", "# list of channel the bot is member of self.g_member_channel = [] #", "mention that is at the beginning) in message text and returns the user", "len(matchs): return return matchs def dont_repeat_in_thread(self, analysed_messages, thread_ts): \"\"\" Remove message from analysed", "if we can suggest some links analysed_message = self.analyse_message(event['text']) thread_ts = event['ts'] if", "constants self.RTM_READ_DELAY = 2 # 1 second delay between reading from RTM 
self.MENTION_REGEX", "bot is member of self.g_member_channel = [] # context: in which thread the", "message with no subtype which are posted in channel where the bot is", "starterbot's user ID in Slack: value is assigned # after the bot starts", "analysed_messages, thread_ts): \"\"\" Remove message from analysed message if it was already sent", "in the same message thread. \"\"\" # pprint(self.message_context) no_repeat_messages = copy(analysed_messages) for message", "repeating same message in a thread if bot_message.thread_ts not in self.message_context.keys(): self.message_context[bot_message.thread_ts] =", "None) return Message(event[\"channel\"], thread_ts, analysed_message_no_repeat, self.LINK_URL) return Message(None, None, None) def analyse_message(self, message):", "None, None) analysed_message_no_repeat = self.dont_repeat_in_thread(analysed_message, thread_ts) if not analysed_message_no_repeat: return Message(None, None, None)", "the channels where the bot is member if event[\"type\"] != \"message\" or \"subtype\"", "channels \"\"\" channels = self.slack_client.api_call( \"channels.list\", exclude_archived=1 ) self.g_member_channel = [channel for channel", "message text and returns the user ID which was mentioned. If there is", "= [] # context: in which thread the link was already provided self.message_context", "def analyse_message(self, message): \"\"\" find matching sub string in the message and returns", "\"\"\" Finds a direct mention (a mention that is at the beginning) in", "events: # pprint(event) # Parsing only messages in the channels where the bot", "same message thread. \"\"\" # pprint(self.message_context) no_repeat_messages = copy(analysed_messages) for message in analysed_messages:", "event['thread_ts'] if not analysed_message: return Message(None, None, None) analysed_message_no_repeat = self.dont_repeat_in_thread(analysed_message, thread_ts) if", "else: print(\"Connection failed. 
Exception traceback printed above.\") def bot_loop(self): # Read bot's user", "if event[\"type\"] != \"message\" or \"subtype\" in event or \\ not self.check_if_member(event[\"channel\"]): #", "ID by calling Web API method `auth.test` self.slack_client.api_call(\"auth.test\")[\"user_id\"] while True: bot_message = self.parse_events_in_channel(self.slack_client.rtm_read())", "\"\"\" # print(\"DEBUG: my channels: {}\".format(g_member_channel)) for event in events: # pprint(event) #", "# Parsing only messages in the channels where the bot is member if", "import Message class Bot: def __init__(self, conf): # instantiate Slack client self.slack_client =", "Bot connected and running!\") self.get_list_of_channels() self.bot_loop() else: print(\"Connection failed. Exception traceback printed above.\")", "printed above.\") def bot_loop(self): # Read bot's user ID by calling Web API", "group contains the username, # the second group contains the remaining message return", "pprint(channels) print(\"I am member of {} channels: {}\" .format(len(self.g_member_channel), \",\".join([c['name'] for c in", "Slack client self.slack_client = SlackClient(conf.slack_bot_token) # starterbot's user ID in Slack: value is", "Message(None, None, None) analysed_message_no_repeat = self.dont_repeat_in_thread(analysed_message, thread_ts) if not analysed_message_no_repeat: return Message(None, None,", "bot's user ID by calling Web API method `auth.test` self.slack_client.api_call(\"auth.test\")[\"user_id\"] while True: bot_message", "{} def chat(self): if self.slack_client.rtm_connect(with_team_state=False): print(\"Starter Bot connected and running!\") self.get_list_of_channels() self.bot_loop() else:", "message in self.message_context[thread_ts]: no_repeat_messages.remove(message) return no_repeat_messages def respond_in_thread(self, bot_message): \"\"\"Sends the response back", "i.group(1) if value not in matchs: matchs.append(value) if not len(matchs): return return matchs", "API method 
`auth.test` self.slack_client.api_call(\"auth.test\")[\"user_id\"] while True: bot_message = self.parse_events_in_channel(self.slack_client.rtm_read()) if bot_message.channel: self.respond_in_thread(bot_message) sleep(self.RTM_READ_DELAY)", "# after the bot starts up self.starterbot_id = None # constants self.RTM_READ_DELAY =", "analysed_message = self.analyse_message(event['text']) thread_ts = event['ts'] if 'thread_ts' in event.keys(): thread_ts = event['thread_ts']", "conf.link_url self.MATCH_PATTERN = conf.match_pattern # list of channel the bot is member of", "from time import sleep from re import search, finditer from copy import copy", "self.MATCH_PATTERN matchs = [] for i in finditer(pattern, message): value = i.group(1) if", "= event['thread_ts'] if not analysed_message: return Message(None, None, None) analysed_message_no_repeat = self.dont_repeat_in_thread(analysed_message, thread_ts)", "bot_loop(self): # Read bot's user ID by calling Web API method `auth.test` self.slack_client.api_call(\"auth.test\")[\"user_id\"]", "= SlackClient(conf.slack_bot_token) # starterbot's user ID in Slack: value is assigned # after", "mentioned. 
If there is no direct mention, returns None \"\"\" matches = search(self.MENTION_REGEX,", "in events: # pprint(event) # Parsing only messages in the channels where the", "or \"subtype\" in event or \\ not self.check_if_member(event[\"channel\"]): # print(\"not for me: type:{}\".format(event))", "calling Web API method `auth.test` self.slack_client.api_call(\"auth.test\")[\"user_id\"] while True: bot_message = self.parse_events_in_channel(self.slack_client.rtm_read()) if bot_message.channel:", "\"\"\"Sends the response back to the channel în a thread \"\"\" # Add", "thread if bot_message.thread_ts not in self.message_context.keys(): self.message_context[bot_message.thread_ts] = [] self.message_context[bot_message.thread_ts].extend(bot_message.raw_message) self.slack_client.api_call( \"chat.postMessage\", channel=bot_message.channel,", "Add message to the message context to avoid # repeating same message in", "at the beginning) in message text and returns the user ID which was", ".format(len(self.g_member_channel), \",\".join([c['name'] for c in self.g_member_channel]))) def check_if_member(self, channel): \"\"\" checking if the", "thread_ts in self.message_context.keys(): if message in self.message_context[thread_ts]: no_repeat_messages.remove(message) return no_repeat_messages def respond_in_thread(self, bot_message):", "not len(matchs): return return matchs def dont_repeat_in_thread(self, analysed_messages, thread_ts): \"\"\" Remove message from", "= conf.link_url self.MATCH_PATTERN = conf.match_pattern # list of channel the bot is member", "remaining message return (matches.group(1), matches.group(2).strip()) if matches else (None, None) def get_list_of_channels(self): \"\"\"", "return return matchs def dont_repeat_in_thread(self, analysed_messages, thread_ts): \"\"\" Remove message from analysed message", "self.LINK_URL) return Message(None, None, None) def analyse_message(self, message): \"\"\" find matching sub string", "not in matchs: matchs.append(value) if 
not len(matchs): return return matchs def dont_repeat_in_thread(self, analysed_messages,", "# print(\"available channels:\") # pprint(channels) print(\"I am member of {} channels: {}\" .format(len(self.g_member_channel),", "of type message with no subtype which are posted in channel where the", "message context to avoid # repeating same message in a thread if bot_message.thread_ts", "= search(self.MENTION_REGEX, message_text) # the first group contains the username, # the second", "for message in analysed_messages: if thread_ts in self.message_context.keys(): if message in self.message_context[thread_ts]: no_repeat_messages.remove(message)", "was mentioned. If there is no direct mention, returns None \"\"\" matches =", "self.MENTION_REGEX = \"^<@(|[WU].+?)>(.*)\" self.LINK_URL = conf.link_url self.MATCH_PATTERN = conf.match_pattern # list of channel", "analysed_messages: if thread_ts in self.message_context.keys(): if message in self.message_context[thread_ts]: no_repeat_messages.remove(message) return no_repeat_messages def", "there is no direct mention, returns None \"\"\" matches = search(self.MENTION_REGEX, message_text) #", "bot starts up self.starterbot_id = None # constants self.RTM_READ_DELAY = 2 # 1", "1 second delay between reading from RTM self.MENTION_REGEX = \"^<@(|[WU].+?)>(.*)\" self.LINK_URL = conf.link_url", "in the channels where the bot is member if event[\"type\"] != \"message\" or", "event.keys(): thread_ts = event['thread_ts'] if not analysed_message: return Message(None, None, None) analysed_message_no_repeat =", "the channel în a thread \"\"\" # Add message to the message context", "if the bot is member of a given channel \"\"\" return channel in", "Bot: def __init__(self, conf): # instantiate Slack client self.slack_client = SlackClient(conf.slack_bot_token) # starterbot's", "analyse message to see if we can suggest some links analysed_message = self.analyse_message(event['text'])", "from bot.message import Message class Bot: def 
__init__(self, conf): # instantiate Slack client", "# the second group contains the remaining message return (matches.group(1), matches.group(2).strip()) if matches", "Remove message from analysed message if it was already sent in the same", "are posted in channel where the bot is \"\"\" # print(\"DEBUG: my channels:", "is no direct mention, returns None \"\"\" matches = search(self.MENTION_REGEX, message_text) # the", "matching sub string in the message and returns a list of formatted links", "a thread \"\"\" # Add message to the message context to avoid #", "= self.parse_events_in_channel(self.slack_client.rtm_read()) if bot_message.channel: self.respond_in_thread(bot_message) sleep(self.RTM_READ_DELAY) def parse_direct_mention(self, message_text): \"\"\" Finds a direct", "[channel['id'] for channel in self.g_member_channel] def parse_events_in_channel(self, events): \"\"\" Selecting events of type", "# constants self.RTM_READ_DELAY = 2 # 1 second delay between reading from RTM", "direct mention, returns None \"\"\" matches = search(self.MENTION_REGEX, message_text) # the first group", "= \"^<@(|[WU].+?)>(.*)\" self.LINK_URL = conf.link_url self.MATCH_PATTERN = conf.match_pattern # list of channel the", "channel in [channel['id'] for channel in self.g_member_channel] def parse_events_in_channel(self, events): \"\"\" Selecting events", "suggest some links analysed_message = self.analyse_message(event['text']) thread_ts = event['ts'] if 'thread_ts' in event.keys():", "bot_message.thread_ts not in self.message_context.keys(): self.message_context[bot_message.thread_ts] = [] self.message_context[bot_message.thread_ts].extend(bot_message.raw_message) self.slack_client.api_call( \"chat.postMessage\", channel=bot_message.channel, thread_ts=bot_message.thread_ts, text=bot_message.formatted_message", "chat(self): if self.slack_client.rtm_connect(with_team_state=False): print(\"Starter Bot connected and running!\") self.get_list_of_channels() self.bot_loop() else: 
print(\"Connection failed.", "return no_repeat_messages def respond_in_thread(self, bot_message): \"\"\"Sends the response back to the channel în", "return channel in [channel['id'] for channel in self.g_member_channel] def parse_events_in_channel(self, events): \"\"\" Selecting", "\"message\" or \"subtype\" in event or \\ not self.check_if_member(event[\"channel\"]): # print(\"not for me:", "message): \"\"\" find matching sub string in the message and returns a list", "class Bot: def __init__(self, conf): # instantiate Slack client self.slack_client = SlackClient(conf.slack_bot_token) #", "no_repeat_messages def respond_in_thread(self, bot_message): \"\"\"Sends the response back to the channel în a", "a direct mention (a mention that is at the beginning) in message text", "thread. \"\"\" # pprint(self.message_context) no_repeat_messages = copy(analysed_messages) for message in analysed_messages: if thread_ts", "def __init__(self, conf): # instantiate Slack client self.slack_client = SlackClient(conf.slack_bot_token) # starterbot's user", "value is assigned # after the bot starts up self.starterbot_id = None #", "message and returns a list of formatted links \"\"\" pattern = self.MATCH_PATTERN matchs", "above.\") def bot_loop(self): # Read bot's user ID by calling Web API method", "'thread_ts' in event.keys(): thread_ts = event['thread_ts'] if not analysed_message: return Message(None, None, None)", "= i.group(1) if value not in matchs: matchs.append(value) if not len(matchs): return return", "or \\ not self.check_if_member(event[\"channel\"]): # print(\"not for me: type:{}\".format(event)) continue # analyse message", "analysed_message_no_repeat = self.dont_repeat_in_thread(analysed_message, thread_ts) if not analysed_message_no_repeat: return Message(None, None, None) return Message(event[\"channel\"],", "and running!\") self.get_list_of_channels() self.bot_loop() else: print(\"Connection failed. 
Exception traceback printed above.\") def bot_loop(self):", "list of channel the bot is member of self.g_member_channel = [] # context:", "def chat(self): if self.slack_client.rtm_connect(with_team_state=False): print(\"Starter Bot connected and running!\") self.get_list_of_channels() self.bot_loop() else: print(\"Connection", "in the message and returns a list of formatted links \"\"\" pattern =", "copy # from pprint import pprint from slackclient import SlackClient from bot.message import", "member if event[\"type\"] != \"message\" or \"subtype\" in event or \\ not self.check_if_member(event[\"channel\"]):", "if it was already sent in the same message thread. \"\"\" # pprint(self.message_context)", "mention (a mention that is at the beginning) in message text and returns", "from pprint import pprint from slackclient import SlackClient from bot.message import Message class", "\"subtype\" in event or \\ not self.check_if_member(event[\"channel\"]): # print(\"not for me: type:{}\".format(event)) continue", "print(\"not for me: type:{}\".format(event)) continue # analyse message to see if we can", "in which thread the link was already provided self.message_context = {} def chat(self):", "my channels: {}\".format(g_member_channel)) for event in events: # pprint(event) # Parsing only messages", "can suggest some links analysed_message = self.analyse_message(event['text']) thread_ts = event['ts'] if 'thread_ts' in", "copy(analysed_messages) for message in analysed_messages: if thread_ts in self.message_context.keys(): if message in self.message_context[thread_ts]:", "the beginning) in message text and returns the user ID which was mentioned.", "was already sent in the same message thread. 
\"\"\" # pprint(self.message_context) no_repeat_messages =", "return Message(None, None, None) analysed_message_no_repeat = self.dont_repeat_in_thread(analysed_message, thread_ts) if not analysed_message_no_repeat: return Message(None,", "check_if_member(self, channel): \"\"\" checking if the bot is member of a given channel", "pprint(event) # Parsing only messages in the channels where the bot is member", "in analysed_messages: if thread_ts in self.message_context.keys(): if message in self.message_context[thread_ts]: no_repeat_messages.remove(message) return no_repeat_messages", "channel in channels['channels'] if channel['is_member']] # print(\"available channels:\") # pprint(channels) print(\"I am member", "ID in Slack: value is assigned # after the bot starts up self.starterbot_id", "in matchs: matchs.append(value) if not len(matchs): return return matchs def dont_repeat_in_thread(self, analysed_messages, thread_ts):", "pprint from slackclient import SlackClient from bot.message import Message class Bot: def __init__(self,", "\"\"\" return channel in [channel['id'] for channel in self.g_member_channel] def parse_events_in_channel(self, events): \"\"\"", "a given channel \"\"\" return channel in [channel['id'] for channel in self.g_member_channel] def", "\\ not self.check_if_member(event[\"channel\"]): # print(\"not for me: type:{}\".format(event)) continue # analyse message to", "# 1 second delay between reading from RTM self.MENTION_REGEX = \"^<@(|[WU].+?)>(.*)\" self.LINK_URL =", "channel in self.g_member_channel] def parse_events_in_channel(self, events): \"\"\" Selecting events of type message with", "the remaining message return (matches.group(1), matches.group(2).strip()) if matches else (None, None) def get_list_of_channels(self):", "return (matches.group(1), matches.group(2).strip()) if matches else (None, None) def get_list_of_channels(self): \"\"\" print the", "text and returns the user ID which was mentioned. 
If there is no", "Finds a direct mention (a mention that is at the beginning) in message", "None) analysed_message_no_repeat = self.dont_repeat_in_thread(analysed_message, thread_ts) if not analysed_message_no_repeat: return Message(None, None, None) return", "the second group contains the remaining message return (matches.group(1), matches.group(2).strip()) if matches else", "self.get_list_of_channels() self.bot_loop() else: print(\"Connection failed. Exception traceback printed above.\") def bot_loop(self): # Read", "# pprint(self.message_context) no_repeat_messages = copy(analysed_messages) for message in analysed_messages: if thread_ts in self.message_context.keys():", "list of available channels \"\"\" channels = self.slack_client.api_call( \"channels.list\", exclude_archived=1 ) self.g_member_channel =", "channels where the bot is member if event[\"type\"] != \"message\" or \"subtype\" in", "Selecting events of type message with no subtype which are posted in channel", "instantiate Slack client self.slack_client = SlackClient(conf.slack_bot_token) # starterbot's user ID in Slack: value", "self.slack_client.rtm_connect(with_team_state=False): print(\"Starter Bot connected and running!\") self.get_list_of_channels() self.bot_loop() else: print(\"Connection failed. 
Exception traceback", "a thread if bot_message.thread_ts not in self.message_context.keys(): self.message_context[bot_message.thread_ts] = [] self.message_context[bot_message.thread_ts].extend(bot_message.raw_message) self.slack_client.api_call( \"chat.postMessage\",", "\",\".join([c['name'] for c in self.g_member_channel]))) def check_if_member(self, channel): \"\"\" checking if the bot", "Read bot's user ID by calling Web API method `auth.test` self.slack_client.api_call(\"auth.test\")[\"user_id\"] while True:", "member of a given channel \"\"\" return channel in [channel['id'] for channel in", "events of type message with no subtype which are posted in channel where", "channels:\") # pprint(channels) print(\"I am member of {} channels: {}\" .format(len(self.g_member_channel), \",\".join([c['name'] for", "RTM self.MENTION_REGEX = \"^<@(|[WU].+?)>(.*)\" self.LINK_URL = conf.link_url self.MATCH_PATTERN = conf.match_pattern # list of", "beginning) in message text and returns the user ID which was mentioned. 
If", "message from analysed message if it was already sent in the same message", "the bot is member of self.g_member_channel = [] # context: in which thread", "member of {} channels: {}\" .format(len(self.g_member_channel), \",\".join([c['name'] for c in self.g_member_channel]))) def check_if_member(self,", "# Add message to the message context to avoid # repeating same message", "in a thread if bot_message.thread_ts not in self.message_context.keys(): self.message_context[bot_message.thread_ts] = [] self.message_context[bot_message.thread_ts].extend(bot_message.raw_message) self.slack_client.api_call(", "see if we can suggest some links analysed_message = self.analyse_message(event['text']) thread_ts = event['ts']", "bot.message import Message class Bot: def __init__(self, conf): # instantiate Slack client self.slack_client", "available channels \"\"\" channels = self.slack_client.api_call( \"channels.list\", exclude_archived=1 ) self.g_member_channel = [channel for", "if not analysed_message_no_repeat: return Message(None, None, None) return Message(event[\"channel\"], thread_ts, analysed_message_no_repeat, self.LINK_URL) return", "from analysed message if it was already sent in the same message thread.", "self.message_context.keys(): if message in self.message_context[thread_ts]: no_repeat_messages.remove(message) return no_repeat_messages def respond_in_thread(self, bot_message): \"\"\"Sends the", "if bot_message.channel: self.respond_in_thread(bot_message) sleep(self.RTM_READ_DELAY) def parse_direct_mention(self, message_text): \"\"\" Finds a direct mention (a", "if not len(matchs): return return matchs def dont_repeat_in_thread(self, analysed_messages, thread_ts): \"\"\" Remove message", "bot is member if event[\"type\"] != \"message\" or \"subtype\" in event or \\", "self.g_member_channel]))) def check_if_member(self, channel): \"\"\" checking if the bot is member of a" ]
[ "seed=123) test_data_tr, test_data_te = numpy_stratified_split(test_data, ratio=0.75, seed=123) # Binarize train, validation and test", "numpy as np import os import pandas as pd import random import requests", "0.0) val_data_te_ratings = val_data_te.copy() val_data_te = np.where(val_data_te > 3.5, 1.0, 0.0) # Binarize", "ngcf_model]: recommendations = model.recommend(users, k=10) recommendations = recommendations.replace({'userId': id2user, 'movieId': id2item}) recommendations =", "tf.SparseTensor(indices, coo.data, coo.shape) A_tilde # # Train models # ## Graph Convoultional Networks", "merged[merged['userId'].isin(common_users)] if len(set(merged['userId'])) != len(set(test['userId'])): print('Number of users in train and test are", "val_data_te = numpy_stratified_split(val_data, ratio=0.75, seed=123) test_data_tr, test_data_te = numpy_stratified_split(test_data, ratio=0.75, seed=123) # Binarize", "x_val_tr=val_data_tr, x_val_te=val_data_te_ratings, mapper=am_val ) # ### Recommend with SVAE # In[28]: # Model", "train_test_split from tensorflow.python.framework.ops import disable_eager_execution from tqdm import tqdm from utils import stratified_split,", "n_items = n_movies, n_layers = 3 ) ngcf_model.fit(epochs=25, batch_size=1024, optimizer=optimizer) # ### Recommend", "test_common = test[test['userId'].isin(common_users)] svd_pred_common = merged[merged['userId'].isin(common_users)] if len(set(merged['userId'])) != len(set(test['userId'])): print('Number of users", "'SVD'] comparison = pd.DataFrame(columns=['Algorithm', 'Precision@k', 'Recall@k', 'MAP', 'NDCG']) # Convert test user ids", "# From the predictions, we want only the top k for each user,", "We can see how LightGCN improves in ranking metrics compared to NGCF by", "<NAME>, <NAME>, <NAME>, <NAME>, & <NAME>, Neural Graph Collaorative Filtering, 2019, https://arxiv.org/abs/1905.08108 #", "train/validation/test users n_users = len(unique_users) train_users = unique_users[:(n_users - HELDOUT_USERS * 2)] 
val_users", "test respectively: {len(set(merged['userId']))}, {len(set(test['userId']))}\") print(f\"# of users in BOTH train and test: {len(set(svd_pred_common['userId']))}\")", "dict(zip(train_reindex['userId_new'], train['userId'])) # In[18]: # Create user-item graph (sparse matix where users are", "the winning algorithm for 2009's Netflix Prize competition, SVD++. # # Models include", "= [name, pak, rak, map, ndcg] # In[33]: comparison # # References: #", "for x in test['userId'].unique()]) for rec, name in zip(recs, model_names): tester = test_df", "get_ipython().system(u'mkdir \"{path}\"') get_ipython().magic(u'cd \"{path}\"') import sys; sys.path.append(path) get_ipython().system(u'git config --global user.email \"<EMAIL>\"') get_ipython().system(u'git", "df = df.groupby('movieId').filter(lambda x: len(x) >= 1) # Obtain both usercount and itemcount", "= os.path.join('./data/bronze', 'u.data') raw_data = pd.read_csv(fp, sep='\\t', names=['userId', 'movieId', 'rating', 'timestamp']) print(f'Shape: {raw_data.shape}')", "pd.merge(temp, predictions, on=['userId', 'movieId'], how=\"outer\") merged = merged[merged['seen'].isnull()].drop('seen', axis=1) # Create filter for", "only the top k for each user, # not all the recommendations. #", "with SVAE # In[28]: # Model prediction on the training part of test", "the train and test set common_users = set(test['userId']).intersection(set(predictions['userId'])) # Filter the test and", "'title'], usecols = range(2), encoding='iso-8859-1') print(f'Shape: {movie_titles.shape}') movie_titles.sample(10, random_state=123) # In[15]: train_size =", "column), data) coo = norm_adj_mat.tocoo().astype(np.float32) # create an index that will tell SparseTensor", "user, # not all the recommendations. # Extract the top k recommendations from", "in the span of just 1-2 years. 
# In[32]: model_names = ['LightGCN', 'NGCF',", "= train.append(test) n_users = combined['userId'].nunique() print('Number of users:', n_users) n_movies = combined['movieId'].nunique() print('Number", "train_reindex = train_reindex[['userId_new', 'movieId_new', 'rating']] test_reindex = pd.merge(test, movie_new, on='movieId', how='left') # Reset", "coo.shape) A_tilde # # Train models # ## Graph Convoultional Networks (GCNs) #", "movie and user are connected. adj_mat[:n_users, n_users:] = R adj_mat[n_users:, :n_users] = R.T", "utils import stratified_split, numpy_stratified_split import build_features import metrics from models import SVAE from", "Comparison # # In this notebook we compare different recommendation systems starting with", "0-n_users. train_reindex['userId_new'] = train_reindex['userId'] - 1 train_reindex = train_reindex[['userId_new', 'movieId_new', 'rating']] test_reindex =", "### SVD # In[30]: svd = surprise.SVD(random_state=0, n_factors=64, n_epochs=10, verbose=True) svd.fit(surprise_train) # ###", "= np.where(test_data_tr > 3.5, 1.0, 0.0) # Binarize test data: testing part (save", "= svae_model.recommend_k_items(x=test_data_tr,k=10,remove_seen=True) # Convert sparse matrix back to df recommendations = am_test.map_back_sparse(top_k, kind='prediction')", "set recs.append(top_k) # # Compare performance # Looking at all 5 of our", "LightGCN(A_tilde, n_users = n_users, n_items = n_movies, n_layers = 3) # In[22]: optimizer", "am_val = build_features.AffinityMatrix(df=val_set, items_list=unique_train_items) am_test = build_features.AffinityMatrix(df=test_set, items_list=unique_train_items) # Obtain the sparse matrix", "Recall@k # * Mean Average Precision (MAP) # * Normalized Discounted Cumulative Gain", ": user_new=test_map_users.get(user_old) # new value item_new=test_map_items.get(item_old) # new value rating=i['rating'] test_data_te_ratings.at[user_new,item_new]= rating if", "include movies recommendations that are also in the test set 
recs.append(top_k) # ##", "= R.T adj_mat # In[19]: # Calculate degree matrix D (for every row", "both the train and test set common_users = set(test['userId']).intersection(set(predictions['userId'])) # Filter the test", "remove movies that have alread been seen (seen=1) merged = pd.merge(temp, predictions, on=['userId',", "with the predicted movie's rank for each user top_k = recommendations.copy() top_k['rank'] =", "3.5, 1.0, 0.0) val_data_te_ratings = val_data_te.copy() val_data_te = np.where(val_data_te > 3.5, 1.0, 0.0)", "for rec, name in zip(recs, model_names): tester = test_df if name == 'SVAE'", "= val_set.loc[val_set['movieId'].isin(unique_train_items)] test_set = test_set.loc[test_set['movieId'].isin(unique_train_items)] # Instantiate the sparse matrix generation for train,", "we can see that the state-of-the-art model LightGCN vastly outperforms all other models.", "in BOTH train and test: {len(set(svd_pred_common['userId']))}\") continue # From the predictions, we want", "{len(set(test['userId']))}\") print(f\"# of users in BOTH train and test: {len(set(svd_pred_common['userId']))}\") continue # From", "import stratified_split, numpy_stratified_split import build_features import metrics from models import SVAE from models.GCN", "= merged[merged['userId'].isin(common_users)] if len(set(merged['userId'])) != len(set(test['userId'])): print('Number of users in train and test", "## Standard Variational Autoencoder (SVAE) # In[26]: # Binarize the data (only keep", "cell compares the performance of the different models using ranking metrics: # #", "x: len(x) >= 1) # Obtain both usercount and itemcount after filtering usercount", "= sp.dok_matrix((n_users, n_movies), dtype=np.float32) R[train_reindex['userId_new'], train_reindex['movieId_new']] = 1 # Create the adjaceny matrix", "path = \"/content/\" + project_name; get_ipython().system(u'mkdir \"{path}\"') get_ipython().magic(u'cd \"{path}\"') import sys; sys.path.append(path) get_ipython().system(u'git", 
"df.groupby('movieId').filter(lambda x: len(x) >= 1) # Obtain both usercount and itemcount after filtering", "rec, name in zip(recs, model_names): tester = test_df if name == 'SVAE' else", "val_data_te_ratings=pd.DataFrame(val_data_te_ratings) for index,i in df_low_rating.iterrows(): user_old= i['userId'] # old value item_old=i['movieId'] # old", "= 3 ) ngcf_model.fit(epochs=25, batch_size=1024, optimizer=optimizer) # ### Recommend with LightGCN and NGCF", "= svd_pred_common.groupby('userId', as_index=False).apply(lambda x: x.nlargest(10, 'prediction')).reset_index(drop=True) top_movies['rank'] = top_movies.groupby('userId', sort=False).cumcount() + 1 top_k", "unique_users[(n_users - HELDOUT_USERS):] train_set = df.loc[df['userId'].isin(train_users)] val_set = df.loc[df['userId'].isin(val_users)] test_set = df.loc[df['userId'].isin(test_users)] unique_train_items", "combined['userId'].nunique() print('Number of users:', n_users) n_movies = combined['movieId'].nunique() print('Number of movies:', n_movies) #", "as sp import surprise import tensorflow as tf from sklearn.model_selection import train_test_split from", "For each user, only include movies recommendations that are also in the test", "have the same users between them test_common = test[test['userId'].isin(common_users)] svd_pred_common = merged[merged['userId'].isin(common_users)] if", "as_index = False).size() itemcount = df[['movieId']].groupby('movieId', as_index = False).size() unique_users =sorted(df.userId.unique()) np.random.seed(123) unique_users", "sister model to LightGCN, but only by a single year. 
We can see", "sort=False).cumcount() + 1 top_k = top_movies.copy() top_k['rank'] = top_movies.groupby('userId', sort=False).cumcount() + 1 #", "= pd.unique(train_set['movieId']) val_set = val_set.loc[val_set['movieId'].isin(unique_train_items)] test_set = test_set.loc[test_set['movieId'].isin(unique_train_items)] # Instantiate the sparse matrix", "metrics compared to NGCF by simply removing unnecessary operations. # # In conclusion,", "seen (seen=1) merged = pd.merge(temp, predictions, on=['userId', 'movieId'], how=\"outer\") merged = merged[merged['seen'].isnull()].drop('seen', axis=1)", "and from indexes item2id = dict(zip(movie_new['movieId'], movie_new['movieId_new'])) id2item = dict(zip(movie_new['movieId_new'], movie_new['movieId'])) user2id =", "* Normalized Discounted Cumulative Gain (NDCG) # # where $k=10$ # # #", "model architectures with notable performance increases can be developed in the span of", "!= len(set(test['userId'])): print('Number of users in train and test are NOT equal') print(f\"#", "the same users?: {set(train.userId) == set(test.userId)}') # In[16]: combined = train.append(test) n_users =", "train_reindex = pd.merge(train, movie_new, on='movieId', how='left') # Reset index to 0-n_users. 
train_reindex['userId_new'] =", "architectures with notable performance increases can be developed in the span of just", "df_preferred = raw_data[raw_data['rating'] > 3.5] df_low_rating = raw_data[raw_data['rating'] <= 3.5] df = df_preferred.groupby('userId').filter(lambda", "# # # * Precision@k # * Recall@k # * Mean Average Precision", "x_valid=val_data, x_val_tr=val_data_tr, x_val_te=val_data_te_ratings, mapper=am_val ) # ### Recommend with SVAE # In[28]: #", "of all 1s temp = train[['userId', 'movieId']].copy() temp['seen'] = 1 # Outer join", "from indexes item2id = dict(zip(movie_new['movieId'], movie_new['movieId_new'])) id2item = dict(zip(movie_new['movieId_new'], movie_new['movieId'])) user2id = dict(zip(train['userId'],", "2020, https://arxiv.org/abs/2002.02126 # 2. <NAME>, <NAME>, <NAME>, <NAME>, & <NAME>, Neural Graph Collaorative", "= metrics.recall_at_k(rec, tester, 'userId', 'movieId', 'rank') map = metrics.mean_average_precision(rec, tester, 'userId', 'movieId', 'rank')", "id2item}) recommendations = recommendations.merge(movie_titles, how='left', on='movieId' )[['userId', 'movieId', 'title', 'prediction']] # Create column", "val_data_tr, val_data_te = numpy_stratified_split(val_data, ratio=0.75, seed=123) test_data_tr, test_data_te = numpy_stratified_split(test_data, ratio=0.75, seed=123) #", "a widely used algorithm during the Netflix Prize competition, LightGCN achieves an increase", ") ngcf_model.fit(epochs=25, batch_size=1024, optimizer=optimizer) # ### Recommend with LightGCN and NGCF # In[24]:", "recommendations.copy() top_k['rank'] = recommendations.groupby('userId', sort=False).cumcount() + 1 # For each user, only include", "val_data, val_map_users, val_map_items = am_val.gen_affinity_matrix() test_data, test_map_users, test_map_items = am_test.gen_affinity_matrix() # Split validation", "= recommendations.merge(movie_titles, how='left', on='movieId' )[['userId', 'movieId', 'title', 'prediction']] # Create column with the", 
"as_index=False).apply(lambda x: x.nlargest(10, 'prediction')).reset_index(drop=True) top_movies['rank'] = top_movies.groupby('userId', sort=False).cumcount() + 1 top_k = top_movies.copy()", "LightGCN, but only by a single year. We can see how LightGCN improves", "pak, rak, map, ndcg] # In[33]: comparison # # References: # # 1.", "unique_users =sorted(df.userId.unique()) np.random.seed(123) unique_users = np.random.permutation(unique_users) HELDOUT_USERS = 200 # Create train/validation/test users", "validation and test sets train_data, _, _ = am_train.gen_affinity_matrix() val_data, val_map_users, val_map_items =", "surprise.SVDpp(random_state=0, n_factors=64, n_epochs=10, verbose=True) svdpp.fit(surprise_train) # ### SVD # In[30]: svd = surprise.SVD(random_state=0,", "{len(set(svd_pred_common['userId']))}\") continue # From the predictions, we want only the top k for", "{len(set(merged['userId']))}, {len(set(test['userId']))}\") print(f\"# of users in BOTH train and test: {len(set(svd_pred_common['userId']))}\") continue #", "\"{project_path}\"') # In[34]: get_ipython().system(u'git status') # In[35]: get_ipython().system(u'git add . && git commit", "False).size() unique_users =sorted(df.userId.unique()) np.random.seed(123) unique_users = np.random.permutation(unique_users) HELDOUT_USERS = 200 # Create train/validation/test", "# In[35]: get_ipython().system(u'git add . 
&& git commit -m \\'commit\\' && git push", "used for calculating NDCG) test_data_te_ratings = test_data_te.copy() test_data_te = np.where(test_data_te > 3.5, 1.0,", "am_test.map_back_sparse(top_k, kind='prediction') test_df = am_test.map_back_sparse(test_data_te_ratings, kind='ratings') # use test_data_te_, with the original ratings", "In[30]: svd = surprise.SVD(random_state=0, n_factors=64, n_epochs=10, verbose=True) svd.fit(surprise_train) # ### Recommend with SVD++", "items from training set for all sets am_train = build_features.AffinityMatrix(df=train_set, items_list=unique_train_items) am_val =", "stratified_split(raw_data, 'userId', train_size) print(f'Train Shape: {train.shape}') print(f'Test Shape: {test.shape}') print(f'Do they have the", "= test_set.loc[test_set['movieId'].isin(unique_train_items)] # Instantiate the sparse matrix generation for train, validation and test", "val_map_items = am_val.gen_affinity_matrix() test_data, test_map_users, test_map_items = am_test.gen_affinity_matrix() # Split validation and test", "n_users = combined['userId'].nunique() print('Number of users:', n_users) n_movies = combined['movieId'].nunique() print('Number of movies:',", "val_data_tr = np.where(val_data_tr > 3.5, 1.0, 0.0) val_data_te_ratings = val_data_te.copy() val_data_te = np.where(val_data_te", "optimizer=optimizer) # ### Neural Graph Collaborative Filtering (NGCF) # In[23]: ngcf_model = NGCF(A_tilde,", "with LightGCN and NGCF # In[24]: # Convert test user ids to the", "<= 3.5] df = df_preferred.groupby('userId').filter(lambda x: len(x) >= 5) df = df.groupby('movieId').filter(lambda x:", "can convert to and from indexes item2id = dict(zip(movie_new['movieId'], movie_new['movieId_new'])) id2item = dict(zip(movie_new['movieId_new'],", "users between them test_common = test[test['userId'].isin(common_users)] svd_pred_common = merged[merged['userId'].isin(common_users)] if len(set(merged['userId'])) != len(set(test['userId'])):", "and test set common_users = 
set(test['userId']).intersection(set(predictions['userId'])) # Filter the test and predictions so", "{movie_titles.shape}') movie_titles.sample(10, random_state=123) # In[15]: train_size = 0.75 train, test = stratified_split(raw_data, 'userId',", "the same users between them test_common = test[test['userId'].isin(common_users)] svd_pred_common = merged[merged['userId'].isin(common_users)] if len(set(merged['userId']))", "= 1 # Outer join and remove movies that have alread been seen", "will be used for calculating NDCG) test_data_te_ratings = test_data_te.copy() test_data_te = np.where(test_data_te >", "np.where(train_data > 3.5, 1.0, 0.0) val_data = np.where(val_data > 3.5, 1.0, 0.0) test_data", "= dict(zip(train_reindex['userId_new'], train['userId'])) # In[18]: # Create user-item graph (sparse matix where users", "1) # Obtain both usercount and itemcount after filtering usercount = df[['userId']].groupby('userId', as_index", "user-item graph (sparse matix where users are rows and movies are columns. #", "batch_size=1024, optimizer=optimizer) # ### Recommend with LightGCN and NGCF # In[24]: # Convert", "# not all the recommendations. # Extract the top k recommendations from the", "items = train['movieId'].unique() for user in users: for item in items: predictions.append([user, item,", "# --- # # Collaborative Filtering Comparison # # In this notebook we", "back to df recommendations = am_test.map_back_sparse(top_k, kind='prediction') test_df = am_test.map_back_sparse(test_data_te_ratings, kind='ratings') # use", "from scratch in Tensorflow. # # The last cell compares the performance of", "with the values of D^(-0.5) are the diagonals. 
D_inv_sq_root = sp.diags(D_inv_values) # Eval", "old value if (test_map_users.get(user_old) is not None) and (test_map_items.get(item_old) is not None) :", "comparison = pd.DataFrame(columns=['Algorithm', 'Precision@k', 'Recall@k', 'MAP', 'NDCG']) # Convert test user ids to", "=sorted(df.userId.unique()) np.random.seed(123) unique_users = np.random.permutation(unique_users) HELDOUT_USERS = 200 # Create train/validation/test users n_users", "# In this notebook we compare different recommendation systems starting with the state-of-the-art", "1 test_reindex = test_reindex[['userId_new', 'movieId_new', 'rating']] # Create dictionaries so we can convert", "np.where(val_data_tr > 3.5, 1.0, 0.0) val_data_te_ratings = val_data_te.copy() val_data_te = np.where(val_data_te > 3.5,", "sparse matrix for train, validation and test sets train_data, _, _ = am_train.gen_affinity_matrix()", "model to LightGCN, but only by a single year. We can see how", "-m \\'commit\\' && git push origin \"{branch}\"') # In[7]: import sys sys.path.insert(0, './code')", "n_movies = combined['movieId'].nunique() print('Number of movies:', n_movies) # In[17]: # Create DataFrame with", "= train[['userId', 'movieId']].copy() temp['seen'] = 1 # Outer join and remove movies that", "Obtain the sparse matrix for train, validation and test sets train_data, _, _", "merged = merged[merged['seen'].isnull()].drop('seen', axis=1) # Create filter for users that appear in both", "In[15]: train_size = 0.75 train, test = stratified_split(raw_data, 'userId', train_size) print(f'Train Shape: {train.shape}')", "# Prepare data # In[9]: fp = os.path.join('./data/bronze', 'u.data') raw_data = pd.read_csv(fp, sep='\\t',", "> 3.5, 1.0, 0.0) # Binarize test data: testing part (save non-binary version", "'Recall@k', 'MAP', 'NDCG']) # Convert test user ids to the new ids users", "not None) and (val_map_items.get(item_old) is not None) : user_new=val_map_users.get(user_old) # new value item_new=val_map_items.get(item_old)", 
"\"sparsh-ai\" project_path = os.path.join('/content', project_name) # In[2]: if not os.path.exists(project_path): get_ipython().system(u'cp /content/drive/MyDrive/mykeys.py /content')", "# Create sparse matrix with the values of D^(-0.5) are the diagonals. D_inv_sq_root", "data: training part test_data_tr = np.where(test_data_tr > 3.5, 1.0, 0.0) # Binarize test", "= 1 # Create the adjaceny matrix with the user-item graph. adj_mat =", "* A * D^-0.5). norm_adj_mat = D_inv_sq_root.dot(adj_mat).dot(D_inv_sq_root) # In[20]: # to COOrdinate format", "user ids to the new ids users = np.array([user2id[x] for x in test['userId'].unique()])", "In[16]: combined = train.append(test) n_users = combined['userId'].nunique() print('Number of users:', n_users) n_movies =", "we can convert to and from indexes item2id = dict(zip(movie_new['movieId'], movie_new['movieId_new'])) id2item =", "NDCG) test_data_te_ratings = test_data_te.copy() test_data_te = np.where(test_data_te > 3.5, 1.0, 0.0) # retrieve", "sparse tensor A_tilde = tf.SparseTensor(indices, coo.data, coo.shape) A_tilde # # Train models #", "math import numpy as np import os import pandas as pd import random", "am_test = build_features.AffinityMatrix(df=test_set, items_list=unique_train_items) # Obtain the sparse matrix for train, validation and", "latent_dim=64, n_epochs=400, batch_size=100, k=10, verbose=0, seed=123, drop_encoder=0.5, drop_decoder=0.5, annealing=False, beta=1.0 ) svae_model.fit(x_train=train_data, x_valid=val_data,", "# In[29]: surprise_train = surprise.Dataset.load_from_df(train.drop('timestamp', axis=1), reader=surprise.Reader('ml-100k')).build_full_trainset() svdpp = surprise.SVDpp(random_state=0, n_factors=64, n_epochs=10, verbose=True)", "performance # Looking at all 5 of our models, we can see that", "NDCG by 35%**. # # NGCF is the older sister model to LightGCN,", "Tensorflow. 
# # The last cell compares the performance of the different models", "project_name = \"reco-tut-mlh\"; branch = \"main\"; account = \"sparsh-ai\" project_path = os.path.join('/content', project_name)", "item_old=i['movieId'] # old value if (test_map_users.get(user_old) is not None) and (test_map_items.get(item_old) is not", "both usercount and itemcount after filtering usercount = df[['userId']].groupby('userId', as_index = False).size() itemcount", "D (for every row count the number of nonzero entries) D_values = np.array(adj_mat.sum(1))", "of users in BOTH train and test: {len(set(svd_pred_common['userId']))}\") continue # From the predictions,", "of the different models using ranking metrics: # # # * Precision@k #", "# # # # Imports # In[4]: get_ipython().system(u'pip install -q surprise') # In[8]:", "train_data, _, _ = am_train.gen_affinity_matrix() val_data, val_map_users, val_map_items = am_val.gen_affinity_matrix() test_data, test_map_users, test_map_items", "not None) : user_new=test_map_users.get(user_old) # new value item_new=test_map_items.get(item_old) # new value rating=i['rating'] test_data_te_ratings.at[user_new,item_new]=", "disable_eager_execution from tqdm import tqdm from utils import stratified_split, numpy_stratified_split import build_features import", "test_data_te_ratings.at[user_new,item_new]= rating if (val_map_users.get(user_old) is not None) and (val_map_items.get(item_old) is not None) :", "already seen by users # Create column of all 1s temp = train[['userId',", "import sys; sys.path.append(path) get_ipython().system(u'git config --global user.email \"<EMAIL>\"') get_ipython().system(u'git config --global user.name \"reco-tut\"')", "Movies and users are nodes/vertices. 
# 1 if the movie and user are", "# Create column of all 1s temp = train[['userId', 'movieId']].copy() temp['seen'] = 1", "# ### SVD # In[30]: svd = surprise.SVD(random_state=0, n_factors=64, n_epochs=10, verbose=True) svd.fit(surprise_train) #", "at all 5 of our models, we can see that the state-of-the-art model", "Calculate degree matrix D (for every row count the number of nonzero entries)", "svae_model = SVAE.StandardVAE(n_users=train_data.shape[0], original_dim=train_data.shape[1], intermediate_dim=200, latent_dim=64, n_epochs=400, batch_size=100, k=10, verbose=0, seed=123, drop_encoder=0.5, drop_decoder=0.5,", "n_epochs=10, verbose=True) svdpp.fit(surprise_train) # ### SVD # In[30]: svd = surprise.SVD(random_state=0, n_factors=64, n_epochs=10,", "all 5 of our models, we can see that the state-of-the-art model LightGCN", "the non-zero points are indices = np.mat([coo.row, coo.col]).transpose() # covert to sparse tensor", "print(f'Test Shape: {test.shape}') print(f'Do they have the same users?: {set(train.userId) == set(test.userId)}') #", "map, ndcg] # In[33]: comparison # # References: # # 1. <NAME>, <NAME>,", "[] for model in [light_model, ngcf_model]: recommendations = model.recommend(users, k=10) recommendations = recommendations.replace({'userId':", "'movieId_new', 'rating']] # Create dictionaries so we can convert to and from indexes", "removing unnecessary operations. # # In conclusion, this demonstrates how far recommendation systems", "users n_users = len(unique_users) train_users = unique_users[:(n_users - HELDOUT_USERS * 2)] val_users =", "winning algorithm for 2009's Netflix Prize competition, SVD++. # # Models include in", "= am_test.gen_affinity_matrix() # Split validation and test data into training and testing parts", "on='movieId', how='left') # Reset index to 0-n_users. 
test_reindex['userId_new'] = test_reindex['userId'] - 1 test_reindex", "= \"/content/\" + project_name; get_ipython().system(u'mkdir \"{path}\"') get_ipython().magic(u'cd \"{path}\"') import sys; sys.path.append(path) get_ipython().system(u'git config", "algorithm for 2009's Netflix Prize competition, SVD++. # # Models include in order", "coo = norm_adj_mat.tocoo().astype(np.float32) # create an index that will tell SparseTensor where the", "rating if (val_map_users.get(user_old) is not None) and (val_map_items.get(item_old) is not None) : user_new=val_map_users.get(user_old)", "n_epochs=400, batch_size=100, k=10, verbose=0, seed=123, drop_encoder=0.5, drop_decoder=0.5, annealing=False, beta=1.0 ) svae_model.fit(x_train=train_data, x_valid=val_data, x_val_tr=val_data_tr,", "in train and test are NOT equal') print(f\"# of users in train and", "coo.data, coo.shape) A_tilde # # Train models # ## Graph Convoultional Networks (GCNs)", "_, _ = am_train.gen_affinity_matrix() val_data, val_map_users, val_map_items = am_val.gen_affinity_matrix() test_data, test_map_users, test_map_items =", "the predicted movie's rank for each user top_k = recommendations.copy() top_k['rank'] = recommendations.groupby('userId',", "= NGCF(A_tilde, n_users = n_users, n_items = n_movies, n_layers = 3 ) ngcf_model.fit(epochs=25,", "- HELDOUT_USERS * 2) : (n_users - HELDOUT_USERS)] test_users = unique_users[(n_users - HELDOUT_USERS):]", "Mean Average Precision (MAP) # * Normalized Discounted Cumulative Gain (NDCG) # #", "test user ids to the new ids users = np.array([user2id[x] for x in", "for x in test['userId'].unique()]) recs = [] for model in [light_model, ngcf_model]: recommendations", "= df[['movieId']].groupby('movieId', as_index = False).size() unique_users =sorted(df.userId.unique()) np.random.seed(123) unique_users = np.random.permutation(unique_users) HELDOUT_USERS =", "= top_movies.copy() top_k['rank'] = top_movies.groupby('userId', sort=False).cumcount() + 1 # For each user, only", 
"Network for Recommendation, 2020, https://arxiv.org/abs/2002.02126 # 2. <NAME>, <NAME>, <NAME>, <NAME>, & <NAME>,", "None) and (test_map_items.get(item_old) is not None) : user_new=test_map_users.get(user_old) # new value item_new=test_map_items.get(item_old) #", "dict(zip(train['userId'], train_reindex['userId_new'])) id2user = dict(zip(train_reindex['userId_new'], train['userId'])) # In[18]: # Create user-item graph (sparse", "Create user-item graph (sparse matix where users are rows and movies are columns.", "Autoencoder (SVAE) # In[26]: # Binarize the data (only keep ratings >= 4)", "in test['userId'].unique()]) for rec, name in zip(recs, model_names): tester = test_df if name", "all 1s temp = train[['userId', 'movieId']].copy() temp['seen'] = 1 # Outer join and", "are nodes/vertices. # 1 if the movie and user are connected. adj_mat[:n_users, n_users:]", "push origin \"{branch}\"') # In[7]: import sys sys.path.insert(0, './code') # --- # #", "test_reindex = test_reindex[['userId_new', 'movieId_new', 'rating']] # Create dictionaries so we can convert to", "D_inv_values[np.isinf(D_inv_values)] = 0.0 # Create sparse matrix with the values of D^(-0.5) are", "Discounted Cumulative Gain (NDCG) # # where $k=10$ # # # # Imports", "so they have the same users between them test_common = test[test['userId'].isin(common_users)] svd_pred_common =", "movie_new['movieId_new'] = np.arange(len(movie_new)) train_reindex = pd.merge(train, movie_new, on='movieId', how='left') # Reset index to", "id2user, 'movieId': id2item}) recommendations = recommendations.merge(movie_titles, how='left', on='movieId' )[['userId', 'movieId', 'title', 'prediction']] #", "in Tensorflow. 
# # The last cell compares the performance of the different", "Standard Variational Autoencoder (SVAE) # In[26]: # Binarize the data (only keep ratings", "item2id = dict(zip(movie_new['movieId'], movie_new['movieId_new'])) id2item = dict(zip(movie_new['movieId_new'], movie_new['movieId'])) user2id = dict(zip(train['userId'], train_reindex['userId_new'])) id2user", "# new value rating=i['rating'] test_data_te_ratings.at[user_new,item_new]= rating if (val_map_users.get(user_old) is not None) and (val_map_items.get(item_old)", "that the state-of-the-art model LightGCN vastly outperforms all other models. When compared to", "top_movies.groupby('userId', sort=False).cumcount() + 1 # For each user, only include movies recommendations that", "in the test set recs.append(top_k) # # Compare performance # Looking at all", "NGCF is the older sister model to LightGCN, but only by a single", "recommendations.replace({'userId': id2user, 'movieId': id2item}) recommendations = recommendations.merge(movie_titles, how='left', on='movieId' )[['userId', 'movieId', 'title', 'prediction']]", "models. When compared to SVD++, a widely used algorithm during the Netflix Prize", "the training part of test set top_k = svae_model.recommend_k_items(x=test_data_tr,k=10,remove_seen=True) # Convert sparse matrix", "the adjaceny matrix with the user-item graph. adj_mat = sp.dok_matrix((n_users + n_movies, n_users", "3. Microsoft SVAE implementation: https://github.com/microsoft/recommenders/blob/main/examples/02_model_collaborative_filtering/standard_vae_deep_dive.ipynb # 4. 
<NAME>, Netflix Prize and SVD, 2014,", "numpy_stratified_split(test_data, ratio=0.75, seed=123) # Binarize train, validation and test data train_data = np.where(train_data", "compares the performance of the different models using ranking metrics: # # #", "usercount and itemcount after filtering usercount = df[['userId']].groupby('userId', as_index = False).size() itemcount =", "the top k for each user, # not all the recommendations. # Extract", "test[test['userId'].isin(common_users)] svd_pred_common = merged[merged['userId'].isin(common_users)] if len(set(merged['userId'])) != len(set(test['userId'])): print('Number of users in train", "from tqdm import tqdm from utils import stratified_split, numpy_stratified_split import build_features import metrics", "coding: utf-8 # In[1]: import os project_name = \"reco-tut-mlh\"; branch = \"main\"; account", "3.5] df = df_preferred.groupby('userId').filter(lambda x: len(x) >= 5) df = df.groupby('movieId').filter(lambda x: len(x)", "Extract the top k recommendations from the predictions top_movies = svd_pred_common.groupby('userId', as_index=False).apply(lambda x:", "with the original ratings # Create column with the predicted movie's rank for", "'userId', 'movieId', 'rank') comparison.loc[len(comparison)] = [name, pak, rak, map, ndcg] # In[33]: comparison", "add . && git commit -m \\'commit\\' && git push origin \"{branch}\"') #", "<NAME>, LightGCN: Simplifying and Powering Graph Convolution Network for Recommendation, 2020, https://arxiv.org/abs/2002.02126 #", "recommendations.merge(movie_titles, how='left', on='movieId' )[['userId', 'movieId', 'title', 'prediction']] # Create column with the predicted", "print(f'Shape: {raw_data.shape}') raw_data.sample(10, random_state=123) # In[10]: # Load movie titles. 
fp = os.path.join('./data/bronze',", ": (n_users - HELDOUT_USERS)] test_users = unique_users[(n_users - HELDOUT_USERS):] train_set = df.loc[df['userId'].isin(train_users)] val_set", "how far recommendation systems have advanced since 2009, and how new model architectures", "numpy_stratified_split(val_data, ratio=0.75, seed=123) test_data_tr, test_data_te = numpy_stratified_split(test_data, ratio=0.75, seed=123) # Binarize train, validation", "if a user reviewed that movie, 0 if they didn't). R = sp.dok_matrix((n_users,", "= top_movies.groupby('userId', sort=False).cumcount() + 1 top_k = top_movies.copy() top_k['rank'] = top_movies.groupby('userId', sort=False).cumcount() +", "into training and testing parts val_data_tr, val_data_te = numpy_stratified_split(val_data, ratio=0.75, seed=123) test_data_tr, test_data_te", "test_reindex['userId'] - 1 test_reindex = test_reindex[['userId_new', 'movieId_new', 'rating']] # Create dictionaries so we", "to COOrdinate format first ((row, column), data) coo = norm_adj_mat.tocoo().astype(np.float32) # create an", "In[31]: for model in [svdpp, svd]: predictions = [] users = train['userId'].unique() items", "pandas as pd import random import requests import scipy.sparse as sp import surprise", "that appear in both the train and test set common_users = set(test['userId']).intersection(set(predictions['userId'])) #", "models.GCN import LightGCN, NGCF # # Prepare data # In[9]: fp = os.path.join('./data/bronze',", "1s temp = train[['userId', 'movieId']].copy() temp['seen'] = 1 # Outer join and remove", "# Create DataFrame with reset index of 0-n_movies. 
movie_new = combined[['movieId']].drop_duplicates() movie_new['movieId_new'] =", "recommendations that are also in the test set recs.append(top_k) # # Compare performance", "SVAE # In[28]: # Model prediction on the training part of test set", "= df.groupby('movieId').filter(lambda x: len(x) >= 1) # Obtain both usercount and itemcount after", "os.path.join('./data/bronze', 'u.item') movie_titles = pd.read_csv(fp, sep='|', names=['movieId', 'title'], usecols = range(2), encoding='iso-8859-1') print(f'Shape:", "to 0-n_users. train_reindex['userId_new'] = train_reindex['userId'] - 1 train_reindex = train_reindex[['userId_new', 'movieId_new', 'rating']] test_reindex", "np.mat([coo.row, coo.col]).transpose() # covert to sparse tensor A_tilde = tf.SparseTensor(indices, coo.data, coo.shape) A_tilde", "test_data_te = np.where(test_data_te > 3.5, 1.0, 0.0) # retrieve real ratings from initial", "top_k = svae_model.recommend_k_items(x=test_data_tr,k=10,remove_seen=True) # Convert sparse matrix back to df recommendations = am_test.map_back_sparse(top_k,", "new value item_new=val_map_items.get(item_old) # new value rating=i['rating'] val_data_te_ratings.at[user_new,item_new]= rating val_data_te_ratings=val_data_te_ratings.to_numpy() test_data_te_ratings=test_data_te_ratings.to_numpy() # In[27]:", "recommendation systems have advanced since 2009, and how new model architectures with notable", "&& git commit -m \\'commit\\' && git push origin \"{branch}\"') # In[7]: import", "separate object, will be used for calculating NDCG) test_data_te_ratings = test_data_te.copy() test_data_te =", "tester, 'userId', 'movieId', 'rank') ndcg = metrics.ndcg(rec, tester, 'userId', 'movieId', 'rank') comparison.loc[len(comparison)] =", "conclusion, this demonstrates how far recommendation systems have advanced since 2009, and how", "= test_reindex[['userId_new', 'movieId_new', 'rating']] # Create dictionaries so we can convert to and", "the predictions top_movies = 
svd_pred_common.groupby('userId', as_index=False).apply(lambda x: x.nlargest(10, 'prediction')).reset_index(drop=True) top_movies['rank'] = top_movies.groupby('userId', sort=False).cumcount()", "how LightGCN improves in ranking metrics compared to NGCF by simply removing unnecessary", "of just 1-2 years. # In[32]: model_names = ['LightGCN', 'NGCF', 'SVAE', 'SVD++', 'SVD']", "A_tilde = tf.SparseTensor(indices, coo.data, coo.shape) A_tilde # # Train models # ## Graph", "# use list of unique items from training set for all sets am_train", "{test.shape}') print(f'Do they have the same users?: {set(train.userId) == set(test.userId)}') # In[16]: combined", "n_users + n_movies), dtype=np.float32) # List of lists. adj_mat.tolil() R = R.tolil() #", "user_new=test_map_users.get(user_old) # new value item_new=test_map_items.get(item_old) # new value rating=i['rating'] test_data_te_ratings.at[user_new,item_new]= rating if (val_map_users.get(user_old)", "items_list=unique_train_items) am_val = build_features.AffinityMatrix(df=val_set, items_list=unique_train_items) am_test = build_features.AffinityMatrix(df=test_set, items_list=unique_train_items) # Obtain the sparse", "'title', 'prediction']] # Create column with the predicted movie's rank for each user", "SparseTensor where the non-zero points are indices = np.mat([coo.row, coo.col]).transpose() # covert to", "from utils import stratified_split, numpy_stratified_split import build_features import metrics from models import SVAE", "each user, only include movies recommendations that are also in the test set", "= norm_adj_mat.tocoo().astype(np.float32) # create an index that will tell SparseTensor where the non-zero", "config --global user.email \"<EMAIL>\"') get_ipython().system(u'git config --global user.name \"reco-tut\"') get_ipython().system(u'git init') get_ipython().system(u'git remote", "R.T adj_mat # In[19]: # Calculate degree matrix D (for every row count", "tester = test_df if name == 'SVAE' else test pak = 
metrics.precision_at_k(rec, tester,", "predictions, on=['userId', 'movieId'], how=\"outer\") merged = merged[merged['seen'].isnull()].drop('seen', axis=1) # Create filter for users", "compared to NGCF by simply removing unnecessary operations. # # In conclusion, this", "+ 1 top_k = top_movies.copy() top_k['rank'] = top_movies.groupby('userId', sort=False).cumcount() + 1 # For", "n_layers = 3) # In[22]: optimizer = tf.keras.optimizers.Adam(learning_rate=1e-2) light_model.fit(epochs=25, batch_size=1024, optimizer=optimizer) # ###", "In[24]: # Convert test user ids to the new ids users = np.array([user2id[x]", "include movies recommendations that are also in the test set recs.append(top_k) # #", "'rank') ndcg = metrics.ndcg(rec, tester, 'userId', 'movieId', 'rank') comparison.loc[len(comparison)] = [name, pak, rak,", "1 # Outer join and remove movies that have alread been seen (seen=1)", "recommendations = recommendations.replace({'userId': id2user, 'movieId': id2item}) recommendations = recommendations.merge(movie_titles, how='left', on='movieId' )[['userId', 'movieId',", "= pd.merge(test, movie_new, on='movieId', how='left') # Reset index to 0-n_users. test_reindex['userId_new'] = test_reindex['userId']", "Graph Convolution Network (LightGCN) # In[21]: light_model = LightGCN(A_tilde, n_users = n_users, n_items", "(MAP) # * Normalized Discounted Cumulative Gain (NDCG) # # where $k=10$ #", "version in the separate object, will be used for calculating NDCG) test_data_te_ratings =", "achieves an increase in **Percision@k by 29%, Recall@k by 18%, MAP by 12%,", "(GCNs) # ### Light Graph Convolution Network (LightGCN) # In[21]: light_model = LightGCN(A_tilde,", "performance increases can be developed in the span of just 1-2 years. #", "# In conclusion, this demonstrates how far recommendation systems have advanced since 2009,", "improves in ranking metrics compared to NGCF by simply removing unnecessary operations. 
#", "'movieId', 'title', 'prediction']] # Create column with the predicted movie's rank for each", "sep='|', names=['movieId', 'title'], usecols = range(2), encoding='iso-8859-1') print(f'Shape: {movie_titles.shape}') movie_titles.sample(10, random_state=123) # In[15]:", "we compare different recommendation systems starting with the state-of-the-art LightGCN and going back", "x in test['userId'].unique()]) recs = [] for model in [light_model, ngcf_model]: recommendations =", "### SVD++ # In[29]: surprise_train = surprise.Dataset.load_from_df(train.drop('timestamp', axis=1), reader=surprise.Reader('ml-100k')).build_full_trainset() svdpp = surprise.SVDpp(random_state=0, n_factors=64,", "validation and test sets # use list of unique items from training set", "data: testing part (save non-binary version in the separate object, will be used", "- HELDOUT_USERS)] test_users = unique_users[(n_users - HELDOUT_USERS):] train_set = df.loc[df['userId'].isin(train_users)] val_set = df.loc[df['userId'].isin(val_users)]", "= sp.dok_matrix((n_users + n_movies, n_users + n_movies), dtype=np.float32) # List of lists. adj_mat.tolil()", "movies:', n_movies) # In[17]: # Create DataFrame with reset index of 0-n_movies. movie_new", "lists. adj_mat.tolil() R = R.tolil() # Put together adjacency matrix. Movies and users", "merged[merged['seen'].isnull()].drop('seen', axis=1) # Create filter for users that appear in both the train", "also in the test set recs.append(top_k) # # Compare performance # Looking at", "surprise.Dataset.load_from_df(train.drop('timestamp', axis=1), reader=surprise.Reader('ml-100k')).build_full_trainset() svdpp = surprise.SVDpp(random_state=0, n_factors=64, n_epochs=10, verbose=True) svdpp.fit(surprise_train) # ### SVD", "sep='\\t', names=['userId', 'movieId', 'rating', 'timestamp']) print(f'Shape: {raw_data.shape}') raw_data.sample(10, random_state=123) # In[10]: # Load", "# 1 if the movie and user are connected. 
adj_mat[:n_users, n_users:] = R", "(test_map_users.get(user_old) is not None) and (test_map_items.get(item_old) is not None) : user_new=test_map_users.get(user_old) # new", "'rating']] # Create dictionaries so we can convert to and from indexes item2id", "from tensorflow.python.framework.ops import disable_eager_execution from tqdm import tqdm from utils import stratified_split, numpy_stratified_split", "validation and test data into training and testing parts val_data_tr, val_data_te = numpy_stratified_split(val_data,", "svdpp = surprise.SVDpp(random_state=0, n_factors=64, n_epochs=10, verbose=True) svdpp.fit(surprise_train) # ### SVD # In[30]: svd", "D^-0.5). norm_adj_mat = D_inv_sq_root.dot(adj_mat).dot(D_inv_sq_root) # In[20]: # to COOrdinate format first ((row, column),", "Model prediction on the training part of test set top_k = svae_model.recommend_k_items(x=test_data_tr,k=10,remove_seen=True) #", "Graph Convoultional Networks (GCNs) # ### Light Graph Convolution Network (LightGCN) # In[21]:", "in the test set recs.append(top_k) # ## Singular Value Decomposition (SVD) # ###", "but only by a single year. 
We can see how LightGCN improves in", "want only the top k for each user, # not all the recommendations.", "and (val_map_items.get(item_old) is not None) : user_new=val_map_users.get(user_old) # new value item_new=val_map_items.get(item_old) # new", "top k recommendations from the predictions top_movies = svd_pred_common.groupby('userId', as_index=False).apply(lambda x: x.nlargest(10, 'prediction')).reset_index(drop=True)", "for index,i in df_low_rating.iterrows(): user_old= i['userId'] # old value item_old=i['movieId'] # old value", "Netflix Prize competition, LightGCN achieves an increase in **Percision@k by 29%, Recall@k by", "set recs.append(top_k) # ## Standard Variational Autoencoder (SVAE) # In[26]: # Binarize the", "train_reindex['userId_new'])) id2user = dict(zip(train_reindex['userId_new'], train['userId'])) # In[18]: # Create user-item graph (sparse matix", "with SVD++ and SVD # In[31]: for model in [svdpp, svd]: predictions =", "ranking metrics compared to NGCF by simply removing unnecessary operations. # # In", "old value item_old=i['movieId'] # old value if (test_map_users.get(user_old) is not None) and (test_map_items.get(item_old)", "index that will tell SparseTensor where the non-zero points are indices = np.mat([coo.row,", "reviewed that movie, 0 if they didn't). R = sp.dok_matrix((n_users, n_movies), dtype=np.float32) R[train_reindex['userId_new'],", "Binarize test data: testing part (save non-binary version in the separate object, will", "adjacency matrix. Movies and users are nodes/vertices. # 1 if the movie and", "users are rows and movies are columns. # 1 if a user reviewed", "columns=['userId', 'movieId', 'prediction']) # Remove movies already seen by users # Create column", "is the older sister model to LightGCN, but only by a single year.", "for 2009's Netflix Prize competition, SVD++. 
# # Models include in order are", "systems starting with the state-of-the-art LightGCN and going back to the winning algorithm", "test['userId'].unique()]) for rec, name in zip(recs, model_names): tester = test_df if name ==", "# In[26]: # Binarize the data (only keep ratings >= 4) df_preferred =", "# Compare performance # Looking at all 5 of our models, we can", "LightGCN and going back to the winning algorithm for 2009's Netflix Prize competition,", "= raw_data[raw_data['rating'] > 3.5] df_low_rating = raw_data[raw_data['rating'] <= 3.5] df = df_preferred.groupby('userId').filter(lambda x:", "Load movie titles. fp = os.path.join('./data/bronze', 'u.item') movie_titles = pd.read_csv(fp, sep='|', names=['movieId', 'title'],", "= 200 # Create train/validation/test users n_users = len(unique_users) train_users = unique_users[:(n_users -", "D^(-0.5) are the diagonals. D_inv_sq_root = sp.diags(D_inv_values) # Eval (D^-0.5 * A *", "= R adj_mat[n_users:, :n_users] = R.T adj_mat # In[19]: # Calculate degree matrix", "part of test set top_k = svae_model.recommend_k_items(x=test_data_tr,k=10,remove_seen=True) # Convert sparse matrix back to", "# In[10]: # Load movie titles. fp = os.path.join('./data/bronze', 'u.item') movie_titles = pd.read_csv(fp,", "user, only include movies recommendations that are also in the test set recs.append(top_k)", "vastly outperforms all other models. 
When compared to SVD++, a widely used algorithm", "ids users = np.array([user2id[x] for x in test['userId'].unique()]) for rec, name in zip(recs,", "validation data val_data_tr = np.where(val_data_tr > 3.5, 1.0, 0.0) val_data_te_ratings = val_data_te.copy() val_data_te", "by users # Create column of all 1s temp = train[['userId', 'movieId']].copy() temp['seen']", "continue # From the predictions, we want only the top k for each", "= D_inv_sq_root.dot(adj_mat).dot(D_inv_sq_root) # In[20]: # to COOrdinate format first ((row, column), data) coo", "= unique_users[(n_users - HELDOUT_USERS * 2) : (n_users - HELDOUT_USERS)] test_users = unique_users[(n_users", "non-zero points are indices = np.mat([coo.row, coo.col]).transpose() # covert to sparse tensor A_tilde", "test_data_te_, with the original ratings # Create column with the predicted movie's rank", "svae_model.fit(x_train=train_data, x_valid=val_data, x_val_tr=val_data_tr, x_val_te=val_data_te_ratings, mapper=am_val ) # ### Recommend with SVAE # In[28]:", "and testing parts val_data_tr, val_data_te = numpy_stratified_split(val_data, ratio=0.75, seed=123) test_data_tr, test_data_te = numpy_stratified_split(test_data,", "https://arxiv.org/abs/1905.08108 # 3. Microsoft SVAE implementation: https://github.com/microsoft/recommenders/blob/main/examples/02_model_collaborative_filtering/standard_vae_deep_dive.ipynb # 4. <NAME>, Netflix Prize and", "tqdm import tqdm from utils import stratified_split, numpy_stratified_split import build_features import metrics from", "create an index that will tell SparseTensor where the non-zero points are indices", "recs.append(top_k) # ## Singular Value Decomposition (SVD) # ### SVD++ # In[29]: surprise_train", "on='movieId', how='left') # Reset index to 0-n_users. 
train_reindex['userId_new'] = train_reindex['userId'] - 1 train_reindex", "= \"reco-tut-mlh\"; branch = \"main\"; account = \"sparsh-ai\" project_path = os.path.join('/content', project_name) #", "data train_data = np.where(train_data > 3.5, 1.0, 0.0) val_data = np.where(val_data > 3.5,", "Precision@k # * Recall@k # * Mean Average Precision (MAP) # * Normalized", "user-item graph. adj_mat = sp.dok_matrix((n_users + n_movies, n_users + n_movies), dtype=np.float32) # List", "covert to sparse tensor A_tilde = tf.SparseTensor(indices, coo.data, coo.shape) A_tilde # # Train", "on=['userId', 'movieId'], how=\"outer\") merged = merged[merged['seen'].isnull()].drop('seen', axis=1) # Create filter for users that", "= am_val.gen_affinity_matrix() test_data, test_map_users, test_map_items = am_test.gen_affinity_matrix() # Split validation and test data", "= raw_data[raw_data['rating'] <= 3.5] df = df_preferred.groupby('userId').filter(lambda x: len(x) >= 5) df =", "rak = metrics.recall_at_k(rec, tester, 'userId', 'movieId', 'rank') map = metrics.mean_average_precision(rec, tester, 'userId', 'movieId',", "= df_preferred.groupby('userId').filter(lambda x: len(x) >= 5) df = df.groupby('movieId').filter(lambda x: len(x) >= 1)", "svd.fit(surprise_train) # ### Recommend with SVD++ and SVD # In[31]: for model in", "n_factors=64, n_epochs=10, verbose=True) svdpp.fit(surprise_train) # ### SVD # In[30]: svd = surprise.SVD(random_state=0, n_factors=64,", "dict(zip(movie_new['movieId_new'], movie_new['movieId'])) user2id = dict(zip(train['userId'], train_reindex['userId_new'])) id2user = dict(zip(train_reindex['userId_new'], train['userId'])) # In[18]: #", "and user are connected. 
adj_mat[:n_users, n_users:] = R adj_mat[n_users:, :n_users] = R.T adj_mat", "np.where(val_data_te > 3.5, 1.0, 0.0) # Binarize test data: training part test_data_tr =", "axis=1) # Create filter for users that appear in both the train and", "= dict(zip(train['userId'], train_reindex['userId_new'])) id2user = dict(zip(train_reindex['userId_new'], train['userId'])) # In[18]: # Create user-item graph", "3 ) ngcf_model.fit(epochs=25, batch_size=1024, optimizer=optimizer) # ### Recommend with LightGCN and NGCF #", "D_values = np.array(adj_mat.sum(1)) # Square root and inverse. D_inv_values = np.power(D_values + 1e-9,", ")[['userId', 'movieId', 'title', 'prediction']] # Create column with the predicted movie's rank for", "train_reindex['userId_new'] = train_reindex['userId'] - 1 train_reindex = train_reindex[['userId_new', 'movieId_new', 'rating']] test_reindex = pd.merge(test,", "# The last cell compares the performance of the different models using ranking", "'movieId_new', 'rating']] test_reindex = pd.merge(test, movie_new, on='movieId', how='left') # Reset index to 0-n_users.", "1.0, 0.0) test_data = np.where(test_data > 3.5, 1.0, 0.0) # Binarize validation data", "\"{branch}\"') # In[7]: import sys sys.path.insert(0, './code') # --- # # Collaborative Filtering", "df[['userId']].groupby('userId', as_index = False).size() itemcount = df[['movieId']].groupby('movieId', as_index = False).size() unique_users =sorted(df.userId.unique()) np.random.seed(123)", "# Model prediction on the training part of test set top_k = svae_model.recommend_k_items(x=test_data_tr,k=10,remove_seen=True)", "with notable performance increases can be developed in the span of just 1-2", "for model in [light_model, ngcf_model]: recommendations = model.recommend(users, k=10) recommendations = recommendations.replace({'userId': id2user,", "new model architectures with notable performance increases can be developed in the span", "advanced since 2009, and how new model architectures with notable 
performance increases can", "Graph Collaorative Filtering, 2019, https://arxiv.org/abs/1905.08108 # 3. Microsoft SVAE implementation: https://github.com/microsoft/recommenders/blob/main/examples/02_model_collaborative_filtering/standard_vae_deep_dive.ipynb # 4.", "= df[['userId']].groupby('userId', as_index = False).size() itemcount = df[['movieId']].groupby('movieId', as_index = False).size() unique_users =sorted(df.userId.unique())", "val_set.loc[val_set['movieId'].isin(unique_train_items)] test_set = test_set.loc[test_set['movieId'].isin(unique_train_items)] # Instantiate the sparse matrix generation for train, validation", "movie_new, on='movieId', how='left') # Reset index to 0-n_users. test_reindex['userId_new'] = test_reindex['userId'] - 1", "metrics.recall_at_k(rec, tester, 'userId', 'movieId', 'rank') map = metrics.mean_average_precision(rec, tester, 'userId', 'movieId', 'rank') ndcg", "Recommend with SVAE # In[28]: # Model prediction on the training part of", "for item in items: predictions.append([user, item, model.predict(user, item).est]) predictions = pd.DataFrame(predictions, columns=['userId', 'movieId',", "adj_mat = sp.dok_matrix((n_users + n_movies, n_users + n_movies), dtype=np.float32) # List of lists.", "and NGCF, where we implemented them from scratch in Tensorflow. 
# # The", "how='left', on='movieId' )[['userId', 'movieId', 'title', 'prediction']] # Create column with the predicted movie's", "build_features.AffinityMatrix(df=test_set, items_list=unique_train_items) # Obtain the sparse matrix for train, validation and test sets", "every row count the number of nonzero entries) D_values = np.array(adj_mat.sum(1)) # Square", "recs.append(top_k) # ## Standard Variational Autoencoder (SVAE) # In[26]: # Binarize the data", "= os.path.join('/content', project_name) # In[2]: if not os.path.exists(project_path): get_ipython().system(u'cp /content/drive/MyDrive/mykeys.py /content') import mykeys", "include in order are LightGCN, NGCF, SVAE, SVD++, and SVD. Each model has", "Decomposition (SVD) # ### SVD++ # In[29]: surprise_train = surprise.Dataset.load_from_df(train.drop('timestamp', axis=1), reader=surprise.Reader('ml-100k')).build_full_trainset() svdpp", "build_features.AffinityMatrix(df=val_set, items_list=unique_train_items) am_test = build_features.AffinityMatrix(df=test_set, items_list=unique_train_items) # Obtain the sparse matrix for train,", "(save non-binary version in the separate object, will be used for calculating NDCG)", "In[22]: optimizer = tf.keras.optimizers.Adam(learning_rate=1e-2) light_model.fit(epochs=25, batch_size=1024, optimizer=optimizer) # ### Neural Graph Collaborative Filtering", "4) df_preferred = raw_data[raw_data['rating'] > 3.5] df_low_rating = raw_data[raw_data['rating'] <= 3.5] df =", "are also in the test set recs.append(top_k) # # Compare performance # Looking", "# * Recall@k # * Mean Average Precision (MAP) # * Normalized Discounted", "[light_model, ngcf_model]: recommendations = model.recommend(users, k=10) recommendations = recommendations.replace({'userId': id2user, 'movieId': id2item}) recommendations", "set(test['userId']).intersection(set(predictions['userId'])) # Filter the test and predictions so they have the same users", "seed=123) # Binarize train, validation and test data train_data = 
np.where(train_data > 3.5,", "2009, and how new model architectures with notable performance increases can be developed", "recs.append(top_k) # # Compare performance # Looking at all 5 of our models,", "norm_adj_mat = D_inv_sq_root.dot(adj_mat).dot(D_inv_sq_root) # In[20]: # to COOrdinate format first ((row, column), data)", "to the winning algorithm for 2009's Netflix Prize competition, SVD++. # # Models", "[] users = train['userId'].unique() items = train['movieId'].unique() for user in users: for item", "0.0) # Binarize validation data val_data_tr = np.where(val_data_tr > 3.5, 1.0, 0.0) val_data_te_ratings", "axis=1), reader=surprise.Reader('ml-100k')).build_full_trainset() svdpp = surprise.SVDpp(random_state=0, n_factors=64, n_epochs=10, verbose=True) svdpp.fit(surprise_train) # ### SVD #", "= surprise.SVD(random_state=0, n_factors=64, n_epochs=10, verbose=True) svd.fit(surprise_train) # ### Recommend with SVD++ and SVD", "{train.shape}') print(f'Test Shape: {test.shape}') print(f'Do they have the same users?: {set(train.userId) == set(test.userId)}')", "'timestamp']) print(f'Shape: {raw_data.shape}') raw_data.sample(10, random_state=123) # In[10]: # Load movie titles. fp =", "# Binarize validation data val_data_tr = np.where(val_data_tr > 3.5, 1.0, 0.0) val_data_te_ratings =", "The last cell compares the performance of the different models using ranking metrics:", "import math import numpy as np import os import pandas as pd import", "'userId', 'movieId', 'rank') map = metrics.mean_average_precision(rec, tester, 'userId', 'movieId', 'rank') ndcg = metrics.ndcg(rec,", "if (test_map_users.get(user_old) is not None) and (test_map_items.get(item_old) is not None) : user_new=test_map_users.get(user_old) #", "LightGCN, NGCF, SVAE, SVD++, and SVD. 
Each model has their own individual notebooks", "if not os.path.exists(project_path): get_ipython().system(u'cp /content/drive/MyDrive/mykeys.py /content') import mykeys get_ipython().system(u'rm /content/mykeys.py') path = \"/content/\"", "train, validation and test sets # use list of unique items from training", "> 3.5, 1.0, 0.0) # Binarize validation data val_data_tr = np.where(val_data_tr > 3.5,", "Looking at all 5 of our models, we can see that the state-of-the-art", "val_set = val_set.loc[val_set['movieId'].isin(unique_train_items)] test_set = test_set.loc[test_set['movieId'].isin(unique_train_items)] # Instantiate the sparse matrix generation for", "n_movies), dtype=np.float32) R[train_reindex['userId_new'], train_reindex['movieId_new']] = 1 # Create the adjaceny matrix with the", "In[33]: comparison # # References: # # 1. <NAME>, <NAME>, <NAME>, <NAME>, <NAME>", "train and test are NOT equal') print(f\"# of users in train and test", "3.5, 1.0, 0.0) val_data = np.where(val_data > 3.5, 1.0, 0.0) test_data = np.where(test_data", "user.name \"reco-tut\"') get_ipython().system(u'git init') get_ipython().system(u'git remote add origin https://\"{mykeys.git_token}\":x-oauth-basic@github.com/\"{account}\"/\"{project_name}\".git') get_ipython().system(u'git pull origin \"{branch}\"')", "get_ipython().system(u'git status') # In[35]: get_ipython().system(u'git add . 
&& git commit -m \\'commit\\' &&", "of unique items from training set for all sets am_train = build_features.AffinityMatrix(df=train_set, items_list=unique_train_items)", "the number of nonzero entries) D_values = np.array(adj_mat.sum(1)) # Square root and inverse.", "& <NAME>, LightGCN: Simplifying and Powering Graph Convolution Network for Recommendation, 2020, https://arxiv.org/abs/2002.02126", "Imports # In[4]: get_ipython().system(u'pip install -q surprise') # In[8]: import math import numpy", "requests import scipy.sparse as sp import surprise import tensorflow as tf from sklearn.model_selection", "in order are LightGCN, NGCF, SVAE, SVD++, and SVD. Each model has their", "rating val_data_te_ratings=val_data_te_ratings.to_numpy() test_data_te_ratings=test_data_te_ratings.to_numpy() # In[27]: disable_eager_execution() svae_model = SVAE.StandardVAE(n_users=train_data.shape[0], original_dim=train_data.shape[1], intermediate_dim=200, latent_dim=64, n_epochs=400,", "# In[2]: if not os.path.exists(project_path): get_ipython().system(u'cp /content/drive/MyDrive/mykeys.py /content') import mykeys get_ipython().system(u'rm /content/mykeys.py') path", "Average Precision (MAP) # * Normalized Discounted Cumulative Gain (NDCG) # # where", "= 0.75 train, test = stratified_split(raw_data, 'userId', train_size) print(f'Train Shape: {train.shape}') print(f'Test Shape:", "import scipy.sparse as sp import surprise import tensorflow as tf from sklearn.model_selection import", "# In[34]: get_ipython().system(u'git status') # In[35]: get_ipython().system(u'git add . && git commit -m", "only by a single year. We can see how LightGCN improves in ranking", "the new ids users = np.array([user2id[x] for x in test['userId'].unique()]) for rec, name", "their own individual notebooks where we go more indepth, especially LightGCN and NGCF,", "the user-item graph. adj_mat = sp.dok_matrix((n_users + n_movies, n_users + n_movies), dtype=np.float32) #", "Prize competition, SVD++. 
# # Models include in order are LightGCN, NGCF, SVAE,", "3.5] df_low_rating = raw_data[raw_data['rating'] <= 3.5] df = df_preferred.groupby('userId').filter(lambda x: len(x) >= 5)", "them test_common = test[test['userId'].isin(common_users)] svd_pred_common = merged[merged['userId'].isin(common_users)] if len(set(merged['userId'])) != len(set(test['userId'])): print('Number of", "Recommendation, 2020, https://arxiv.org/abs/2002.02126 # 2. <NAME>, <NAME>, <NAME>, <NAME>, & <NAME>, Neural Graph", "that will tell SparseTensor where the non-zero points are indices = np.mat([coo.row, coo.col]).transpose()", "the values of D^(-0.5) are the diagonals. D_inv_sq_root = sp.diags(D_inv_values) # Eval (D^-0.5", "model has their own individual notebooks where we go more indepth, especially LightGCN", "git push origin \"{branch}\"') # In[7]: import sys sys.path.insert(0, './code') # --- #", "of movies:', n_movies) # In[17]: # Create DataFrame with reset index of 0-n_movies.", "state-of-the-art LightGCN and going back to the winning algorithm for 2009's Netflix Prize", "temp['seen'] = 1 # Outer join and remove movies that have alread been", "print('Number of movies:', n_movies) # In[17]: # Create DataFrame with reset index of", "2) : (n_users - HELDOUT_USERS)] test_users = unique_users[(n_users - HELDOUT_USERS):] train_set = df.loc[df['userId'].isin(train_users)]", "import numpy as np import os import pandas as pd import random import", "rating=i['rating'] val_data_te_ratings.at[user_new,item_new]= rating val_data_te_ratings=val_data_te_ratings.to_numpy() test_data_te_ratings=test_data_te_ratings.to_numpy() # In[27]: disable_eager_execution() svae_model = SVAE.StandardVAE(n_users=train_data.shape[0], original_dim=train_data.shape[1], intermediate_dim=200,", "to sparse tensor A_tilde = tf.SparseTensor(indices, coo.data, coo.shape) A_tilde # # Train models", "# In[4]: get_ipython().system(u'pip install -q surprise') # In[8]: import math import numpy as", "# Put together adjacency 
matrix. Movies and users are nodes/vertices. # 1 if", "not all the recommendations. # Extract the top k recommendations from the predictions", "Cumulative Gain (NDCG) # # where $k=10$ # # # # Imports #", "stratified_split, numpy_stratified_split import build_features import metrics from models import SVAE from models.GCN import", "# Reset index to 0-n_users. test_reindex['userId_new'] = test_reindex['userId'] - 1 test_reindex = test_reindex[['userId_new',", "used algorithm during the Netflix Prize competition, LightGCN achieves an increase in **Percision@k", "\\'commit\\' && git push origin \"{branch}\"') # In[7]: import sys sys.path.insert(0, './code') #", "sp.dok_matrix((n_users, n_movies), dtype=np.float32) R[train_reindex['userId_new'], train_reindex['movieId_new']] = 1 # Create the adjaceny matrix with", "test sets # use list of unique items from training set for all", "np.where(val_data > 3.5, 1.0, 0.0) test_data = np.where(test_data > 3.5, 1.0, 0.0) #", "sys.path.append(path) get_ipython().system(u'git config --global user.email \"<EMAIL>\"') get_ipython().system(u'git config --global user.name \"reco-tut\"') get_ipython().system(u'git init')", "R = sp.dok_matrix((n_users, n_movies), dtype=np.float32) R[train_reindex['userId_new'], train_reindex['movieId_new']] = 1 # Create the adjaceny", "# In[28]: # Model prediction on the training part of test set top_k", "test set recs.append(top_k) # ## Standard Variational Autoencoder (SVAE) # In[26]: # Binarize", "x: len(x) >= 5) df = df.groupby('movieId').filter(lambda x: len(x) >= 1) # Obtain", "and test sets # use list of unique items from training set for", "in items: predictions.append([user, item, model.predict(user, item).est]) predictions = pd.DataFrame(predictions, columns=['userId', 'movieId', 'prediction']) #", "filtering usercount = df[['userId']].groupby('userId', as_index = False).size() itemcount = df[['movieId']].groupby('movieId', as_index = False).size()", "the test set recs.append(top_k) # ## 
Standard Variational Autoencoder (SVAE) # In[26]: #", "itemcount after filtering usercount = df[['userId']].groupby('userId', as_index = False).size() itemcount = df[['movieId']].groupby('movieId', as_index", "1-2 years. # In[32]: model_names = ['LightGCN', 'NGCF', 'SVAE', 'SVD++', 'SVD'] comparison =", "years. # In[32]: model_names = ['LightGCN', 'NGCF', 'SVAE', 'SVD++', 'SVD'] comparison = pd.DataFrame(columns=['Algorithm',", "3.5, 1.0, 0.0) # Binarize test data: training part test_data_tr = np.where(test_data_tr >", "to the new ids users = np.array([user2id[x] for x in test['userId'].unique()]) for rec,", "set recs.append(top_k) # ## Singular Value Decomposition (SVD) # ### SVD++ # In[29]:", "test_data_tr = np.where(test_data_tr > 3.5, 1.0, 0.0) # Binarize test data: testing part", "= metrics.ndcg(rec, tester, 'userId', 'movieId', 'rank') comparison.loc[len(comparison)] = [name, pak, rak, map, ndcg]", "- HELDOUT_USERS):] train_set = df.loc[df['userId'].isin(train_users)] val_set = df.loc[df['userId'].isin(val_users)] test_set = df.loc[df['userId'].isin(test_users)] unique_train_items =", "'SVAE' else test pak = metrics.precision_at_k(rec, tester, 'userId', 'movieId', 'rank') rak = metrics.recall_at_k(rec,", "increase in **Percision@k by 29%, Recall@k by 18%, MAP by 12%, and NDCG", "unique_users[:(n_users - HELDOUT_USERS * 2)] val_users = unique_users[(n_users - HELDOUT_USERS * 2) :", "are connected. adj_mat[:n_users, n_users:] = R adj_mat[n_users:, :n_users] = R.T adj_mat # In[19]:", "validation and test data train_data = np.where(train_data > 3.5, 1.0, 0.0) val_data =", "ratings from initial dataset test_data_te_ratings=pd.DataFrame(test_data_te_ratings) val_data_te_ratings=pd.DataFrame(val_data_te_ratings) for index,i in df_low_rating.iterrows(): user_old= i['userId'] #", "D_inv_sq_root = sp.diags(D_inv_values) # Eval (D^-0.5 * A * D^-0.5). 
norm_adj_mat = D_inv_sq_root.dot(adj_mat).dot(D_inv_sq_root)", "in the separate object, will be used for calculating NDCG) test_data_te_ratings = test_data_te.copy()", "= np.arange(len(movie_new)) train_reindex = pd.merge(train, movie_new, on='movieId', how='left') # Reset index to 0-n_users.", "== set(test.userId)}') # In[16]: combined = train.append(test) n_users = combined['userId'].nunique() print('Number of users:',", "user top_k = recommendations.copy() top_k['rank'] = recommendations.groupby('userId', sort=False).cumcount() + 1 # For each", "Convert test user ids to the new ids users = np.array([user2id[x] for x", "# # 1. <NAME>, <NAME>, <NAME>, <NAME>, <NAME> & <NAME>, LightGCN: Simplifying and", "D_inv_values = np.power(D_values + 1e-9, -0.5).flatten() D_inv_values[np.isinf(D_inv_values)] = 0.0 # Create sparse matrix", "# In[7]: import sys sys.path.insert(0, './code') # --- # # Collaborative Filtering Comparison", "/content/mykeys.py') path = \"/content/\" + project_name; get_ipython().system(u'mkdir \"{path}\"') get_ipython().magic(u'cd \"{path}\"') import sys; sys.path.append(path)", "only include movies recommendations that are also in the test set recs.append(top_k) #", "the state-of-the-art LightGCN and going back to the winning algorithm for 2009's Netflix", "matrix generation for train, validation and test sets # use list of unique", "= n_movies, n_layers = 3 ) ngcf_model.fit(epochs=25, batch_size=1024, optimizer=optimizer) # ### Recommend with", "and users are nodes/vertices. 
# 1 if the movie and user are connected.", "model_names): tester = test_df if name == 'SVAE' else test pak = metrics.precision_at_k(rec,", "(test_map_items.get(item_old) is not None) : user_new=test_map_users.get(user_old) # new value item_new=test_map_items.get(item_old) # new value", "3.5, 1.0, 0.0) test_data = np.where(test_data > 3.5, 1.0, 0.0) # Binarize validation", "tensorflow as tf from sklearn.model_selection import train_test_split from tensorflow.python.framework.ops import disable_eager_execution from tqdm", "where users are rows and movies are columns. # 1 if a user", "### Recommend with LightGCN and NGCF # In[24]: # Convert test user ids", "'movieId']].copy() temp['seen'] = 1 # Outer join and remove movies that have alread", "= am_train.gen_affinity_matrix() val_data, val_map_users, val_map_items = am_val.gen_affinity_matrix() test_data, test_map_users, test_map_items = am_test.gen_affinity_matrix() #", "np.random.permutation(unique_users) HELDOUT_USERS = 200 # Create train/validation/test users n_users = len(unique_users) train_users =", "alread been seen (seen=1) merged = pd.merge(temp, predictions, on=['userId', 'movieId'], how=\"outer\") merged =", "= test_data_te.copy() test_data_te = np.where(test_data_te > 3.5, 1.0, 0.0) # retrieve real ratings", "names=['userId', 'movieId', 'rating', 'timestamp']) print(f'Shape: {raw_data.shape}') raw_data.sample(10, random_state=123) # In[10]: # Load movie", "users?: {set(train.userId) == set(test.userId)}') # In[16]: combined = train.append(test) n_users = combined['userId'].nunique() print('Number", "Network (LightGCN) # In[21]: light_model = LightGCN(A_tilde, n_users = n_users, n_items = n_movies,", "test_data, test_map_users, test_map_items = am_test.gen_affinity_matrix() # Split validation and test data into training", "Binarize train, validation and test data train_data = np.where(train_data > 3.5, 1.0, 0.0)", "unique items from training set for all sets am_train = 
build_features.AffinityMatrix(df=train_set, items_list=unique_train_items) am_val", "In[35]: get_ipython().system(u'git add . && git commit -m \\'commit\\' && git push origin", "test set common_users = set(test['userId']).intersection(set(predictions['userId'])) # Filter the test and predictions so they", "# Create train/validation/test users n_users = len(unique_users) train_users = unique_users[:(n_users - HELDOUT_USERS *", "convert to and from indexes item2id = dict(zip(movie_new['movieId'], movie_new['movieId_new'])) id2item = dict(zip(movie_new['movieId_new'], movie_new['movieId']))", "# # NGCF is the older sister model to LightGCN, but only by", "'movieId': id2item}) recommendations = recommendations.merge(movie_titles, how='left', on='movieId' )[['userId', 'movieId', 'title', 'prediction']] # Create", "svae_model.recommend_k_items(x=test_data_tr,k=10,remove_seen=True) # Convert sparse matrix back to df recommendations = am_test.map_back_sparse(top_k, kind='prediction') test_df", "= test_df if name == 'SVAE' else test pak = metrics.precision_at_k(rec, tester, 'userId',", "pd.unique(train_set['movieId']) val_set = val_set.loc[val_set['movieId'].isin(unique_train_items)] test_set = test_set.loc[test_set['movieId'].isin(unique_train_items)] # Instantiate the sparse matrix generation", "outperforms all other models. 
When compared to SVD++, a widely used algorithm during", "we want only the top k for each user, # not all the", "(SVAE) # In[26]: # Binarize the data (only keep ratings >= 4) df_preferred", "in [svdpp, svd]: predictions = [] users = train['userId'].unique() items = train['movieId'].unique() for", "can see how LightGCN improves in ranking metrics compared to NGCF by simply", "ndcg = metrics.ndcg(rec, tester, 'userId', 'movieId', 'rank') comparison.loc[len(comparison)] = [name, pak, rak, map,", "optimizer=optimizer) # ### Recommend with LightGCN and NGCF # In[24]: # Convert test", "0.0) test_data = np.where(test_data > 3.5, 1.0, 0.0) # Binarize validation data val_data_tr", "competition, SVD++. # # Models include in order are LightGCN, NGCF, SVAE, SVD++,", "sparse matrix generation for train, validation and test sets # use list of", "the predictions, we want only the top k for each user, # not", "sys.path.insert(0, './code') # --- # # Collaborative Filtering Comparison # # In this", "12%, and NDCG by 35%**. # # NGCF is the older sister model", "origin \"{branch}\"') # In[7]: import sys sys.path.insert(0, './code') # --- # # Collaborative", "remote add origin https://\"{mykeys.git_token}\":x-oauth-basic@github.com/\"{account}\"/\"{project_name}\".git') get_ipython().system(u'git pull origin \"{branch}\"') get_ipython().system(u'git checkout main') else: get_ipython().magic(u'cd", "model_names = ['LightGCN', 'NGCF', 'SVAE', 'SVD++', 'SVD'] comparison = pd.DataFrame(columns=['Algorithm', 'Precision@k', 'Recall@k', 'MAP',", "unnecessary operations. # # In conclusion, this demonstrates how far recommendation systems have", "seen by users # Create column of all 1s temp = train[['userId', 'movieId']].copy()", "# 1. <NAME>, <NAME>, <NAME>, <NAME>, <NAME> & <NAME>, LightGCN: Simplifying and Powering", "adjaceny matrix with the user-item graph. adj_mat = sp.dok_matrix((n_users + n_movies, n_users +", "diagonals. 
D_inv_sq_root = sp.diags(D_inv_values) # Eval (D^-0.5 * A * D^-0.5). norm_adj_mat =", "n_movies, n_layers = 3) # In[22]: optimizer = tf.keras.optimizers.Adam(learning_rate=1e-2) light_model.fit(epochs=25, batch_size=1024, optimizer=optimizer) #", "dtype=np.float32) # List of lists. adj_mat.tolil() R = R.tolil() # Put together adjacency", "drop_decoder=0.5, annealing=False, beta=1.0 ) svae_model.fit(x_train=train_data, x_valid=val_data, x_val_tr=val_data_tr, x_val_te=val_data_te_ratings, mapper=am_val ) # ### Recommend", "item).est]) predictions = pd.DataFrame(predictions, columns=['userId', 'movieId', 'prediction']) # Remove movies already seen by", "all other models. When compared to SVD++, a widely used algorithm during the", "> 3.5, 1.0, 0.0) val_data_te_ratings = val_data_te.copy() val_data_te = np.where(val_data_te > 3.5, 1.0,", "# In[8]: import math import numpy as np import os import pandas as", "raw_data.sample(10, random_state=123) # In[10]: # Load movie titles. fp = os.path.join('./data/bronze', 'u.item') movie_titles", "'SVD++', 'SVD'] comparison = pd.DataFrame(columns=['Algorithm', 'Precision@k', 'Recall@k', 'MAP', 'NDCG']) # Convert test user", "print(f'Do they have the same users?: {set(train.userId) == set(test.userId)}') # In[16]: combined =", "# # Collaborative Filtering Comparison # # In this notebook we compare different", "0.0 # Create sparse matrix with the values of D^(-0.5) are the diagonals.", "data # In[9]: fp = os.path.join('./data/bronze', 'u.data') raw_data = pd.read_csv(fp, sep='\\t', names=['userId', 'movieId',", "column of all 1s temp = train[['userId', 'movieId']].copy() temp['seen'] = 1 # Outer", "# For each user, only include movies recommendations that are also in the", "user2id = dict(zip(train['userId'], train_reindex['userId_new'])) id2user = dict(zip(train_reindex['userId_new'], train['userId'])) # In[18]: # Create user-item", "= pd.read_csv(fp, sep='|', names=['movieId', 'title'], usecols = range(2), encoding='iso-8859-1') 
print(f'Shape: {movie_titles.shape}') movie_titles.sample(10, random_state=123)", "movies recommendations that are also in the test set recs.append(top_k) # ## Singular", "users in train and test respectively: {len(set(merged['userId']))}, {len(set(test['userId']))}\") print(f\"# of users in BOTH", "a user reviewed that movie, 0 if they didn't). R = sp.dok_matrix((n_users, n_movies),", "are columns. # 1 if a user reviewed that movie, 0 if they", "install -q surprise') # In[8]: import math import numpy as np import os", "# 1 if a user reviewed that movie, 0 if they didn't). R", "if name == 'SVAE' else test pak = metrics.precision_at_k(rec, tester, 'userId', 'movieId', 'rank')", "n_epochs=10, verbose=True) svd.fit(surprise_train) # ### Recommend with SVD++ and SVD # In[31]: for", "Create sparse matrix with the values of D^(-0.5) are the diagonals. D_inv_sq_root =", "reset index of 0-n_movies. movie_new = combined[['movieId']].drop_duplicates() movie_new['movieId_new'] = np.arange(len(movie_new)) train_reindex = pd.merge(train,", "Collaborative Filtering Comparison # # In this notebook we compare different recommendation systems", "individual notebooks where we go more indepth, especially LightGCN and NGCF, where we", "column with the predicted movie's rank for each user top_k = recommendations.copy() top_k['rank']", "recommendations that are also in the test set recs.append(top_k) # ## Standard Variational", "In[29]: surprise_train = surprise.Dataset.load_from_df(train.drop('timestamp', axis=1), reader=surprise.Reader('ml-100k')).build_full_trainset() svdpp = surprise.SVDpp(random_state=0, n_factors=64, n_epochs=10, verbose=True) svdpp.fit(surprise_train)", "Precision (MAP) # * Normalized Discounted Cumulative Gain (NDCG) # # where $k=10$", "are the diagonals. D_inv_sq_root = sp.diags(D_inv_values) # Eval (D^-0.5 * A * D^-0.5).", "if the movie and user are connected. 
adj_mat[:n_users, n_users:] = R adj_mat[n_users:, :n_users]", "ratio=0.75, seed=123) test_data_tr, test_data_te = numpy_stratified_split(test_data, ratio=0.75, seed=123) # Binarize train, validation and", "Each model has their own individual notebooks where we go more indepth, especially", "'prediction']) # Remove movies already seen by users # Create column of all", "# In[31]: for model in [svdpp, svd]: predictions = [] users = train['userId'].unique()", "'prediction')).reset_index(drop=True) top_movies['rank'] = top_movies.groupby('userId', sort=False).cumcount() + 1 top_k = top_movies.copy() top_k['rank'] = top_movies.groupby('userId',", "that movie, 0 if they didn't). R = sp.dok_matrix((n_users, n_movies), dtype=np.float32) R[train_reindex['userId_new'], train_reindex['movieId_new']]", "as np import os import pandas as pd import random import requests import", "new ids users = np.array([user2id[x] for x in test['userId'].unique()]) recs = [] for", "tester, 'userId', 'movieId', 'rank') map = metrics.mean_average_precision(rec, tester, 'userId', 'movieId', 'rank') ndcg =", "test = stratified_split(raw_data, 'userId', train_size) print(f'Train Shape: {train.shape}') print(f'Test Shape: {test.shape}') print(f'Do they", "where we implemented them from scratch in Tensorflow. # # The last cell", "LightGCN, NGCF # # Prepare data # In[9]: fp = os.path.join('./data/bronze', 'u.data') raw_data", "pd.merge(train, movie_new, on='movieId', how='left') # Reset index to 0-n_users. 
train_reindex['userId_new'] = train_reindex['userId'] -", "# to COOrdinate format first ((row, column), data) coo = norm_adj_mat.tocoo().astype(np.float32) # create", "\"{branch}\"') get_ipython().system(u'git checkout main') else: get_ipython().magic(u'cd \"{project_path}\"') # In[34]: get_ipython().system(u'git status') # In[35]:", "not None) and (test_map_items.get(item_old) is not None) : user_new=test_map_users.get(user_old) # new value item_new=test_map_items.get(item_old)", "from training set for all sets am_train = build_features.AffinityMatrix(df=train_set, items_list=unique_train_items) am_val = build_features.AffinityMatrix(df=val_set,", "graph (sparse matix where users are rows and movies are columns. # 1", "Train models # ## Graph Convoultional Networks (GCNs) # ### Light Graph Convolution", "# retrieve real ratings from initial dataset test_data_te_ratings=pd.DataFrame(test_data_te_ratings) val_data_te_ratings=pd.DataFrame(val_data_te_ratings) for index,i in df_low_rating.iterrows():", "# # * Precision@k # * Recall@k # * Mean Average Precision (MAP)", "see how LightGCN improves in ranking metrics compared to NGCF by simply removing", "# 2. <NAME>, <NAME>, <NAME>, <NAME>, & <NAME>, Neural Graph Collaorative Filtering, 2019,", "test data: training part test_data_tr = np.where(test_data_tr > 3.5, 1.0, 0.0) # Binarize", "# * Mean Average Precision (MAP) # * Normalized Discounted Cumulative Gain (NDCG)", "users:', n_users) n_movies = combined['movieId'].nunique() print('Number of movies:', n_movies) # In[17]: # Create", "In[2]: if not os.path.exists(project_path): get_ipython().system(u'cp /content/drive/MyDrive/mykeys.py /content') import mykeys get_ipython().system(u'rm /content/mykeys.py') path =", "the movie and user are connected. adj_mat[:n_users, n_users:] = R adj_mat[n_users:, :n_users] =", "ngcf_model.fit(epochs=25, batch_size=1024, optimizer=optimizer) # ### Recommend with LightGCN and NGCF # In[24]: #", "each user, # not all the recommendations. 
# Extract the top k recommendations", "# # Prepare data # In[9]: fp = os.path.join('./data/bronze', 'u.data') raw_data = pd.read_csv(fp,", "HELDOUT_USERS * 2) : (n_users - HELDOUT_USERS)] test_users = unique_users[(n_users - HELDOUT_USERS):] train_set", "Powering Graph Convolution Network for Recommendation, 2020, https://arxiv.org/abs/2002.02126 # 2. <NAME>, <NAME>, <NAME>,", "using ranking metrics: # # # * Precision@k # * Recall@k # *", "metrics.precision_at_k(rec, tester, 'userId', 'movieId', 'rank') rak = metrics.recall_at_k(rec, tester, 'userId', 'movieId', 'rank') map", "Models include in order are LightGCN, NGCF, SVAE, SVD++, and SVD. Each model", "2009's Netflix Prize competition, SVD++. # # Models include in order are LightGCN,", "# Remove movies already seen by users # Create column of all 1s", "+ n_movies), dtype=np.float32) # List of lists. adj_mat.tolil() R = R.tolil() # Put", "mapper=am_val ) # ### Recommend with SVAE # In[28]: # Model prediction on", "a single year. We can see how LightGCN improves in ranking metrics compared", "val_data_te_ratings=val_data_te_ratings.to_numpy() test_data_te_ratings=test_data_te_ratings.to_numpy() # In[27]: disable_eager_execution() svae_model = SVAE.StandardVAE(n_users=train_data.shape[0], original_dim=train_data.shape[1], intermediate_dim=200, latent_dim=64, n_epochs=400, batch_size=100,", "* 2) : (n_users - HELDOUT_USERS)] test_users = unique_users[(n_users - HELDOUT_USERS):] train_set =", "also in the test set recs.append(top_k) # ## Singular Value Decomposition (SVD) #", "tester, 'userId', 'movieId', 'rank') rak = metrics.recall_at_k(rec, tester, 'userId', 'movieId', 'rank') map =", "light_model = LightGCN(A_tilde, n_users = n_users, n_items = n_movies, n_layers = 3) #", "= pd.merge(temp, predictions, on=['userId', 'movieId'], how=\"outer\") merged = merged[merged['seen'].isnull()].drop('seen', axis=1) # Create filter", "test_data_te_ratings = test_data_te.copy() test_data_te = np.where(test_data_te > 3.5, 1.0, 
0.0) # retrieve real", "None) : user_new=test_map_users.get(user_old) # new value item_new=test_map_items.get(item_old) # new value rating=i['rating'] test_data_te_ratings.at[user_new,item_new]= rating", "= tf.SparseTensor(indices, coo.data, coo.shape) A_tilde # # Train models # ## Graph Convoultional", "# In[33]: comparison # # References: # # 1. <NAME>, <NAME>, <NAME>, <NAME>,", "LightGCN achieves an increase in **Percision@k by 29%, Recall@k by 18%, MAP by", "equal') print(f\"# of users in train and test respectively: {len(set(merged['userId']))}, {len(set(test['userId']))}\") print(f\"# of", "status') # In[35]: get_ipython().system(u'git add . && git commit -m \\'commit\\' && git", "tf.keras.optimizers.Adam(learning_rate=1e-2) light_model.fit(epochs=25, batch_size=1024, optimizer=optimizer) # ### Neural Graph Collaborative Filtering (NGCF) # In[23]:", "movie titles. fp = os.path.join('./data/bronze', 'u.item') movie_titles = pd.read_csv(fp, sep='|', names=['movieId', 'title'], usecols", "LightGCN and NGCF # In[24]: # Convert test user ids to the new", "len(x) >= 5) df = df.groupby('movieId').filter(lambda x: len(x) >= 1) # Obtain both", "list of unique items from training set for all sets am_train = build_features.AffinityMatrix(df=train_set,", "batch_size=1024, optimizer=optimizer) # ### Neural Graph Collaborative Filtering (NGCF) # In[23]: ngcf_model =", "part test_data_tr = np.where(test_data_tr > 3.5, 1.0, 0.0) # Binarize test data: testing", "where $k=10$ # # # # Imports # In[4]: get_ipython().system(u'pip install -q surprise')", "* Mean Average Precision (MAP) # * Normalized Discounted Cumulative Gain (NDCG) #", "# Extract the top k recommendations from the predictions top_movies = svd_pred_common.groupby('userId', as_index=False).apply(lambda", "to and from indexes item2id = dict(zip(movie_new['movieId'], movie_new['movieId_new'])) id2item = dict(zip(movie_new['movieId_new'], movie_new['movieId'])) user2id", "In[32]: model_names = ['LightGCN', 'NGCF', 
'SVAE', 'SVD++', 'SVD'] comparison = pd.DataFrame(columns=['Algorithm', 'Precision@k', 'Recall@k',", "'movieId', 'prediction']) # Remove movies already seen by users # Create column of", "# # Train models # ## Graph Convoultional Networks (GCNs) # ### Light", "pd.read_csv(fp, sep='|', names=['movieId', 'title'], usecols = range(2), encoding='iso-8859-1') print(f'Shape: {movie_titles.shape}') movie_titles.sample(10, random_state=123) #", "import requests import scipy.sparse as sp import surprise import tensorflow as tf from", "usecols = range(2), encoding='iso-8859-1') print(f'Shape: {movie_titles.shape}') movie_titles.sample(10, random_state=123) # In[15]: train_size = 0.75", "combined = train.append(test) n_users = combined['userId'].nunique() print('Number of users:', n_users) n_movies = combined['movieId'].nunique()", "= am_test.map_back_sparse(top_k, kind='prediction') test_df = am_test.map_back_sparse(test_data_te_ratings, kind='ratings') # use test_data_te_, with the original", "# Square root and inverse. D_inv_values = np.power(D_values + 1e-9, -0.5).flatten() D_inv_values[np.isinf(D_inv_values)] =", "2)] val_users = unique_users[(n_users - HELDOUT_USERS * 2) : (n_users - HELDOUT_USERS)] test_users", "df.loc[df['userId'].isin(test_users)] unique_train_items = pd.unique(train_set['movieId']) val_set = val_set.loc[val_set['movieId'].isin(unique_train_items)] test_set = test_set.loc[test_set['movieId'].isin(unique_train_items)] # Instantiate the", "train_data = np.where(train_data > 3.5, 1.0, 0.0) val_data = np.where(val_data > 3.5, 1.0,", "= top_movies.groupby('userId', sort=False).cumcount() + 1 # For each user, only include movies recommendations", "= np.where(val_data > 3.5, 1.0, 0.0) test_data = np.where(test_data > 3.5, 1.0, 0.0)", "so we can convert to and from indexes item2id = dict(zip(movie_new['movieId'], movie_new['movieId_new'])) id2item", "are LightGCN, NGCF, SVAE, SVD++, and SVD. 
Each model has their own individual", "NGCF # In[24]: # Convert test user ids to the new ids users", "2. <NAME>, <NAME>, <NAME>, <NAME>, & <NAME>, Neural Graph Collaorative Filtering, 2019, https://arxiv.org/abs/1905.08108", ":n_users] = R.T adj_mat # In[19]: # Calculate degree matrix D (for every", "'NGCF', 'SVAE', 'SVD++', 'SVD'] comparison = pd.DataFrame(columns=['Algorithm', 'Precision@k', 'Recall@k', 'MAP', 'NDCG']) # Convert", "# In[30]: svd = surprise.SVD(random_state=0, n_factors=64, n_epochs=10, verbose=True) svd.fit(surprise_train) # ### Recommend with", "'rank') rak = metrics.recall_at_k(rec, tester, 'userId', 'movieId', 'rank') map = metrics.mean_average_precision(rec, tester, 'userId',", "'userId', 'movieId', 'rank') rak = metrics.recall_at_k(rec, tester, 'userId', 'movieId', 'rank') map = metrics.mean_average_precision(rec,", "object, will be used for calculating NDCG) test_data_te_ratings = test_data_te.copy() test_data_te = np.where(test_data_te", "sort=False).cumcount() + 1 # For each user, only include movies recommendations that are", "get_ipython().system(u'git config --global user.name \"reco-tut\"') get_ipython().system(u'git init') get_ipython().system(u'git remote add origin https://\"{mykeys.git_token}\":x-oauth-basic@github.com/\"{account}\"/\"{project_name}\".git') get_ipython().system(u'git", "Networks (GCNs) # ### Light Graph Convolution Network (LightGCN) # In[21]: light_model =", "Normalized Discounted Cumulative Gain (NDCG) # # where $k=10$ # # # #", "# ## Standard Variational Autoencoder (SVAE) # In[26]: # Binarize the data (only", "= train_reindex[['userId_new', 'movieId_new', 'rating']] test_reindex = pd.merge(test, movie_new, on='movieId', how='left') # Reset index", "* D^-0.5). 
norm_adj_mat = D_inv_sq_root.dot(adj_mat).dot(D_inv_sq_root) # In[20]: # to COOrdinate format first ((row,", "df_low_rating = raw_data[raw_data['rating'] <= 3.5] df = df_preferred.groupby('userId').filter(lambda x: len(x) >= 5) df", "recommendations = am_test.map_back_sparse(top_k, kind='prediction') test_df = am_test.map_back_sparse(test_data_te_ratings, kind='ratings') # use test_data_te_, with the", "temp = train[['userId', 'movieId']].copy() temp['seen'] = 1 # Outer join and remove movies", "= [] users = train['userId'].unique() items = train['movieId'].unique() for user in users: for", "'movieId'], how=\"outer\") merged = merged[merged['seen'].isnull()].drop('seen', axis=1) # Create filter for users that appear", "test data into training and testing parts val_data_tr, val_data_te = numpy_stratified_split(val_data, ratio=0.75, seed=123)", "recommendations = model.recommend(users, k=10) recommendations = recommendations.replace({'userId': id2user, 'movieId': id2item}) recommendations = recommendations.merge(movie_titles,", "Graph Collaborative Filtering (NGCF) # In[23]: ngcf_model = NGCF(A_tilde, n_users = n_users, n_items", "+ project_name; get_ipython().system(u'mkdir \"{path}\"') get_ipython().magic(u'cd \"{path}\"') import sys; sys.path.append(path) get_ipython().system(u'git config --global user.email", "### Recommend with SVD++ and SVD # In[31]: for model in [svdpp, svd]:", "of nonzero entries) D_values = np.array(adj_mat.sum(1)) # Square root and inverse. D_inv_values =", "just 1-2 years. 
# In[32]: model_names = ['LightGCN', 'NGCF', 'SVAE', 'SVD++', 'SVD'] comparison", "be used for calculating NDCG) test_data_te_ratings = test_data_te.copy() test_data_te = np.where(test_data_te > 3.5,", "ratings # Create column with the predicted movie's rank for each user top_k", "print('Number of users:', n_users) n_movies = combined['movieId'].nunique() print('Number of movies:', n_movies) # In[17]:", "3) # In[22]: optimizer = tf.keras.optimizers.Adam(learning_rate=1e-2) light_model.fit(epochs=25, batch_size=1024, optimizer=optimizer) # ### Neural Graph", "and Powering Graph Convolution Network for Recommendation, 2020, https://arxiv.org/abs/2002.02126 # 2. <NAME>, <NAME>,", "= np.where(val_data_te > 3.5, 1.0, 0.0) # Binarize test data: training part test_data_tr", "get_ipython().system(u'git init') get_ipython().system(u'git remote add origin https://\"{mykeys.git_token}\":x-oauth-basic@github.com/\"{account}\"/\"{project_name}\".git') get_ipython().system(u'git pull origin \"{branch}\"') get_ipython().system(u'git checkout", "# # In this notebook we compare different recommendation systems starting with the", "the test set recs.append(top_k) # ## Singular Value Decomposition (SVD) # ### SVD++", "build_features import metrics from models import SVAE from models.GCN import LightGCN, NGCF #", "= np.power(D_values + 1e-9, -0.5).flatten() D_inv_values[np.isinf(D_inv_values)] = 0.0 # Create sparse matrix with", "= train['userId'].unique() items = train['movieId'].unique() for user in users: for item in items:", "tensorflow.python.framework.ops import disable_eager_execution from tqdm import tqdm from utils import stratified_split, numpy_stratified_split import", "(NGCF) # In[23]: ngcf_model = NGCF(A_tilde, n_users = n_users, n_items = n_movies, n_layers", "Microsoft SVAE implementation: https://github.com/microsoft/recommenders/blob/main/examples/02_model_collaborative_filtering/standard_vae_deep_dive.ipynb # 4. 
<NAME>, Netflix Prize and SVD, 2014, https://www.semanticscholar.org/paper/Netflix-Prize-and-SVD-Gower/ce7b81b46939d7852dbb30538a7796e69fdd407c", "# In[15]: train_size = 0.75 train, test = stratified_split(raw_data, 'userId', train_size) print(f'Train Shape:", "<NAME>, <NAME>, & <NAME>, Neural Graph Collaorative Filtering, 2019, https://arxiv.org/abs/1905.08108 # 3. Microsoft", "[svdpp, svd]: predictions = [] users = train['userId'].unique() items = train['movieId'].unique() for user", "= recommendations.groupby('userId', sort=False).cumcount() + 1 # For each user, only include movies recommendations", "# Models include in order are LightGCN, NGCF, SVAE, SVD++, and SVD. Each", ">= 5) df = df.groupby('movieId').filter(lambda x: len(x) >= 1) # Obtain both usercount", "by 12%, and NDCG by 35%**. # # NGCF is the older sister", "Collaborative Filtering (NGCF) # In[23]: ngcf_model = NGCF(A_tilde, n_users = n_users, n_items =", "coo.col]).transpose() # covert to sparse tensor A_tilde = tf.SparseTensor(indices, coo.data, coo.shape) A_tilde #", "training and testing parts val_data_tr, val_data_te = numpy_stratified_split(val_data, ratio=0.75, seed=123) test_data_tr, test_data_te =", "= False).size() unique_users =sorted(df.userId.unique()) np.random.seed(123) unique_users = np.random.permutation(unique_users) HELDOUT_USERS = 200 # Create", "how new model architectures with notable performance increases can be developed in the", "'userId', 'movieId', 'rank') ndcg = metrics.ndcg(rec, tester, 'userId', 'movieId', 'rank') comparison.loc[len(comparison)] = [name,", "in users: for item in items: predictions.append([user, item, model.predict(user, item).est]) predictions = pd.DataFrame(predictions,", "val_data_te_ratings.at[user_new,item_new]= rating val_data_te_ratings=val_data_te_ratings.to_numpy() test_data_te_ratings=test_data_te_ratings.to_numpy() # In[27]: disable_eager_execution() svae_model = SVAE.StandardVAE(n_users=train_data.shape[0], original_dim=train_data.shape[1], 
intermediate_dim=200, latent_dim=64,", "and test data train_data = np.where(train_data > 3.5, 1.0, 0.0) val_data = np.where(val_data", "non-binary version in the separate object, will be used for calculating NDCG) test_data_te_ratings", "False).size() itemcount = df[['movieId']].groupby('movieId', as_index = False).size() unique_users =sorted(df.userId.unique()) np.random.seed(123) unique_users = np.random.permutation(unique_users)", "= surprise.Dataset.load_from_df(train.drop('timestamp', axis=1), reader=surprise.Reader('ml-100k')).build_full_trainset() svdpp = surprise.SVDpp(random_state=0, n_factors=64, n_epochs=10, verbose=True) svdpp.fit(surprise_train) # ###", "SVAE implementation: https://github.com/microsoft/recommenders/blob/main/examples/02_model_collaborative_filtering/standard_vae_deep_dive.ipynb # 4. <NAME>, Netflix Prize and SVD, 2014, https://www.semanticscholar.org/paper/Netflix-Prize-and-SVD-Gower/ce7b81b46939d7852dbb30538a7796e69fdd407c #", "# Binarize test data: training part test_data_tr = np.where(test_data_tr > 3.5, 1.0, 0.0)", "movie's rank for each user top_k = recommendations.copy() top_k['rank'] = recommendations.groupby('userId', sort=False).cumcount() +", "of users in train and test are NOT equal') print(f\"# of users in", "test['userId'].unique()]) recs = [] for model in [light_model, ngcf_model]: recommendations = model.recommend(users, k=10)", "of 0-n_movies. movie_new = combined[['movieId']].drop_duplicates() movie_new['movieId_new'] = np.arange(len(movie_new)) train_reindex = pd.merge(train, movie_new, on='movieId',", "with reset index of 0-n_movies. 
movie_new = combined[['movieId']].drop_duplicates() movie_new['movieId_new'] = np.arange(len(movie_new)) train_reindex =", "filter for users that appear in both the train and test set common_users", "df.loc[df['userId'].isin(train_users)] val_set = df.loc[df['userId'].isin(val_users)] test_set = df.loc[df['userId'].isin(test_users)] unique_train_items = pd.unique(train_set['movieId']) val_set = val_set.loc[val_set['movieId'].isin(unique_train_items)]", "= 0.0 # Create sparse matrix with the values of D^(-0.5) are the", "pd.DataFrame(columns=['Algorithm', 'Precision@k', 'Recall@k', 'MAP', 'NDCG']) # Convert test user ids to the new", "import build_features import metrics from models import SVAE from models.GCN import LightGCN, NGCF", "norm_adj_mat.tocoo().astype(np.float32) # create an index that will tell SparseTensor where the non-zero points", "x: x.nlargest(10, 'prediction')).reset_index(drop=True) top_movies['rank'] = top_movies.groupby('userId', sort=False).cumcount() + 1 top_k = top_movies.copy() top_k['rank']", "see that the state-of-the-art model LightGCN vastly outperforms all other models. When compared", "retrieve real ratings from initial dataset test_data_te_ratings=pd.DataFrame(test_data_te_ratings) val_data_te_ratings=pd.DataFrame(val_data_te_ratings) for index,i in df_low_rating.iterrows(): user_old=", "# Outer join and remove movies that have alread been seen (seen=1) merged", "df[['movieId']].groupby('movieId', as_index = False).size() unique_users =sorted(df.userId.unique()) np.random.seed(123) unique_users = np.random.permutation(unique_users) HELDOUT_USERS = 200", "'rank') comparison.loc[len(comparison)] = [name, pak, rak, map, ndcg] # In[33]: comparison # #", "(val_map_items.get(item_old) is not None) : user_new=val_map_users.get(user_old) # new value item_new=val_map_items.get(item_old) # new value", "can see that the state-of-the-art model LightGCN vastly outperforms all other models. 
When", "np.array(adj_mat.sum(1)) # Square root and inverse. D_inv_values = np.power(D_values + 1e-9, -0.5).flatten() D_inv_values[np.isinf(D_inv_values)]", "data) coo = norm_adj_mat.tocoo().astype(np.float32) # create an index that will tell SparseTensor where", "in train and test respectively: {len(set(merged['userId']))}, {len(set(test['userId']))}\") print(f\"# of users in BOTH train", "for each user top_k = recommendations.copy() top_k['rank'] = recommendations.groupby('userId', sort=False).cumcount() + 1 #", "predictions top_movies = svd_pred_common.groupby('userId', as_index=False).apply(lambda x: x.nlargest(10, 'prediction')).reset_index(drop=True) top_movies['rank'] = top_movies.groupby('userId', sort=False).cumcount() +", "by simply removing unnecessary operations. # # In conclusion, this demonstrates how far", "LightGCN: Simplifying and Powering Graph Convolution Network for Recommendation, 2020, https://arxiv.org/abs/2002.02126 # 2.", "1 # Create the adjaceny matrix with the user-item graph. 
adj_mat = sp.dok_matrix((n_users", "test_map_items = am_test.gen_affinity_matrix() # Split validation and test data into training and testing", "branch = \"main\"; account = \"sparsh-ai\" project_path = os.path.join('/content', project_name) # In[2]: if", "zip(recs, model_names): tester = test_df if name == 'SVAE' else test pak =", "test: {len(set(svd_pred_common['userId']))}\") continue # From the predictions, we want only the top k", "'movieId', 'rating', 'timestamp']) print(f'Shape: {raw_data.shape}') raw_data.sample(10, random_state=123) # In[10]: # Load movie titles.", "== 'SVAE' else test pak = metrics.precision_at_k(rec, tester, 'userId', 'movieId', 'rank') rak =", "from sklearn.model_selection import train_test_split from tensorflow.python.framework.ops import disable_eager_execution from tqdm import tqdm from", "Value Decomposition (SVD) # ### SVD++ # In[29]: surprise_train = surprise.Dataset.load_from_df(train.drop('timestamp', axis=1), reader=surprise.Reader('ml-100k')).build_full_trainset()", "movies are columns. # 1 if a user reviewed that movie, 0 if", "Split validation and test data into training and testing parts val_data_tr, val_data_te =", "Netflix Prize competition, SVD++. # # Models include in order are LightGCN, NGCF,", "= dict(zip(movie_new['movieId_new'], movie_new['movieId'])) user2id = dict(zip(train['userId'], train_reindex['userId_new'])) id2user = dict(zip(train_reindex['userId_new'], train['userId'])) # In[18]:", "np.arange(len(movie_new)) train_reindex = pd.merge(train, movie_new, on='movieId', how='left') # Reset index to 0-n_users. train_reindex['userId_new']", "n_users:] = R adj_mat[n_users:, :n_users] = R.T adj_mat # In[19]: # Calculate degree", "movies that have alread been seen (seen=1) merged = pd.merge(temp, predictions, on=['userId', 'movieId'],", "# Eval (D^-0.5 * A * D^-0.5). 
norm_adj_mat = D_inv_sq_root.dot(adj_mat).dot(D_inv_sq_root) # In[20]: #", "Create train/validation/test users n_users = len(unique_users) train_users = unique_users[:(n_users - HELDOUT_USERS * 2)]", "and test sets train_data, _, _ = am_train.gen_affinity_matrix() val_data, val_map_users, val_map_items = am_val.gen_affinity_matrix()", "format first ((row, column), data) coo = norm_adj_mat.tocoo().astype(np.float32) # create an index that", "new ids users = np.array([user2id[x] for x in test['userId'].unique()]) for rec, name in", "'u.item') movie_titles = pd.read_csv(fp, sep='|', names=['movieId', 'title'], usecols = range(2), encoding='iso-8859-1') print(f'Shape: {movie_titles.shape}')", "get_ipython().system(u'git config --global user.email \"<EMAIL>\"') get_ipython().system(u'git config --global user.name \"reco-tut\"') get_ipython().system(u'git init') get_ipython().system(u'git", ") svae_model.fit(x_train=train_data, x_valid=val_data, x_val_tr=val_data_tr, x_val_te=val_data_te_ratings, mapper=am_val ) # ### Recommend with SVAE #", "from initial dataset test_data_te_ratings=pd.DataFrame(test_data_te_ratings) val_data_te_ratings=pd.DataFrame(val_data_te_ratings) for index,i in df_low_rating.iterrows(): user_old= i['userId'] # old", "k=10, verbose=0, seed=123, drop_encoder=0.5, drop_decoder=0.5, annealing=False, beta=1.0 ) svae_model.fit(x_train=train_data, x_valid=val_data, x_val_tr=val_data_tr, x_val_te=val_data_te_ratings, mapper=am_val", "user_new=val_map_users.get(user_old) # new value item_new=val_map_items.get(item_old) # new value rating=i['rating'] val_data_te_ratings.at[user_new,item_new]= rating val_data_te_ratings=val_data_te_ratings.to_numpy() test_data_te_ratings=test_data_te_ratings.to_numpy()", "NGCF, SVAE, SVD++, and SVD. 
Each model has their own individual notebooks where", "# In[9]: fp = os.path.join('./data/bronze', 'u.data') raw_data = pd.read_csv(fp, sep='\\t', names=['userId', 'movieId', 'rating',", "items_list=unique_train_items) # Obtain the sparse matrix for train, validation and test sets train_data,", "init') get_ipython().system(u'git remote add origin https://\"{mykeys.git_token}\":x-oauth-basic@github.com/\"{account}\"/\"{project_name}\".git') get_ipython().system(u'git pull origin \"{branch}\"') get_ipython().system(u'git checkout main')", "print(f\"# of users in BOTH train and test: {len(set(svd_pred_common['userId']))}\") continue # From the", "Create DataFrame with reset index of 0-n_movies. movie_new = combined[['movieId']].drop_duplicates() movie_new['movieId_new'] = np.arange(len(movie_new))", "model.predict(user, item).est]) predictions = pd.DataFrame(predictions, columns=['userId', 'movieId', 'prediction']) # Remove movies already seen", "x_val_te=val_data_te_ratings, mapper=am_val ) # ### Recommend with SVAE # In[28]: # Model prediction", "columns. 
# 1 if a user reviewed that movie, 0 if they didn't).", "value rating=i['rating'] val_data_te_ratings.at[user_new,item_new]= rating val_data_te_ratings=val_data_te_ratings.to_numpy() test_data_te_ratings=test_data_te_ratings.to_numpy() # In[27]: disable_eager_execution() svae_model = SVAE.StandardVAE(n_users=train_data.shape[0], original_dim=train_data.shape[1],", "Create column of all 1s temp = train[['userId', 'movieId']].copy() temp['seen'] = 1 #", "* Recall@k # * Mean Average Precision (MAP) # * Normalized Discounted Cumulative", "more indepth, especially LightGCN and NGCF, where we implemented them from scratch in", "ids to the new ids users = np.array([user2id[x] for x in test['userId'].unique()]) for", "adj_mat[n_users:, :n_users] = R.T adj_mat # In[19]: # Calculate degree matrix D (for", "In[1]: import os project_name = \"reco-tut-mlh\"; branch = \"main\"; account = \"sparsh-ai\" project_path", "train and test: {len(set(svd_pred_common['userId']))}\") continue # From the predictions, we want only the", "to the new ids users = np.array([user2id[x] for x in test['userId'].unique()]) recs =", "single year. We can see how LightGCN improves in ranking metrics compared to", "surprise import tensorflow as tf from sklearn.model_selection import train_test_split from tensorflow.python.framework.ops import disable_eager_execution", "is not None) and (val_map_items.get(item_old) is not None) : user_new=val_map_users.get(user_old) # new value", "users in train and test are NOT equal') print(f\"# of users in train", "# In[17]: # Create DataFrame with reset index of 0-n_movies. movie_new = combined[['movieId']].drop_duplicates()", "COOrdinate format first ((row, column), data) coo = norm_adj_mat.tocoo().astype(np.float32) # create an index", "use test_data_te_, with the original ratings # Create column with the predicted movie's", "# Binarize train, validation and test data train_data = np.where(train_data > 3.5, 1.0,", "user reviewed that movie, 0 if they didn't). 
R = sp.dok_matrix((n_users, n_movies), dtype=np.float32)", "3.5, 1.0, 0.0) # Binarize test data: testing part (save non-binary version in", "import train_test_split from tensorflow.python.framework.ops import disable_eager_execution from tqdm import tqdm from utils import", "test data: testing part (save non-binary version in the separate object, will be", "['LightGCN', 'NGCF', 'SVAE', 'SVD++', 'SVD'] comparison = pd.DataFrame(columns=['Algorithm', 'Precision@k', 'Recall@k', 'MAP', 'NDCG']) #", "import sys sys.path.insert(0, './code') # --- # # Collaborative Filtering Comparison # #", "test_df if name == 'SVAE' else test pak = metrics.precision_at_k(rec, tester, 'userId', 'movieId',", "Convolution Network for Recommendation, 2020, https://arxiv.org/abs/2002.02126 # 2. <NAME>, <NAME>, <NAME>, <NAME>, &", "Filtering, 2019, https://arxiv.org/abs/1905.08108 # 3. Microsoft SVAE implementation: https://github.com/microsoft/recommenders/blob/main/examples/02_model_collaborative_filtering/standard_vae_deep_dive.ipynb # 4. <NAME>, Netflix", "and inverse. D_inv_values = np.power(D_values + 1e-9, -0.5).flatten() D_inv_values[np.isinf(D_inv_values)] = 0.0 # Create", "predictions, we want only the top k for each user, # not all", "ndcg] # In[33]: comparison # # References: # # 1. 
<NAME>, <NAME>, <NAME>,", "= len(unique_users) train_users = unique_users[:(n_users - HELDOUT_USERS * 2)] val_users = unique_users[(n_users -", "of test set top_k = svae_model.recommend_k_items(x=test_data_tr,k=10,remove_seen=True) # Convert sparse matrix back to df", "In[9]: fp = os.path.join('./data/bronze', 'u.data') raw_data = pd.read_csv(fp, sep='\\t', names=['userId', 'movieId', 'rating', 'timestamp'])", "join and remove movies that have alread been seen (seen=1) merged = pd.merge(temp,", "verbose=True) svd.fit(surprise_train) # ### Recommend with SVD++ and SVD # In[31]: for model", "if (val_map_users.get(user_old) is not None) and (val_map_items.get(item_old) is not None) : user_new=val_map_users.get(user_old) #", "comparison.loc[len(comparison)] = [name, pak, rak, map, ndcg] # In[33]: comparison # # References:", "item_new=val_map_items.get(item_old) # new value rating=i['rating'] val_data_te_ratings.at[user_new,item_new]= rating val_data_te_ratings=val_data_te_ratings.to_numpy() test_data_te_ratings=test_data_te_ratings.to_numpy() # In[27]: disable_eager_execution() svae_model", "svdpp.fit(surprise_train) # ### SVD # In[30]: svd = surprise.SVD(random_state=0, n_factors=64, n_epochs=10, verbose=True) svd.fit(surprise_train)", "(NDCG) # # where $k=10$ # # # # Imports # In[4]: get_ipython().system(u'pip", "for calculating NDCG) test_data_te_ratings = test_data_te.copy() test_data_te = np.where(test_data_te > 3.5, 1.0, 0.0)", "29%, Recall@k by 18%, MAP by 12%, and NDCG by 35%**. # #", "first ((row, column), data) coo = norm_adj_mat.tocoo().astype(np.float32) # create an index that will", "1. 
<NAME>, <NAME>, <NAME>, <NAME>, <NAME> & <NAME>, LightGCN: Simplifying and Powering Graph", "# Convert test user ids to the new ids users = np.array([user2id[x] for", "get_ipython().magic(u'cd \"{path}\"') import sys; sys.path.append(path) get_ipython().system(u'git config --global user.email \"<EMAIL>\"') get_ipython().system(u'git config --global", "# ## Graph Convoultional Networks (GCNs) # ### Light Graph Convolution Network (LightGCN)", "where the non-zero points are indices = np.mat([coo.row, coo.col]).transpose() # covert to sparse", "batch_size=100, k=10, verbose=0, seed=123, drop_encoder=0.5, drop_decoder=0.5, annealing=False, beta=1.0 ) svae_model.fit(x_train=train_data, x_valid=val_data, x_val_tr=val_data_tr, x_val_te=val_data_te_ratings,", "= test[test['userId'].isin(common_users)] svd_pred_common = merged[merged['userId'].isin(common_users)] if len(set(merged['userId'])) != len(set(test['userId'])): print('Number of users in", "data into training and testing parts val_data_tr, val_data_te = numpy_stratified_split(val_data, ratio=0.75, seed=123) test_data_tr,", "with the user-item graph. 
adj_mat = sp.dok_matrix((n_users + n_movies, n_users + n_movies), dtype=np.float32)", "# Imports # In[4]: get_ipython().system(u'pip install -q surprise') # In[8]: import math import", "in both the train and test set common_users = set(test['userId']).intersection(set(predictions['userId'])) # Filter the", "map = metrics.mean_average_precision(rec, tester, 'userId', 'movieId', 'rank') ndcg = metrics.ndcg(rec, tester, 'userId', 'movieId',", "project_name; get_ipython().system(u'mkdir \"{path}\"') get_ipython().magic(u'cd \"{path}\"') import sys; sys.path.append(path) get_ipython().system(u'git config --global user.email \"<EMAIL>\"')", "as_index = False).size() unique_users =sorted(df.userId.unique()) np.random.seed(123) unique_users = np.random.permutation(unique_users) HELDOUT_USERS = 200 #", "'movieId', 'rank') map = metrics.mean_average_precision(rec, tester, 'userId', 'movieId', 'rank') ndcg = metrics.ndcg(rec, tester,", "1.0, 0.0) val_data_te_ratings = val_data_te.copy() val_data_te = np.where(val_data_te > 3.5, 1.0, 0.0) #", "n_users, n_items = n_movies, n_layers = 3 ) ngcf_model.fit(epochs=25, batch_size=1024, optimizer=optimizer) # ###", "algorithm during the Netflix Prize competition, LightGCN achieves an increase in **Percision@k by", "n_users = len(unique_users) train_users = unique_users[:(n_users - HELDOUT_USERS * 2)] val_users = unique_users[(n_users", "tqdm from utils import stratified_split, numpy_stratified_split import build_features import metrics from models import", "users: for item in items: predictions.append([user, item, model.predict(user, item).est]) predictions = pd.DataFrame(predictions, columns=['userId',", "have the same users?: {set(train.userId) == set(test.userId)}') # In[16]: combined = train.append(test) n_users", "older sister model to LightGCN, but only by a single year. We can", "1 if the movie and user are connected. 
adj_mat[:n_users, n_users:] = R adj_mat[n_users:,", "the different models using ranking metrics: # # # * Precision@k # *", "In[4]: get_ipython().system(u'pip install -q surprise') # In[8]: import math import numpy as np", "Outer join and remove movies that have alread been seen (seen=1) merged =", "Variational Autoencoder (SVAE) # In[26]: # Binarize the data (only keep ratings >=", "get_ipython().system(u'rm /content/mykeys.py') path = \"/content/\" + project_name; get_ipython().system(u'mkdir \"{path}\"') get_ipython().magic(u'cd \"{path}\"') import sys;", "# In[24]: # Convert test user ids to the new ids users =", "HELDOUT_USERS)] test_users = unique_users[(n_users - HELDOUT_USERS):] train_set = df.loc[df['userId'].isin(train_users)] val_set = df.loc[df['userId'].isin(val_users)] test_set", "for each user, # not all the recommendations. # Extract the top k", "train['movieId'].unique() for user in users: for item in items: predictions.append([user, item, model.predict(user, item).est])", "# create an index that will tell SparseTensor where the non-zero points are", "k for each user, # not all the recommendations. # Extract the top", "indepth, especially LightGCN and NGCF, where we implemented them from scratch in Tensorflow.", "HELDOUT_USERS = 200 # Create train/validation/test users n_users = len(unique_users) train_users = unique_users[:(n_users", "pd.merge(test, movie_new, on='movieId', how='left') # Reset index to 0-n_users. 
test_reindex['userId_new'] = test_reindex['userId'] -", "and NGCF # In[24]: # Convert test user ids to the new ids", "(seen=1) merged = pd.merge(temp, predictions, on=['userId', 'movieId'], how=\"outer\") merged = merged[merged['seen'].isnull()].drop('seen', axis=1) #", "value item_new=test_map_items.get(item_old) # new value rating=i['rating'] test_data_te_ratings.at[user_new,item_new]= rating if (val_map_users.get(user_old) is not None)", "import SVAE from models.GCN import LightGCN, NGCF # # Prepare data # In[9]:", "print(f'Shape: {movie_titles.shape}') movie_titles.sample(10, random_state=123) # In[15]: train_size = 0.75 train, test = stratified_split(raw_data,", "(sparse matix where users are rows and movies are columns. # 1 if", "is not None) : user_new=val_map_users.get(user_old) # new value item_new=val_map_items.get(item_old) # new value rating=i['rating']", "users that appear in both the train and test set common_users = set(test['userId']).intersection(set(predictions['userId']))", "test_df = am_test.map_back_sparse(test_data_te_ratings, kind='ratings') # use test_data_te_, with the original ratings # Create", "predictions = [] users = train['userId'].unique() items = train['movieId'].unique() for user in users:", "model LightGCN vastly outperforms all other models. When compared to SVD++, a widely", "= merged[merged['seen'].isnull()].drop('seen', axis=1) # Create filter for users that appear in both the", "> 3.5, 1.0, 0.0) # Binarize test data: training part test_data_tr = np.where(test_data_tr", "raw_data[raw_data['rating'] > 3.5] df_low_rating = raw_data[raw_data['rating'] <= 3.5] df = df_preferred.groupby('userId').filter(lambda x: len(x)", "In[20]: # to COOrdinate format first ((row, column), data) coo = norm_adj_mat.tocoo().astype(np.float32) #", "# Calculate degree matrix D (for every row count the number of nonzero", "movie_new, on='movieId', how='left') # Reset index to 0-n_users. 
train_reindex['userId_new'] = train_reindex['userId'] - 1", "# new value item_new=val_map_items.get(item_old) # new value rating=i['rating'] val_data_te_ratings.at[user_new,item_new]= rating val_data_te_ratings=val_data_te_ratings.to_numpy() test_data_te_ratings=test_data_te_ratings.to_numpy() #", "models import SVAE from models.GCN import LightGCN, NGCF # # Prepare data #", "notebooks where we go more indepth, especially LightGCN and NGCF, where we implemented", "df_preferred.groupby('userId').filter(lambda x: len(x) >= 5) df = df.groupby('movieId').filter(lambda x: len(x) >= 1) #", "& <NAME>, Neural Graph Collaorative Filtering, 2019, https://arxiv.org/abs/1905.08108 # 3. Microsoft SVAE implementation:", "&& git push origin \"{branch}\"') # In[7]: import sys sys.path.insert(0, './code') # ---", "Shape: {test.shape}') print(f'Do they have the same users?: {set(train.userId) == set(test.userId)}') # In[16]:", "HELDOUT_USERS):] train_set = df.loc[df['userId'].isin(train_users)] val_set = df.loc[df['userId'].isin(val_users)] test_set = df.loc[df['userId'].isin(test_users)] unique_train_items = pd.unique(train_set['movieId'])", "# ### Light Graph Convolution Network (LightGCN) # In[21]: light_model = LightGCN(A_tilde, n_users", "the diagonals. D_inv_sq_root = sp.diags(D_inv_values) # Eval (D^-0.5 * A * D^-0.5). 
norm_adj_mat", "top_movies['rank'] = top_movies.groupby('userId', sort=False).cumcount() + 1 top_k = top_movies.copy() top_k['rank'] = top_movies.groupby('userId', sort=False).cumcount()", "items_list=unique_train_items) am_test = build_features.AffinityMatrix(df=test_set, items_list=unique_train_items) # Obtain the sparse matrix for train, validation", "has their own individual notebooks where we go more indepth, especially LightGCN and", "(LightGCN) # In[21]: light_model = LightGCN(A_tilde, n_users = n_users, n_items = n_movies, n_layers", "top_k = recommendations.copy() top_k['rank'] = recommendations.groupby('userId', sort=False).cumcount() + 1 # For each user,", "BOTH train and test: {len(set(svd_pred_common['userId']))}\") continue # From the predictions, we want only", "\"<EMAIL>\"') get_ipython().system(u'git config --global user.name \"reco-tut\"') get_ipython().system(u'git init') get_ipython().system(u'git remote add origin https://\"{mykeys.git_token}\":x-oauth-basic@github.com/\"{account}\"/\"{project_name}\".git')", "In[18]: # Create user-item graph (sparse matix where users are rows and movies", "utf-8 # In[1]: import os project_name = \"reco-tut-mlh\"; branch = \"main\"; account =", "NGCF, where we implemented them from scratch in Tensorflow. # # The last", "titles. fp = os.path.join('./data/bronze', 'u.item') movie_titles = pd.read_csv(fp, sep='|', names=['movieId', 'title'], usecols =", "test_data_te.copy() test_data_te = np.where(test_data_te > 3.5, 1.0, 0.0) # retrieve real ratings from", "{raw_data.shape}') raw_data.sample(10, random_state=123) # In[10]: # Load movie titles. 
fp = os.path.join('./data/bronze', 'u.item')", "{set(train.userId) == set(test.userId)}') # In[16]: combined = train.append(test) n_users = combined['userId'].nunique() print('Number of", "### Neural Graph Collaborative Filtering (NGCF) # In[23]: ngcf_model = NGCF(A_tilde, n_users =", "# Split validation and test data into training and testing parts val_data_tr, val_data_te", "(SVD) # ### SVD++ # In[29]: surprise_train = surprise.Dataset.load_from_df(train.drop('timestamp', axis=1), reader=surprise.Reader('ml-100k')).build_full_trainset() svdpp =", "--global user.name \"reco-tut\"') get_ipython().system(u'git init') get_ipython().system(u'git remote add origin https://\"{mykeys.git_token}\":x-oauth-basic@github.com/\"{account}\"/\"{project_name}\".git') get_ipython().system(u'git pull origin", "span of just 1-2 years. # In[32]: model_names = ['LightGCN', 'NGCF', 'SVAE', 'SVD++',", "SVD++, a widely used algorithm during the Netflix Prize competition, LightGCN achieves an", "test_data_te_ratings=test_data_te_ratings.to_numpy() # In[27]: disable_eager_execution() svae_model = SVAE.StandardVAE(n_users=train_data.shape[0], original_dim=train_data.shape[1], intermediate_dim=200, latent_dim=64, n_epochs=400, batch_size=100, k=10,", "len(set(test['userId'])): print('Number of users in train and test are NOT equal') print(f\"# of", "that are also in the test set recs.append(top_k) # ## Standard Variational Autoencoder", "set for all sets am_train = build_features.AffinityMatrix(df=train_set, items_list=unique_train_items) am_val = build_features.AffinityMatrix(df=val_set, items_list=unique_train_items) am_test", "# coding: utf-8 # In[1]: import os project_name = \"reco-tut-mlh\"; branch = \"main\";", "be developed in the span of just 1-2 years. 
# In[32]: model_names =", "\"{path}\"') get_ipython().magic(u'cd \"{path}\"') import sys; sys.path.append(path) get_ipython().system(u'git config --global user.email \"<EMAIL>\"') get_ipython().system(u'git config", "5) df = df.groupby('movieId').filter(lambda x: len(x) >= 1) # Obtain both usercount and", "and NDCG by 35%**. # # NGCF is the older sister model to", "increases can be developed in the span of just 1-2 years. # In[32]:", "operations. # # In conclusion, this demonstrates how far recommendation systems have advanced", "nodes/vertices. # 1 if the movie and user are connected. adj_mat[:n_users, n_users:] =", "Recommend with LightGCN and NGCF # In[24]: # Convert test user ids to", "\"/content/\" + project_name; get_ipython().system(u'mkdir \"{path}\"') get_ipython().magic(u'cd \"{path}\"') import sys; sys.path.append(path) get_ipython().system(u'git config --global", "scipy.sparse as sp import surprise import tensorflow as tf from sklearn.model_selection import train_test_split", "> 3.5, 1.0, 0.0) test_data = np.where(test_data > 3.5, 1.0, 0.0) # Binarize", "os project_name = \"reco-tut-mlh\"; branch = \"main\"; account = \"sparsh-ai\" project_path = os.path.join('/content',", "and test respectively: {len(set(merged['userId']))}, {len(set(test['userId']))}\") print(f\"# of users in BOTH train and test:", "np.array([user2id[x] for x in test['userId'].unique()]) recs = [] for model in [light_model, ngcf_model]:", "test_data_te = numpy_stratified_split(test_data, ratio=0.75, seed=123) # Binarize train, validation and test data train_data", "respectively: {len(set(merged['userId']))}, {len(set(test['userId']))}\") print(f\"# of users in BOTH train and test: {len(set(svd_pred_common['userId']))}\") continue", "the state-of-the-art model LightGCN vastly outperforms all other models. 
When compared to SVD++,", "Obtain both usercount and itemcount after filtering usercount = df[['userId']].groupby('userId', as_index = False).size()", "user in users: for item in items: predictions.append([user, item, model.predict(user, item).est]) predictions =", "Gain (NDCG) # # where $k=10$ # # # # Imports # In[4]:", "of our models, we can see that the state-of-the-art model LightGCN vastly outperforms", "1 # For each user, only include movies recommendations that are also in", "# Create column with the predicted movie's rank for each user top_k =", "widely used algorithm during the Netflix Prize competition, LightGCN achieves an increase in", "other models. When compared to SVD++, a widely used algorithm during the Netflix", "import os import pandas as pd import random import requests import scipy.sparse as", "test_set.loc[test_set['movieId'].isin(unique_train_items)] # Instantiate the sparse matrix generation for train, validation and test sets", "number of nonzero entries) D_values = np.array(adj_mat.sum(1)) # Square root and inverse. D_inv_values", ") # ### Recommend with SVAE # In[28]: # Model prediction on the", "# References: # # 1. <NAME>, <NAME>, <NAME>, <NAME>, <NAME> & <NAME>, LightGCN:", "# Obtain the sparse matrix for train, validation and test sets train_data, _,", "movie_new['movieId_new'])) id2item = dict(zip(movie_new['movieId_new'], movie_new['movieId'])) user2id = dict(zip(train['userId'], train_reindex['userId_new'])) id2user = dict(zip(train_reindex['userId_new'], train['userId']))", "are also in the test set recs.append(top_k) # ## Singular Value Decomposition (SVD)", "index to 0-n_users. train_reindex['userId_new'] = train_reindex['userId'] - 1 train_reindex = train_reindex[['userId_new', 'movieId_new', 'rating']]", "get_ipython().system(u'git add . 
&& git commit -m \\'commit\\' && git push origin \"{branch}\"')", "Convert sparse matrix back to df recommendations = am_test.map_back_sparse(top_k, kind='prediction') test_df = am_test.map_back_sparse(test_data_te_ratings,", "'rank') map = metrics.mean_average_precision(rec, tester, 'userId', 'movieId', 'rank') ndcg = metrics.ndcg(rec, tester, 'userId',", "set common_users = set(test['userId']).intersection(set(predictions['userId'])) # Filter the test and predictions so they have", "recommendation systems starting with the state-of-the-art LightGCN and going back to the winning", "[name, pak, rak, map, ndcg] # In[33]: comparison # # References: # #", "= build_features.AffinityMatrix(df=test_set, items_list=unique_train_items) # Obtain the sparse matrix for train, validation and test", "n_layers = 3 ) ngcf_model.fit(epochs=25, batch_size=1024, optimizer=optimizer) # ### Recommend with LightGCN and", "in df_low_rating.iterrows(): user_old= i['userId'] # old value item_old=i['movieId'] # old value if (test_map_users.get(user_old)", "= np.array([user2id[x] for x in test['userId'].unique()]) for rec, name in zip(recs, model_names): tester", "are rows and movies are columns. 
# 1 if a user reviewed that", "names=['movieId', 'title'], usecols = range(2), encoding='iso-8859-1') print(f'Shape: {movie_titles.shape}') movie_titles.sample(10, random_state=123) # In[15]: train_size", "same users?: {set(train.userId) == set(test.userId)}') # In[16]: combined = train.append(test) n_users = combined['userId'].nunique()", "for model in [svdpp, svd]: predictions = [] users = train['userId'].unique() items =", "origin https://\"{mykeys.git_token}\":x-oauth-basic@github.com/\"{account}\"/\"{project_name}\".git') get_ipython().system(u'git pull origin \"{branch}\"') get_ipython().system(u'git checkout main') else: get_ipython().magic(u'cd \"{project_path}\"') #", "n_users, n_items = n_movies, n_layers = 3) # In[22]: optimizer = tf.keras.optimizers.Adam(learning_rate=1e-2) light_model.fit(epochs=25,", "import random import requests import scipy.sparse as sp import surprise import tensorflow as", "in [light_model, ngcf_model]: recommendations = model.recommend(users, k=10) recommendations = recommendations.replace({'userId': id2user, 'movieId': id2item})", "intermediate_dim=200, latent_dim=64, n_epochs=400, batch_size=100, k=10, verbose=0, seed=123, drop_encoder=0.5, drop_decoder=0.5, annealing=False, beta=1.0 ) svae_model.fit(x_train=train_data,", "rating=i['rating'] test_data_te_ratings.at[user_new,item_new]= rating if (val_map_users.get(user_old) is not None) and (val_map_items.get(item_old) is not None)", "Recommend with SVD++ and SVD # In[31]: for model in [svdpp, svd]: predictions", "HELDOUT_USERS * 2)] val_users = unique_users[(n_users - HELDOUT_USERS * 2) : (n_users -", "= np.where(train_data > 3.5, 1.0, 0.0) val_data = np.where(val_data > 3.5, 1.0, 0.0)", "'u.data') raw_data = pd.read_csv(fp, sep='\\t', names=['userId', 'movieId', 'rating', 'timestamp']) print(f'Shape: {raw_data.shape}') raw_data.sample(10, random_state=123)", "to 0-n_users. 
test_reindex['userId_new'] = test_reindex['userId'] - 1 test_reindex = test_reindex[['userId_new', 'movieId_new', 'rating']] #", "can be developed in the span of just 1-2 years. # In[32]: model_names", "kind='prediction') test_df = am_test.map_back_sparse(test_data_te_ratings, kind='ratings') # use test_data_te_, with the original ratings #", "sparse matrix back to df recommendations = am_test.map_back_sparse(top_k, kind='prediction') test_df = am_test.map_back_sparse(test_data_te_ratings, kind='ratings')", "# In[1]: import os project_name = \"reco-tut-mlh\"; branch = \"main\"; account = \"sparsh-ai\"", "rak, map, ndcg] # In[33]: comparison # # References: # # 1. <NAME>,", "# In[18]: # Create user-item graph (sparse matix where users are rows and", "len(x) >= 1) # Obtain both usercount and itemcount after filtering usercount =", "= df.loc[df['userId'].isin(train_users)] val_set = df.loc[df['userId'].isin(val_users)] test_set = df.loc[df['userId'].isin(test_users)] unique_train_items = pd.unique(train_set['movieId']) val_set =", "= unique_users[(n_users - HELDOUT_USERS):] train_set = df.loc[df['userId'].isin(train_users)] val_set = df.loc[df['userId'].isin(val_users)] test_set = df.loc[df['userId'].isin(test_users)]", "user are connected. 
adj_mat[:n_users, n_users:] = R adj_mat[n_users:, :n_users] = R.T adj_mat #", "# In[19]: # Calculate degree matrix D (for every row count the number", "= n_users, n_items = n_movies, n_layers = 3) # In[22]: optimizer = tf.keras.optimizers.Adam(learning_rate=1e-2)", "is not None) : user_new=test_map_users.get(user_old) # new value item_new=test_map_items.get(item_old) # new value rating=i['rating']", "# In[16]: combined = train.append(test) n_users = combined['userId'].nunique() print('Number of users:', n_users) n_movies", "pd.DataFrame(predictions, columns=['userId', 'movieId', 'prediction']) # Remove movies already seen by users # Create", "predictions so they have the same users between them test_common = test[test['userId'].isin(common_users)] svd_pred_common", "'movieId', 'rank') rak = metrics.recall_at_k(rec, tester, 'userId', 'movieId', 'rank') map = metrics.mean_average_precision(rec, tester,", "import metrics from models import SVAE from models.GCN import LightGCN, NGCF # #", "row count the number of nonzero entries) D_values = np.array(adj_mat.sum(1)) # Square root", "import disable_eager_execution from tqdm import tqdm from utils import stratified_split, numpy_stratified_split import build_features", "SVAE from models.GCN import LightGCN, NGCF # # Prepare data # In[9]: fp", "Convolution Network (LightGCN) # In[21]: light_model = LightGCN(A_tilde, n_users = n_users, n_items =", "compare different recommendation systems starting with the state-of-the-art LightGCN and going back to", "# In[21]: light_model = LightGCN(A_tilde, n_users = n_users, n_items = n_movies, n_layers =", "0.0) # Binarize test data: testing part (save non-binary version in the separate", "When compared to SVD++, a widely used algorithm during the Netflix Prize competition,", "matrix with the values of D^(-0.5) are the diagonals. 
D_inv_sq_root = sp.diags(D_inv_values) #", "on the training part of test set top_k = svae_model.recommend_k_items(x=test_data_tr,k=10,remove_seen=True) # Convert sparse", "the sparse matrix for train, validation and test sets train_data, _, _ =", "the test and predictions so they have the same users between them test_common", "movies recommendations that are also in the test set recs.append(top_k) # ## Standard", "Filter the test and predictions so they have the same users between them", "root and inverse. D_inv_values = np.power(D_values + 1e-9, -0.5).flatten() D_inv_values[np.isinf(D_inv_values)] = 0.0 #", "# # In conclusion, this demonstrates how far recommendation systems have advanced since", "and itemcount after filtering usercount = df[['userId']].groupby('userId', as_index = False).size() itemcount = df[['movieId']].groupby('movieId',", "sp import surprise import tensorflow as tf from sklearn.model_selection import train_test_split from tensorflow.python.framework.ops", "1 train_reindex = train_reindex[['userId_new', 'movieId_new', 'rating']] test_reindex = pd.merge(test, movie_new, on='movieId', how='left') #", "not os.path.exists(project_path): get_ipython().system(u'cp /content/drive/MyDrive/mykeys.py /content') import mykeys get_ipython().system(u'rm /content/mykeys.py') path = \"/content/\" +", "0.0) val_data = np.where(val_data > 3.5, 1.0, 0.0) test_data = np.where(test_data > 3.5,", "SVD # In[31]: for model in [svdpp, svd]: predictions = [] users =", "users are nodes/vertices. # 1 if the movie and user are connected. 
adj_mat[:n_users,", "df recommendations = am_test.map_back_sparse(top_k, kind='prediction') test_df = am_test.map_back_sparse(test_data_te_ratings, kind='ratings') # use test_data_te_, with", "import tensorflow as tf from sklearn.model_selection import train_test_split from tensorflow.python.framework.ops import disable_eager_execution from", "test_users = unique_users[(n_users - HELDOUT_USERS):] train_set = df.loc[df['userId'].isin(train_users)] val_set = df.loc[df['userId'].isin(val_users)] test_set =", "# new value rating=i['rating'] val_data_te_ratings.at[user_new,item_new]= rating val_data_te_ratings=val_data_te_ratings.to_numpy() test_data_te_ratings=test_data_te_ratings.to_numpy() # In[27]: disable_eager_execution() svae_model =", "am_train.gen_affinity_matrix() val_data, val_map_users, val_map_items = am_val.gen_affinity_matrix() test_data, test_map_users, test_map_items = am_test.gen_affinity_matrix() # Split", "all sets am_train = build_features.AffinityMatrix(df=train_set, items_list=unique_train_items) am_val = build_features.AffinityMatrix(df=val_set, items_list=unique_train_items) am_test = build_features.AffinityMatrix(df=test_set,", "test sets train_data, _, _ = am_train.gen_affinity_matrix() val_data, val_map_users, val_map_items = am_val.gen_affinity_matrix() test_data,", "# Convert sparse matrix back to df recommendations = am_test.map_back_sparse(top_k, kind='prediction') test_df =", "R.tolil() # Put together adjacency matrix. Movies and users are nodes/vertices. 
# 1", "project_name) # In[2]: if not os.path.exists(project_path): get_ipython().system(u'cp /content/drive/MyDrive/mykeys.py /content') import mykeys get_ipython().system(u'rm /content/mykeys.py')", "different models using ranking metrics: # # # * Precision@k # * Recall@k", "Create dictionaries so we can convert to and from indexes item2id = dict(zip(movie_new['movieId'],", "# * Normalized Discounted Cumulative Gain (NDCG) # # where $k=10$ # #", "an index that will tell SparseTensor where the non-zero points are indices =", "ratio=0.75, seed=123) # Binarize train, validation and test data train_data = np.where(train_data >", "(only keep ratings >= 4) df_preferred = raw_data[raw_data['rating'] > 3.5] df_low_rating = raw_data[raw_data['rating']", "val_users = unique_users[(n_users - HELDOUT_USERS * 2) : (n_users - HELDOUT_USERS)] test_users =", "'rating']] test_reindex = pd.merge(test, movie_new, on='movieId', how='left') # Reset index to 0-n_users. test_reindex['userId_new']", "especially LightGCN and NGCF, where we implemented them from scratch in Tensorflow. #", "reader=surprise.Reader('ml-100k')).build_full_trainset() svdpp = surprise.SVDpp(random_state=0, n_factors=64, n_epochs=10, verbose=True) svdpp.fit(surprise_train) # ### SVD # In[30]:", "the test set recs.append(top_k) # # Compare performance # Looking at all 5", "= pd.DataFrame(columns=['Algorithm', 'Precision@k', 'Recall@k', 'MAP', 'NDCG']) # Convert test user ids to the", "recommendations that are also in the test set recs.append(top_k) # ## Singular Value", "len(unique_users) train_users = unique_users[:(n_users - HELDOUT_USERS * 2)] val_users = unique_users[(n_users - HELDOUT_USERS", "by 18%, MAP by 12%, and NDCG by 35%**. 
# # NGCF is", "= df.loc[df['userId'].isin(test_users)] unique_train_items = pd.unique(train_set['movieId']) val_set = val_set.loc[val_set['movieId'].isin(unique_train_items)] test_set = test_set.loc[test_set['movieId'].isin(unique_train_items)] # Instantiate", "id2user = dict(zip(train_reindex['userId_new'], train['userId'])) # In[18]: # Create user-item graph (sparse matix where", "Create filter for users that appear in both the train and test set", "and test: {len(set(svd_pred_common['userId']))}\") continue # From the predictions, we want only the top", "have advanced since 2009, and how new model architectures with notable performance increases", "indexes item2id = dict(zip(movie_new['movieId'], movie_new['movieId_new'])) id2item = dict(zip(movie_new['movieId_new'], movie_new['movieId'])) user2id = dict(zip(train['userId'], train_reindex['userId_new']))", "0-n_users. test_reindex['userId_new'] = test_reindex['userId'] - 1 test_reindex = test_reindex[['userId_new', 'movieId_new', 'rating']] # Create", "Simplifying and Powering Graph Convolution Network for Recommendation, 2020, https://arxiv.org/abs/2002.02126 # 2. <NAME>,", "parts val_data_tr, val_data_te = numpy_stratified_split(val_data, ratio=0.75, seed=123) test_data_tr, test_data_te = numpy_stratified_split(test_data, ratio=0.75, seed=123)", "competition, LightGCN achieves an increase in **Percision@k by 29%, Recall@k by 18%, MAP", "os.path.join('./data/bronze', 'u.data') raw_data = pd.read_csv(fp, sep='\\t', names=['userId', 'movieId', 'rating', 'timestamp']) print(f'Shape: {raw_data.shape}') raw_data.sample(10,", "= pd.merge(train, movie_new, on='movieId', how='left') # Reset index to 0-n_users. train_reindex['userId_new'] = train_reindex['userId']", "-q surprise') # In[8]: import math import numpy as np import os import", "A * D^-0.5). 
norm_adj_mat = D_inv_sq_root.dot(adj_mat).dot(D_inv_sq_root) # In[20]: # to COOrdinate format first", "np.where(test_data_tr > 3.5, 1.0, 0.0) # Binarize test data: testing part (save non-binary", "surprise_train = surprise.Dataset.load_from_df(train.drop('timestamp', axis=1), reader=surprise.Reader('ml-100k')).build_full_trainset() svdpp = surprise.SVDpp(random_state=0, n_factors=64, n_epochs=10, verbose=True) svdpp.fit(surprise_train) #", "together adjacency matrix. Movies and users are nodes/vertices. # 1 if the movie", "entries) D_values = np.array(adj_mat.sum(1)) # Square root and inverse. D_inv_values = np.power(D_values +", "user_old= i['userId'] # old value item_old=i['movieId'] # old value if (test_map_users.get(user_old) is not", "notebook we compare different recommendation systems starting with the state-of-the-art LightGCN and going", "/content') import mykeys get_ipython().system(u'rm /content/mykeys.py') path = \"/content/\" + project_name; get_ipython().system(u'mkdir \"{path}\"') get_ipython().magic(u'cd", "we go more indepth, especially LightGCN and NGCF, where we implemented them from", "SVD++, and SVD. Each model has their own individual notebooks where we go", "movie_titles = pd.read_csv(fp, sep='|', names=['movieId', 'title'], usecols = range(2), encoding='iso-8859-1') print(f'Shape: {movie_titles.shape}') movie_titles.sample(10,", "sets train_data, _, _ = am_train.gen_affinity_matrix() val_data, val_map_users, val_map_items = am_val.gen_affinity_matrix() test_data, test_map_users,", "top_k['rank'] = recommendations.groupby('userId', sort=False).cumcount() + 1 # For each user, only include movies", "200 # Create train/validation/test users n_users = len(unique_users) train_users = unique_users[:(n_users - HELDOUT_USERS", "$k=10$ # # # # Imports # In[4]: get_ipython().system(u'pip install -q surprise') #", "didn't). 
R = sp.dok_matrix((n_users, n_movies), dtype=np.float32) R[train_reindex['userId_new'], train_reindex['movieId_new']] = 1 # Create the", "test_data = np.where(test_data > 3.5, 1.0, 0.0) # Binarize validation data val_data_tr =", "inverse. D_inv_values = np.power(D_values + 1e-9, -0.5).flatten() D_inv_values[np.isinf(D_inv_values)] = 0.0 # Create sparse", "the Netflix Prize competition, LightGCN achieves an increase in **Percision@k by 29%, Recall@k", "np.random.seed(123) unique_users = np.random.permutation(unique_users) HELDOUT_USERS = 200 # Create train/validation/test users n_users =", "len(set(merged['userId'])) != len(set(test['userId'])): print('Number of users in train and test are NOT equal')", "from models.GCN import LightGCN, NGCF # # Prepare data # In[9]: fp =", "original_dim=train_data.shape[1], intermediate_dim=200, latent_dim=64, n_epochs=400, batch_size=100, k=10, verbose=0, seed=123, drop_encoder=0.5, drop_decoder=0.5, annealing=False, beta=1.0 )", "# In[20]: # to COOrdinate format first ((row, column), data) coo = norm_adj_mat.tocoo().astype(np.float32)", "n_users = n_users, n_items = n_movies, n_layers = 3 ) ngcf_model.fit(epochs=25, batch_size=1024, optimizer=optimizer)", "In[21]: light_model = LightGCN(A_tilde, n_users = n_users, n_items = n_movies, n_layers = 3)", "users = train['userId'].unique() items = train['movieId'].unique() for user in users: for item in", "test and predictions so they have the same users between them test_common =", "surprise') # In[8]: import math import numpy as np import os import pandas", "val_map_users, val_map_items = am_val.gen_affinity_matrix() test_data, test_map_users, test_map_items = am_test.gen_affinity_matrix() # Split validation and", "'SVAE', 'SVD++', 'SVD'] comparison = pd.DataFrame(columns=['Algorithm', 'Precision@k', 'Recall@k', 'MAP', 'NDCG']) # Convert test", "n_users = n_users, n_items = n_movies, n_layers = 3) # In[22]: optimizer =", "Recall@k by 18%, MAP by 12%, and NDCG by 35%**. 
# # NGCF", "the recommendations. # Extract the top k recommendations from the predictions top_movies =", "else test pak = metrics.precision_at_k(rec, tester, 'userId', 'movieId', 'rank') rak = metrics.recall_at_k(rec, tester,", "= \"main\"; account = \"sparsh-ai\" project_path = os.path.join('/content', project_name) # In[2]: if not", "<NAME>, <NAME>, <NAME>, & <NAME>, Neural Graph Collaorative Filtering, 2019, https://arxiv.org/abs/1905.08108 # 3.", "= \"sparsh-ai\" project_path = os.path.join('/content', project_name) # In[2]: if not os.path.exists(project_path): get_ipython().system(u'cp /content/drive/MyDrive/mykeys.py", "None) and (val_map_items.get(item_old) is not None) : user_new=val_map_users.get(user_old) # new value item_new=val_map_items.get(item_old) #", "data (only keep ratings >= 4) df_preferred = raw_data[raw_data['rating'] > 3.5] df_low_rating =", "val_data = np.where(val_data > 3.5, 1.0, 0.0) test_data = np.where(test_data > 3.5, 1.0,", "between them test_common = test[test['userId'].isin(common_users)] svd_pred_common = merged[merged['userId'].isin(common_users)] if len(set(merged['userId'])) != len(set(test['userId'])): print('Number", "train['userId'].unique() items = train['movieId'].unique() for user in users: for item in items: predictions.append([user,", "optimizer = tf.keras.optimizers.Adam(learning_rate=1e-2) light_model.fit(epochs=25, batch_size=1024, optimizer=optimizer) # ### Neural Graph Collaborative Filtering (NGCF)", "print(f\"# of users in train and test respectively: {len(set(merged['userId']))}, {len(set(test['userId']))}\") print(f\"# of users", "# Filter the test and predictions so they have the same users between", "+ 1 # For each user, only include movies recommendations that are also", "= np.random.permutation(unique_users) HELDOUT_USERS = 200 # Create train/validation/test users n_users = len(unique_users) train_users", "LightGCN and NGCF, where we implemented them from scratch in Tensorflow. 
# #", "= unique_users[:(n_users - HELDOUT_USERS * 2)] val_users = unique_users[(n_users - HELDOUT_USERS * 2)", "svd = surprise.SVD(random_state=0, n_factors=64, n_epochs=10, verbose=True) svd.fit(surprise_train) # ### Recommend with SVD++ and", "> 3.5] df_low_rating = raw_data[raw_data['rating'] <= 3.5] df = df_preferred.groupby('userId').filter(lambda x: len(x) >=", "the sparse matrix generation for train, validation and test sets # use list", "after filtering usercount = df[['userId']].groupby('userId', as_index = False).size() itemcount = df[['movieId']].groupby('movieId', as_index =", "pull origin \"{branch}\"') get_ipython().system(u'git checkout main') else: get_ipython().magic(u'cd \"{project_path}\"') # In[34]: get_ipython().system(u'git status')", "models, we can see that the state-of-the-art model LightGCN vastly outperforms all other", "sets # use list of unique items from training set for all sets", "and SVD. Each model has their own individual notebooks where we go more", "dict(zip(movie_new['movieId'], movie_new['movieId_new'])) id2item = dict(zip(movie_new['movieId_new'], movie_new['movieId'])) user2id = dict(zip(train['userId'], train_reindex['userId_new'])) id2user = dict(zip(train_reindex['userId_new'],", "to NGCF by simply removing unnecessary operations. 
# # In conclusion, this demonstrates", "--- # # Collaborative Filtering Comparison # # In this notebook we compare", "<NAME>, <NAME>, <NAME>, <NAME> & <NAME>, LightGCN: Simplifying and Powering Graph Convolution Network", "value item_new=val_map_items.get(item_old) # new value rating=i['rating'] val_data_te_ratings.at[user_new,item_new]= rating val_data_te_ratings=val_data_te_ratings.to_numpy() test_data_te_ratings=test_data_te_ratings.to_numpy() # In[27]: disable_eager_execution()", "metrics.mean_average_precision(rec, tester, 'userId', 'movieId', 'rank') ndcg = metrics.ndcg(rec, tester, 'userId', 'movieId', 'rank') comparison.loc[len(comparison)]", "random_state=123) # In[15]: train_size = 0.75 train, test = stratified_split(raw_data, 'userId', train_size) print(f'Train", "test_reindex['userId_new'] = test_reindex['userId'] - 1 test_reindex = test_reindex[['userId_new', 'movieId_new', 'rating']] # Create dictionaries", "# Create dictionaries so we can convert to and from indexes item2id =", "they have the same users between them test_common = test[test['userId'].isin(common_users)] svd_pred_common = merged[merged['userId'].isin(common_users)]", "In conclusion, this demonstrates how far recommendation systems have advanced since 2009, and", "since 2009, and how new model architectures with notable performance increases can be", "index of 0-n_movies. movie_new = combined[['movieId']].drop_duplicates() movie_new['movieId_new'] = np.arange(len(movie_new)) train_reindex = pd.merge(train, movie_new,", "# In[32]: model_names = ['LightGCN', 'NGCF', 'SVAE', 'SVD++', 'SVD'] comparison = pd.DataFrame(columns=['Algorithm', 'Precision@k',", "(D^-0.5 * A * D^-0.5). norm_adj_mat = D_inv_sq_root.dot(adj_mat).dot(D_inv_sq_root) # In[20]: # to COOrdinate", "data val_data_tr = np.where(val_data_tr > 3.5, 1.0, 0.0) val_data_te_ratings = val_data_te.copy() val_data_te =", "in ranking metrics compared to NGCF by simply removing unnecessary operations. 
# #", "not None) : user_new=val_map_users.get(user_old) # new value item_new=val_map_items.get(item_old) # new value rating=i['rating'] val_data_te_ratings.at[user_new,item_new]=", "get_ipython().system(u'cp /content/drive/MyDrive/mykeys.py /content') import mykeys get_ipython().system(u'rm /content/mykeys.py') path = \"/content/\" + project_name; get_ipython().system(u'mkdir", "have alread been seen (seen=1) merged = pd.merge(temp, predictions, on=['userId', 'movieId'], how=\"outer\") merged", "# In[22]: optimizer = tf.keras.optimizers.Adam(learning_rate=1e-2) light_model.fit(epochs=25, batch_size=1024, optimizer=optimizer) # ### Neural Graph Collaborative", "import pandas as pd import random import requests import scipy.sparse as sp import", "movie_new['movieId'])) user2id = dict(zip(train['userId'], train_reindex['userId_new'])) id2user = dict(zip(train_reindex['userId_new'], train['userId'])) # In[18]: # Create", "id2item = dict(zip(movie_new['movieId_new'], movie_new['movieId'])) user2id = dict(zip(train['userId'], train_reindex['userId_new'])) id2user = dict(zip(train_reindex['userId_new'], train['userId'])) #", "on='movieId' )[['userId', 'movieId', 'title', 'prediction']] # Create column with the predicted movie's rank", "train_reindex['movieId_new']] = 1 # Create the adjaceny matrix with the user-item graph. 
adj_mat", "calculating NDCG) test_data_te_ratings = test_data_te.copy() test_data_te = np.where(test_data_te > 3.5, 1.0, 0.0) #", "> 3.5, 1.0, 0.0) # retrieve real ratings from initial dataset test_data_te_ratings=pd.DataFrame(test_data_te_ratings) val_data_te_ratings=pd.DataFrame(val_data_te_ratings)", "'movieId', 'rank') ndcg = metrics.ndcg(rec, tester, 'userId', 'movieId', 'rank') comparison.loc[len(comparison)] = [name, pak,", "numpy_stratified_split import build_features import metrics from models import SVAE from models.GCN import LightGCN,", "testing part (save non-binary version in the separate object, will be used for", "set top_k = svae_model.recommend_k_items(x=test_data_tr,k=10,remove_seen=True) # Convert sparse matrix back to df recommendations =", "np import os import pandas as pd import random import requests import scipy.sparse", "train_set = df.loc[df['userId'].isin(train_users)] val_set = df.loc[df['userId'].isin(val_users)] test_set = df.loc[df['userId'].isin(test_users)] unique_train_items = pd.unique(train_set['movieId']) val_set", "back to the winning algorithm for 2009's Netflix Prize competition, SVD++. 
# #", "n_users) n_movies = combined['movieId'].nunique() print('Number of movies:', n_movies) # In[17]: # Create DataFrame", "this notebook we compare different recommendation systems starting with the state-of-the-art LightGCN and", "get_ipython().system(u'git pull origin \"{branch}\"') get_ipython().system(u'git checkout main') else: get_ipython().magic(u'cd \"{project_path}\"') # In[34]: get_ipython().system(u'git", "test set recs.append(top_k) # ## Singular Value Decomposition (SVD) # ### SVD++ #", "if len(set(merged['userId'])) != len(set(test['userId'])): print('Number of users in train and test are NOT", "D_inv_sq_root.dot(adj_mat).dot(D_inv_sq_root) # In[20]: # to COOrdinate format first ((row, column), data) coo =", "and test are NOT equal') print(f\"# of users in train and test respectively:", "2019, https://arxiv.org/abs/1905.08108 # 3. Microsoft SVAE implementation: https://github.com/microsoft/recommenders/blob/main/examples/02_model_collaborative_filtering/standard_vae_deep_dive.ipynb # 4. <NAME>, Netflix Prize", "model in [light_model, ngcf_model]: recommendations = model.recommend(users, k=10) recommendations = recommendations.replace({'userId': id2user, 'movieId':", "by a single year. We can see how LightGCN improves in ranking metrics", "test data train_data = np.where(train_data > 3.5, 1.0, 0.0) val_data = np.where(val_data >", "NGCF(A_tilde, n_users = n_users, n_items = n_movies, n_layers = 3 ) ngcf_model.fit(epochs=25, batch_size=1024,", "# where $k=10$ # # # # Imports # In[4]: get_ipython().system(u'pip install -q", "to LightGCN, but only by a single year. 
We can see how LightGCN", "0.0) # Binarize test data: training part test_data_tr = np.where(test_data_tr > 3.5, 1.0,", "= train_reindex['userId'] - 1 train_reindex = train_reindex[['userId_new', 'movieId_new', 'rating']] test_reindex = pd.merge(test, movie_new,", "- 1 test_reindex = test_reindex[['userId_new', 'movieId_new', 'rating']] # Create dictionaries so we can", "and (test_map_items.get(item_old) is not None) : user_new=test_map_users.get(user_old) # new value item_new=test_map_items.get(item_old) # new", "movie_new = combined[['movieId']].drop_duplicates() movie_new['movieId_new'] = np.arange(len(movie_new)) train_reindex = pd.merge(train, movie_new, on='movieId', how='left') #", "References: # # 1. <NAME>, <NAME>, <NAME>, <NAME>, <NAME> & <NAME>, LightGCN: Simplifying", "train_size = 0.75 train, test = stratified_split(raw_data, 'userId', train_size) print(f'Train Shape: {train.shape}') print(f'Test", "val_data_te.copy() val_data_te = np.where(val_data_te > 3.5, 1.0, 0.0) # Binarize test data: training", "them from scratch in Tensorflow. # # The last cell compares the performance", "= combined['movieId'].nunique() print('Number of movies:', n_movies) # In[17]: # Create DataFrame with reset", "notable performance increases can be developed in the span of just 1-2 years.", "unique_users[(n_users - HELDOUT_USERS * 2) : (n_users - HELDOUT_USERS)] test_users = unique_users[(n_users -", "year. We can see how LightGCN improves in ranking metrics compared to NGCF", "### Light Graph Convolution Network (LightGCN) # In[21]: light_model = LightGCN(A_tilde, n_users =", "appear in both the train and test set common_users = set(test['userId']).intersection(set(predictions['userId'])) # Filter", "we implemented them from scratch in Tensorflow. # # The last cell compares", "+ n_movies, n_users + n_movies), dtype=np.float32) # List of lists. 
adj_mat.tolil() R =", "count the number of nonzero entries) D_values = np.array(adj_mat.sum(1)) # Square root and", "values of D^(-0.5) are the diagonals. D_inv_sq_root = sp.diags(D_inv_values) # Eval (D^-0.5 *", "for all sets am_train = build_features.AffinityMatrix(df=train_set, items_list=unique_train_items) am_val = build_features.AffinityMatrix(df=val_set, items_list=unique_train_items) am_test =", "val_data_te = np.where(val_data_te > 3.5, 1.0, 0.0) # Binarize test data: training part", "Shape: {train.shape}') print(f'Test Shape: {test.shape}') print(f'Do they have the same users?: {set(train.userId) ==", "LightGCN improves in ranking metrics compared to NGCF by simply removing unnecessary operations.", "Put together adjacency matrix. Movies and users are nodes/vertices. # 1 if the", "movie_titles.sample(10, random_state=123) # In[15]: train_size = 0.75 train, test = stratified_split(raw_data, 'userId', train_size)", "train and test respectively: {len(set(merged['userId']))}, {len(set(test['userId']))}\") print(f\"# of users in BOTH train and", "os.path.join('/content', project_name) # In[2]: if not os.path.exists(project_path): get_ipython().system(u'cp /content/drive/MyDrive/mykeys.py /content') import mykeys get_ipython().system(u'rm", "import LightGCN, NGCF # # Prepare data # In[9]: fp = os.path.join('./data/bronze', 'u.data')", "<NAME>, Neural Graph Collaorative Filtering, 2019, https://arxiv.org/abs/1905.08108 # 3. 
Microsoft SVAE implementation: https://github.com/microsoft/recommenders/blob/main/examples/02_model_collaborative_filtering/standard_vae_deep_dive.ipynb", "n_movies, n_layers = 3 ) ngcf_model.fit(epochs=25, batch_size=1024, optimizer=optimizer) # ### Recommend with LightGCN", "= train['movieId'].unique() for user in users: for item in items: predictions.append([user, item, model.predict(user,", "item in items: predictions.append([user, item, model.predict(user, item).est]) predictions = pd.DataFrame(predictions, columns=['userId', 'movieId', 'prediction'])", "train['userId'])) # In[18]: # Create user-item graph (sparse matix where users are rows", "test set top_k = svae_model.recommend_k_items(x=test_data_tr,k=10,remove_seen=True) # Convert sparse matrix back to df recommendations", "for user in users: for item in items: predictions.append([user, item, model.predict(user, item).est]) predictions", "project_path = os.path.join('/content', project_name) # In[2]: if not os.path.exists(project_path): get_ipython().system(u'cp /content/drive/MyDrive/mykeys.py /content') import", "((row, column), data) coo = norm_adj_mat.tocoo().astype(np.float32) # create an index that will tell", "**Percision@k by 29%, Recall@k by 18%, MAP by 12%, and NDCG by 35%**.", "5 of our models, we can see that the state-of-the-art model LightGCN vastly", "https://arxiv.org/abs/2002.02126 # 2. <NAME>, <NAME>, <NAME>, <NAME>, & <NAME>, Neural Graph Collaorative Filtering,", "as pd import random import requests import scipy.sparse as sp import surprise import", "sklearn.model_selection import train_test_split from tensorflow.python.framework.ops import disable_eager_execution from tqdm import tqdm from utils", "if they didn't). 
R = sp.dok_matrix((n_users, n_movies), dtype=np.float32) R[train_reindex['userId_new'], train_reindex['movieId_new']] = 1 #", "= surprise.SVDpp(random_state=0, n_factors=64, n_epochs=10, verbose=True) svdpp.fit(surprise_train) # ### SVD # In[30]: svd =", "train_size) print(f'Train Shape: {train.shape}') print(f'Test Shape: {test.shape}') print(f'Do they have the same users?:", "name == 'SVAE' else test pak = metrics.precision_at_k(rec, tester, 'userId', 'movieId', 'rank') rak", "different recommendation systems starting with the state-of-the-art LightGCN and going back to the", "item_new=test_map_items.get(item_old) # new value rating=i['rating'] test_data_te_ratings.at[user_new,item_new]= rating if (val_map_users.get(user_old) is not None) and", "= recommendations.replace({'userId': id2user, 'movieId': id2item}) recommendations = recommendations.merge(movie_titles, how='left', on='movieId' )[['userId', 'movieId', 'title',", "and how new model architectures with notable performance increases can be developed in", "# Collaborative Filtering Comparison # # In this notebook we compare different recommendation", "import surprise import tensorflow as tf from sklearn.model_selection import train_test_split from tensorflow.python.framework.ops import", "In this notebook we compare different recommendation systems starting with the state-of-the-art LightGCN", "tell SparseTensor where the non-zero points are indices = np.mat([coo.row, coo.col]).transpose() # covert", "will tell SparseTensor where the non-zero points are indices = np.mat([coo.row, coo.col]).transpose() #", "test_reindex[['userId_new', 'movieId_new', 'rating']] # Create dictionaries so we can convert to and from", "1e-9, -0.5).flatten() D_inv_values[np.isinf(D_inv_values)] = 0.0 # Create sparse matrix with the values of", "# ### SVD++ # In[29]: surprise_train = surprise.Dataset.load_from_df(train.drop('timestamp', axis=1), reader=surprise.Reader('ml-100k')).build_full_trainset() svdpp = 
surprise.SVDpp(random_state=0,", "going back to the winning algorithm for 2009's Netflix Prize competition, SVD++. #", "pak = metrics.precision_at_k(rec, tester, 'userId', 'movieId', 'rank') rak = metrics.recall_at_k(rec, tester, 'userId', 'movieId',", "models using ranking metrics: # # # * Precision@k # * Recall@k #", "SVAE, SVD++, and SVD. Each model has their own individual notebooks where we", "np.where(test_data_te > 3.5, 1.0, 0.0) # retrieve real ratings from initial dataset test_data_te_ratings=pd.DataFrame(test_data_te_ratings)", "\"reco-tut-mlh\"; branch = \"main\"; account = \"sparsh-ai\" project_path = os.path.join('/content', project_name) # In[2]:", "SVAE.StandardVAE(n_users=train_data.shape[0], original_dim=train_data.shape[1], intermediate_dim=200, latent_dim=64, n_epochs=400, batch_size=100, k=10, verbose=0, seed=123, drop_encoder=0.5, drop_decoder=0.5, annealing=False, beta=1.0", "this demonstrates how far recommendation systems have advanced since 2009, and how new", "= sp.diags(D_inv_values) # Eval (D^-0.5 * A * D^-0.5). norm_adj_mat = D_inv_sq_root.dot(adj_mat).dot(D_inv_sq_root) #", "Filtering Comparison # # In this notebook we compare different recommendation systems starting", "ratings >= 4) df_preferred = raw_data[raw_data['rating'] > 3.5] df_low_rating = raw_data[raw_data['rating'] <= 3.5]", "tf from sklearn.model_selection import train_test_split from tensorflow.python.framework.ops import disable_eager_execution from tqdm import tqdm", "# Instantiate the sparse matrix generation for train, validation and test sets #", "get_ipython().magic(u'cd \"{project_path}\"') # In[34]: get_ipython().system(u'git status') # In[35]: get_ipython().system(u'git add . 
&& git", "get_ipython().system(u'pip install -q surprise') # In[8]: import math import numpy as np import", "test_set = test_set.loc[test_set['movieId'].isin(unique_train_items)] # Instantiate the sparse matrix generation for train, validation and", "In[17]: # Create DataFrame with reset index of 0-n_movies. movie_new = combined[['movieId']].drop_duplicates() movie_new['movieId_new']", "matrix with the user-item graph. adj_mat = sp.dok_matrix((n_users + n_movies, n_users + n_movies),", "#!/usr/bin/env python # coding: utf-8 # In[1]: import os project_name = \"reco-tut-mlh\"; branch", "all the recommendations. # Extract the top k recommendations from the predictions top_movies", "Binarize the data (only keep ratings >= 4) df_preferred = raw_data[raw_data['rating'] > 3.5]", "# Obtain both usercount and itemcount after filtering usercount = df[['userId']].groupby('userId', as_index =", "Instantiate the sparse matrix generation for train, validation and test sets # use", "dictionaries so we can convert to and from indexes item2id = dict(zip(movie_new['movieId'], movie_new['movieId_new']))", "= np.where(test_data_te > 3.5, 1.0, 0.0) # retrieve real ratings from initial dataset", "am_train = build_features.AffinityMatrix(df=train_set, items_list=unique_train_items) am_val = build_features.AffinityMatrix(df=val_set, items_list=unique_train_items) am_test = build_features.AffinityMatrix(df=test_set, items_list=unique_train_items) #", "and test data into training and testing parts val_data_tr, val_data_te = numpy_stratified_split(val_data, ratio=0.75,", "index to 0-n_users. test_reindex['userId_new'] = test_reindex['userId'] - 1 test_reindex = test_reindex[['userId_new', 'movieId_new', 'rating']]", "adj_mat.tolil() R = R.tolil() # Put together adjacency matrix. 
Movies and users are", "<NAME> & <NAME>, LightGCN: Simplifying and Powering Graph Convolution Network for Recommendation, 2020,", "# Create filter for users that appear in both the train and test", "(n_users - HELDOUT_USERS)] test_users = unique_users[(n_users - HELDOUT_USERS):] train_set = df.loc[df['userId'].isin(train_users)] val_set =", "In[27]: disable_eager_execution() svae_model = SVAE.StandardVAE(n_users=train_data.shape[0], original_dim=train_data.shape[1], intermediate_dim=200, latent_dim=64, n_epochs=400, batch_size=100, k=10, verbose=0, seed=123,", "= SVAE.StandardVAE(n_users=train_data.shape[0], original_dim=train_data.shape[1], intermediate_dim=200, latent_dim=64, n_epochs=400, batch_size=100, k=10, verbose=0, seed=123, drop_encoder=0.5, drop_decoder=0.5, annealing=False,", "same users between them test_common = test[test['userId'].isin(common_users)] svd_pred_common = merged[merged['userId'].isin(common_users)] if len(set(merged['userId'])) !=", "test_set = df.loc[df['userId'].isin(test_users)] unique_train_items = pd.unique(train_set['movieId']) val_set = val_set.loc[val_set['movieId'].isin(unique_train_items)] test_set = test_set.loc[test_set['movieId'].isin(unique_train_items)] #", "= val_data_te.copy() val_data_te = np.where(val_data_te > 3.5, 1.0, 0.0) # Binarize test data:", "top_movies.copy() top_k['rank'] = top_movies.groupby('userId', sort=False).cumcount() + 1 # For each user, only include", "svd_pred_common.groupby('userId', as_index=False).apply(lambda x: x.nlargest(10, 'prediction')).reset_index(drop=True) top_movies['rank'] = top_movies.groupby('userId', sort=False).cumcount() + 1 top_k =", "= 3) # In[22]: optimizer = tf.keras.optimizers.Adam(learning_rate=1e-2) light_model.fit(epochs=25, batch_size=1024, optimizer=optimizer) # ### Neural", "train, validation and test data train_data = np.where(train_data > 3.5, 1.0, 0.0) val_data", "# Reset index to 0-n_users. 
train_reindex['userId_new'] = train_reindex['userId'] - 1 train_reindex = train_reindex[['userId_new',", "been seen (seen=1) merged = pd.merge(temp, predictions, on=['userId', 'movieId'], how=\"outer\") merged = merged[merged['seen'].isnull()].drop('seen',", "are also in the test set recs.append(top_k) # ## Standard Variational Autoencoder (SVAE)", "<NAME>, & <NAME>, Neural Graph Collaorative Filtering, 2019, https://arxiv.org/abs/1905.08108 # 3. Microsoft SVAE", "mykeys get_ipython().system(u'rm /content/mykeys.py') path = \"/content/\" + project_name; get_ipython().system(u'mkdir \"{path}\"') get_ipython().magic(u'cd \"{path}\"') import", "unique_train_items = pd.unique(train_set['movieId']) val_set = val_set.loc[val_set['movieId'].isin(unique_train_items)] test_set = test_set.loc[test_set['movieId'].isin(unique_train_items)] # Instantiate the sparse", "test pak = metrics.precision_at_k(rec, tester, 'userId', 'movieId', 'rank') rak = metrics.recall_at_k(rec, tester, 'userId',", "_ = am_train.gen_affinity_matrix() val_data, val_map_users, val_map_items = am_val.gen_affinity_matrix() test_data, test_map_users, test_map_items = am_test.gen_affinity_matrix()", "train and test set common_users = set(test['userId']).intersection(set(predictions['userId'])) # Filter the test and predictions", "to df recommendations = am_test.map_back_sparse(top_k, kind='prediction') test_df = am_test.map_back_sparse(test_data_te_ratings, kind='ratings') # use test_data_te_,", "= pd.read_csv(fp, sep='\\t', names=['userId', 'movieId', 'rating', 'timestamp']) print(f'Shape: {raw_data.shape}') raw_data.sample(10, random_state=123) # In[10]:", "Create the adjaceny matrix with the user-item graph. adj_mat = sp.dok_matrix((n_users + n_movies,", "1.0, 0.0) val_data = np.where(val_data > 3.5, 1.0, 0.0) test_data = np.where(test_data >", "SVD++ and SVD # In[31]: for model in [svdpp, svd]: predictions = []", "= np.array(adj_mat.sum(1)) # Square root and inverse. 
D_inv_values = np.power(D_values + 1e-9, -0.5).flatten()", "add origin https://\"{mykeys.git_token}\":x-oauth-basic@github.com/\"{account}\"/\"{project_name}\".git') get_ipython().system(u'git pull origin \"{branch}\"') get_ipython().system(u'git checkout main') else: get_ipython().magic(u'cd \"{project_path}\"')", "test are NOT equal') print(f\"# of users in train and test respectively: {len(set(merged['userId']))},", "Singular Value Decomposition (SVD) # ### SVD++ # In[29]: surprise_train = surprise.Dataset.load_from_df(train.drop('timestamp', axis=1),", "that have alread been seen (seen=1) merged = pd.merge(temp, predictions, on=['userId', 'movieId'], how=\"outer\")", "that are also in the test set recs.append(top_k) # # Compare performance #", "train_reindex[['userId_new', 'movieId_new', 'rating']] test_reindex = pd.merge(test, movie_new, on='movieId', how='left') # Reset index to", "R adj_mat[n_users:, :n_users] = R.T adj_mat # In[19]: # Calculate degree matrix D", "the separate object, will be used for calculating NDCG) test_data_te_ratings = test_data_te.copy() test_data_te", "combined[['movieId']].drop_duplicates() movie_new['movieId_new'] = np.arange(len(movie_new)) train_reindex = pd.merge(train, movie_new, on='movieId', how='left') # Reset index", "training part test_data_tr = np.where(test_data_tr > 3.5, 1.0, 0.0) # Binarize test data:", "the older sister model to LightGCN, but only by a single year. We", "## Graph Convoultional Networks (GCNs) # ### Light Graph Convolution Network (LightGCN) #", "light_model.fit(epochs=25, batch_size=1024, optimizer=optimizer) # ### Neural Graph Collaborative Filtering (NGCF) # In[23]: ngcf_model", "Binarize validation data val_data_tr = np.where(val_data_tr > 3.5, 1.0, 0.0) val_data_te_ratings = val_data_te.copy()", "of lists. adj_mat.tolil() R = R.tolil() # Put together adjacency matrix. 
Movies and", "adj_mat # In[19]: # Calculate degree matrix D (for every row count the", "x.nlargest(10, 'prediction')).reset_index(drop=True) top_movies['rank'] = top_movies.groupby('userId', sort=False).cumcount() + 1 top_k = top_movies.copy() top_k['rank'] =", "sp.diags(D_inv_values) # Eval (D^-0.5 * A * D^-0.5). norm_adj_mat = D_inv_sq_root.dot(adj_mat).dot(D_inv_sq_root) # In[20]:", "In[26]: # Binarize the data (only keep ratings >= 4) df_preferred = raw_data[raw_data['rating']", "value if (test_map_users.get(user_old) is not None) and (test_map_items.get(item_old) is not None) : user_new=test_map_users.get(user_old)", "# # Compare performance # Looking at all 5 of our models, we", "= LightGCN(A_tilde, n_users = n_users, n_items = n_movies, n_layers = 3) # In[22]:", "np.power(D_values + 1e-9, -0.5).flatten() D_inv_values[np.isinf(D_inv_values)] = 0.0 # Create sparse matrix with the", "build_features.AffinityMatrix(df=train_set, items_list=unique_train_items) am_val = build_features.AffinityMatrix(df=val_set, items_list=unique_train_items) am_test = build_features.AffinityMatrix(df=test_set, items_list=unique_train_items) # Obtain the", "recommendations from the predictions top_movies = svd_pred_common.groupby('userId', as_index=False).apply(lambda x: x.nlargest(10, 'prediction')).reset_index(drop=True) top_movies['rank'] =", "i['userId'] # old value item_old=i['movieId'] # old value if (test_map_users.get(user_old) is not None)", "Binarize test data: training part test_data_tr = np.where(test_data_tr > 3.5, 1.0, 0.0) #", "adj_mat[:n_users, n_users:] = R adj_mat[n_users:, :n_users] = R.T adj_mat # In[19]: # Calculate", "users = np.array([user2id[x] for x in test['userId'].unique()]) recs = [] for model in", "- 1 train_reindex = train_reindex[['userId_new', 'movieId_new', 'rating']] test_reindex = pd.merge(test, movie_new, on='movieId', how='left')", "of users in train and test respectively: {len(set(merged['userId']))}, {len(set(test['userId']))}\") print(f\"# of 
users in", "print('Number of users in train and test are NOT equal') print(f\"# of users", "(val_map_users.get(user_old) is not None) and (val_map_items.get(item_old) is not None) : user_new=val_map_users.get(user_old) # new", "for train, validation and test sets train_data, _, _ = am_train.gen_affinity_matrix() val_data, val_map_users,", "n_movies), dtype=np.float32) # List of lists. adj_mat.tolil() R = R.tolil() # Put together", "## Singular Value Decomposition (SVD) # ### SVD++ # In[29]: surprise_train = surprise.Dataset.load_from_df(train.drop('timestamp',", "new value item_new=test_map_items.get(item_old) # new value rating=i['rating'] test_data_te_ratings.at[user_new,item_new]= rating if (val_map_users.get(user_old) is not", "recs = [] for model in [light_model, ngcf_model]: recommendations = model.recommend(users, k=10) recommendations", "compared to SVD++, a widely used algorithm during the Netflix Prize competition, LightGCN", "= tf.keras.optimizers.Adam(learning_rate=1e-2) light_model.fit(epochs=25, batch_size=1024, optimizer=optimizer) # ### Neural Graph Collaborative Filtering (NGCF) #", "# ## Singular Value Decomposition (SVD) # ### SVD++ # In[29]: surprise_train =", "during the Netflix Prize competition, LightGCN achieves an increase in **Percision@k by 29%,", "go more indepth, especially LightGCN and NGCF, where we implemented them from scratch", "os import pandas as pd import random import requests import scipy.sparse as sp", "disable_eager_execution() svae_model = SVAE.StandardVAE(n_users=train_data.shape[0], original_dim=train_data.shape[1], intermediate_dim=200, latent_dim=64, n_epochs=400, batch_size=100, k=10, verbose=0, seed=123, drop_encoder=0.5,", "fp = os.path.join('./data/bronze', 'u.data') raw_data = pd.read_csv(fp, sep='\\t', names=['userId', 'movieId', 'rating', 'timestamp']) print(f'Shape:", "training part of test set top_k = svae_model.recommend_k_items(x=test_data_tr,k=10,remove_seen=True) # Convert sparse matrix back", "# NGCF is 
the older sister model to LightGCN, but only by a", "Collaorative Filtering, 2019, https://arxiv.org/abs/1905.08108 # 3. Microsoft SVAE implementation: https://github.com/microsoft/recommenders/blob/main/examples/02_model_collaborative_filtering/standard_vae_deep_dive.ipynb # 4. <NAME>,", "scratch in Tensorflow. # # The last cell compares the performance of the", "df = df_preferred.groupby('userId').filter(lambda x: len(x) >= 5) df = df.groupby('movieId').filter(lambda x: len(x) >=", "R = R.tolil() # Put together adjacency matrix. Movies and users are nodes/vertices.", "verbose=0, seed=123, drop_encoder=0.5, drop_decoder=0.5, annealing=False, beta=1.0 ) svae_model.fit(x_train=train_data, x_valid=val_data, x_val_tr=val_data_tr, x_val_te=val_data_te_ratings, mapper=am_val )", "part (save non-binary version in the separate object, will be used for calculating", "in **Percision@k by 29%, Recall@k by 18%, MAP by 12%, and NDCG by", "Prize competition, LightGCN achieves an increase in **Percision@k by 29%, Recall@k by 18%,", "matix where users are rows and movies are columns. # 1 if a", "by 35%**. 
# # NGCF is the older sister model to LightGCN, but", "\"main\"; account = \"sparsh-ai\" project_path = os.path.join('/content', project_name) # In[2]: if not os.path.exists(project_path):", "surprise.SVD(random_state=0, n_factors=64, n_epochs=10, verbose=True) svd.fit(surprise_train) # ### Recommend with SVD++ and SVD #", "original ratings # Create column with the predicted movie's rank for each user", "fp = os.path.join('./data/bronze', 'u.item') movie_titles = pd.read_csv(fp, sep='|', names=['movieId', 'title'], usecols = range(2),", "common_users = set(test['userId']).intersection(set(predictions['userId'])) # Filter the test and predictions so they have the", "k=10) recommendations = recommendations.replace({'userId': id2user, 'movieId': id2item}) recommendations = recommendations.merge(movie_titles, how='left', on='movieId' )[['userId',", "# Looking at all 5 of our models, we can see that the", "ids users = np.array([user2id[x] for x in test['userId'].unique()]) recs = [] for model", "- HELDOUT_USERS * 2)] val_users = unique_users[(n_users - HELDOUT_USERS * 2) : (n_users", "are indices = np.mat([coo.row, coo.col]).transpose() # covert to sparse tensor A_tilde = tf.SparseTensor(indices,", "(for every row count the number of nonzero entries) D_values = np.array(adj_mat.sum(1)) #", "models # ## Graph Convoultional Networks (GCNs) # ### Light Graph Convolution Network", "# # The last cell compares the performance of the different models using", "3.5, 1.0, 0.0) # Binarize validation data val_data_tr = np.where(val_data_tr > 3.5, 1.0,", "df_low_rating.iterrows(): user_old= i['userId'] # old value item_old=i['movieId'] # old value if (test_map_users.get(user_old) is", "= range(2), encoding='iso-8859-1') print(f'Shape: {movie_titles.shape}') movie_titles.sample(10, random_state=123) # In[15]: train_size = 0.75 train,", "Neural Graph Collaorative Filtering, 2019, https://arxiv.org/abs/1905.08108 # 3. 
Microsoft SVAE implementation: https://github.com/microsoft/recommenders/blob/main/examples/02_model_collaborative_filtering/standard_vae_deep_dive.ipynb #", "git commit -m \\'commit\\' && git push origin \"{branch}\"') # In[7]: import sys", "the top k recommendations from the predictions top_movies = svd_pred_common.groupby('userId', as_index=False).apply(lambda x: x.nlargest(10,", "= ['LightGCN', 'NGCF', 'SVAE', 'SVD++', 'SVD'] comparison = pd.DataFrame(columns=['Algorithm', 'Precision@k', 'Recall@k', 'MAP', 'NDCG'])", "os.path.exists(project_path): get_ipython().system(u'cp /content/drive/MyDrive/mykeys.py /content') import mykeys get_ipython().system(u'rm /content/mykeys.py') path = \"/content/\" + project_name;", "how=\"outer\") merged = merged[merged['seen'].isnull()].drop('seen', axis=1) # Create filter for users that appear in", "* Precision@k # * Recall@k # * Mean Average Precision (MAP) # *", "= [] for model in [light_model, ngcf_model]: recommendations = model.recommend(users, k=10) recommendations =", "Eval (D^-0.5 * A * D^-0.5). norm_adj_mat = D_inv_sq_root.dot(adj_mat).dot(D_inv_sq_root) # In[20]: # to", "\"{path}\"') import sys; sys.path.append(path) get_ipython().system(u'git config --global user.email \"<EMAIL>\"') get_ipython().system(u'git config --global user.name", "origin \"{branch}\"') get_ipython().system(u'git checkout main') else: get_ipython().magic(u'cd \"{project_path}\"') # In[34]: get_ipython().system(u'git status') #", "List of lists. adj_mat.tolil() R = R.tolil() # Put together adjacency matrix. 
Movies", "matrix for train, validation and test sets train_data, _, _ = am_train.gen_affinity_matrix() val_data,", "'prediction']] # Create column with the predicted movie's rank for each user top_k", "1.0, 0.0) # Binarize test data: testing part (save non-binary version in the", "value item_old=i['movieId'] # old value if (test_map_users.get(user_old) is not None) and (test_map_items.get(item_old) is", "= combined[['movieId']].drop_duplicates() movie_new['movieId_new'] = np.arange(len(movie_new)) train_reindex = pd.merge(train, movie_new, on='movieId', how='left') # Reset", "# # References: # # 1. <NAME>, <NAME>, <NAME>, <NAME>, <NAME> & <NAME>,", ">= 1) # Obtain both usercount and itemcount after filtering usercount = df[['userId']].groupby('userId',", "# # Models include in order are LightGCN, NGCF, SVAE, SVD++, and SVD.", "the original ratings # Create column with the predicted movie's rank for each", "users in BOTH train and test: {len(set(svd_pred_common['userId']))}\") continue # From the predictions, we", "range(2), encoding='iso-8859-1') print(f'Shape: {movie_titles.shape}') movie_titles.sample(10, random_state=123) # In[15]: train_size = 0.75 train, test", "# ### Neural Graph Collaborative Filtering (NGCF) # In[23]: ngcf_model = NGCF(A_tilde, n_users", "0 if they didn't). R = sp.dok_matrix((n_users, n_movies), dtype=np.float32) R[train_reindex['userId_new'], train_reindex['movieId_new']] = 1", "and going back to the winning algorithm for 2009's Netflix Prize competition, SVD++.", "# old value item_old=i['movieId'] # old value if (test_map_users.get(user_old) is not None) and", "top_movies.groupby('userId', sort=False).cumcount() + 1 top_k = top_movies.copy() top_k['rank'] = top_movies.groupby('userId', sort=False).cumcount() + 1", "python # coding: utf-8 # In[1]: import os project_name = \"reco-tut-mlh\"; branch =", "LightGCN vastly outperforms all other models. 
When compared to SVD++, a widely used", "Remove movies already seen by users # Create column of all 1s temp", "# Load movie titles. fp = os.path.join('./data/bronze', 'u.item') movie_titles = pd.read_csv(fp, sep='|', names=['movieId',", "= n_movies, n_layers = 3) # In[22]: optimizer = tf.keras.optimizers.Adam(learning_rate=1e-2) light_model.fit(epochs=25, batch_size=1024, optimizer=optimizer)", "Neural Graph Collaborative Filtering (NGCF) # In[23]: ngcf_model = NGCF(A_tilde, n_users = n_users,", "# 3. Microsoft SVAE implementation: https://github.com/microsoft/recommenders/blob/main/examples/02_model_collaborative_filtering/standard_vae_deep_dive.ipynb # 4. <NAME>, Netflix Prize and SVD,", "as tf from sklearn.model_selection import train_test_split from tensorflow.python.framework.ops import disable_eager_execution from tqdm import", "our models, we can see that the state-of-the-art model LightGCN vastly outperforms all", "items: predictions.append([user, item, model.predict(user, item).est]) predictions = pd.DataFrame(predictions, columns=['userId', 'movieId', 'prediction']) # Remove", "points are indices = np.mat([coo.row, coo.col]).transpose() # covert to sparse tensor A_tilde =", "1.0, 0.0) # Binarize test data: training part test_data_tr = np.where(test_data_tr > 3.5,", "matrix back to df recommendations = am_test.map_back_sparse(top_k, kind='prediction') test_df = am_test.map_back_sparse(test_data_te_ratings, kind='ratings') #", "far recommendation systems have advanced since 2009, and how new model architectures with", "tensor A_tilde = tf.SparseTensor(indices, coo.data, coo.shape) A_tilde # # Train models # ##", "with the state-of-the-art LightGCN and going back to the winning algorithm for 2009's", "checkout main') else: get_ipython().magic(u'cd \"{project_path}\"') # In[34]: get_ipython().system(u'git status') # In[35]: get_ipython().system(u'git add", "In[34]: get_ipython().system(u'git status') # In[35]: get_ipython().system(u'git add . 
&& git commit -m \\'commit\\'", "In[7]: import sys sys.path.insert(0, './code') # --- # # Collaborative Filtering Comparison #", "# Train models # ## Graph Convoultional Networks (GCNs) # ### Light Graph", "# Create user-item graph (sparse matix where users are rows and movies are", "comparison # # References: # # 1. <NAME>, <NAME>, <NAME>, <NAME>, <NAME> &", "1.0, 0.0) # retrieve real ratings from initial dataset test_data_te_ratings=pd.DataFrame(test_data_te_ratings) val_data_te_ratings=pd.DataFrame(val_data_te_ratings) for index,i", "the span of just 1-2 years. # In[32]: model_names = ['LightGCN', 'NGCF', 'SVAE',", "starting with the state-of-the-art LightGCN and going back to the winning algorithm for", "In[28]: # Model prediction on the training part of test set top_k =", "movie, 0 if they didn't). R = sp.dok_matrix((n_users, n_movies), dtype=np.float32) R[train_reindex['userId_new'], train_reindex['movieId_new']] =", "top_k['rank'] = top_movies.groupby('userId', sort=False).cumcount() + 1 # For each user, only include movies", "recommendations = recommendations.merge(movie_titles, how='left', on='movieId' )[['userId', 'movieId', 'title', 'prediction']] # Create column with", "* 2)] val_users = unique_users[(n_users - HELDOUT_USERS * 2) : (n_users - HELDOUT_USERS)]", "From the predictions, we want only the top k for each user, #", "= test_reindex['userId'] - 1 test_reindex = test_reindex[['userId_new', 'movieId_new', 'rating']] # Create dictionaries so", "1 top_k = top_movies.copy() top_k['rank'] = top_movies.groupby('userId', sort=False).cumcount() + 1 # For each", "# # # Imports # In[4]: get_ipython().system(u'pip install -q surprise') # In[8]: import", "developed in the span of just 1-2 years. 
# In[32]: model_names = ['LightGCN',", "--global user.email \"<EMAIL>\"') get_ipython().system(u'git config --global user.name \"reco-tut\"') get_ipython().system(u'git init') get_ipython().system(u'git remote add", "from models import SVAE from models.GCN import LightGCN, NGCF # # Prepare data", "# In[27]: disable_eager_execution() svae_model = SVAE.StandardVAE(n_users=train_data.shape[0], original_dim=train_data.shape[1], intermediate_dim=200, latent_dim=64, n_epochs=400, batch_size=100, k=10, verbose=0,", ">= 4) df_preferred = raw_data[raw_data['rating'] > 3.5] df_low_rating = raw_data[raw_data['rating'] <= 3.5] df", "am_test.map_back_sparse(test_data_te_ratings, kind='ratings') # use test_data_te_, with the original ratings # Create column with", "from the predictions top_movies = svd_pred_common.groupby('userId', as_index=False).apply(lambda x: x.nlargest(10, 'prediction')).reset_index(drop=True) top_movies['rank'] = top_movies.groupby('userId',", "val_data_te_ratings = val_data_te.copy() val_data_te = np.where(val_data_te > 3.5, 1.0, 0.0) # Binarize test", "rank for each user top_k = recommendations.copy() top_k['rank'] = recommendations.groupby('userId', sort=False).cumcount() + 1", "= metrics.mean_average_precision(rec, tester, 'userId', 'movieId', 'rank') ndcg = metrics.ndcg(rec, tester, 'userId', 'movieId', 'rank')", "pd.read_csv(fp, sep='\\t', names=['userId', 'movieId', 'rating', 'timestamp']) print(f'Shape: {raw_data.shape}') raw_data.sample(10, random_state=123) # In[10]: #", "= np.where(test_data > 3.5, 1.0, 0.0) # Binarize validation data val_data_tr = np.where(val_data_tr", "dtype=np.float32) R[train_reindex['userId_new'], train_reindex['movieId_new']] = 1 # Create the adjaceny matrix with the user-item", "# ### Recommend with LightGCN and NGCF # In[24]: # Convert test user", "random_state=123) # In[10]: # Load movie titles. 
fp = os.path.join('./data/bronze', 'u.item') movie_titles =", "train, validation and test sets train_data, _, _ = am_train.gen_affinity_matrix() val_data, val_map_users, val_map_items", "config --global user.name \"reco-tut\"') get_ipython().system(u'git init') get_ipython().system(u'git remote add origin https://\"{mykeys.git_token}\":x-oauth-basic@github.com/\"{account}\"/\"{project_name}\".git') get_ipython().system(u'git pull", "pd import random import requests import scipy.sparse as sp import surprise import tensorflow", "Reset index to 0-n_users. train_reindex['userId_new'] = train_reindex['userId'] - 1 train_reindex = train_reindex[['userId_new', 'movieId_new',", "A_tilde # # Train models # ## Graph Convoultional Networks (GCNs) # ###", "### Recommend with SVAE # In[28]: # Model prediction on the training part", "n_movies) # In[17]: # Create DataFrame with reset index of 0-n_movies. movie_new =", "raw_data[raw_data['rating'] <= 3.5] df = df_preferred.groupby('userId').filter(lambda x: len(x) >= 5) df = df.groupby('movieId').filter(lambda", "and movies are columns. # 1 if a user reviewed that movie, 0", "<NAME>, <NAME> & <NAME>, LightGCN: Simplifying and Powering Graph Convolution Network for Recommendation,", "the performance of the different models using ranking metrics: # # # *", "metrics: # # # * Precision@k # * Recall@k # * Mean Average", "in zip(recs, model_names): tester = test_df if name == 'SVAE' else test pak", "In[19]: # Calculate degree matrix D (for every row count the number of", "n_factors=64, n_epochs=10, verbose=True) svd.fit(surprise_train) # ### Recommend with SVD++ and SVD # In[31]:", "users # Create column of all 1s temp = train[['userId', 'movieId']].copy() temp['seen'] =", "an increase in **Percision@k by 29%, Recall@k by 18%, MAP by 12%, and", "sp.dok_matrix((n_users + n_movies, n_users + n_movies), dtype=np.float32) # List of lists. adj_mat.tolil() R", "n_movies, n_users + n_movies), dtype=np.float32) # List of lists. 
adj_mat.tolil() R = R.tolil()", "metrics from models import SVAE from models.GCN import LightGCN, NGCF # # Prepare", "rows and movies are columns. # 1 if a user reviewed that movie,", "for train, validation and test sets # use list of unique items from", "train_reindex['userId'] - 1 train_reindex = train_reindex[['userId_new', 'movieId_new', 'rating']] test_reindex = pd.merge(test, movie_new, on='movieId',", "recommendations.groupby('userId', sort=False).cumcount() + 1 # For each user, only include movies recommendations that", "each user top_k = recommendations.copy() top_k['rank'] = recommendations.groupby('userId', sort=False).cumcount() + 1 # For", "= recommendations.copy() top_k['rank'] = recommendations.groupby('userId', sort=False).cumcount() + 1 # For each user, only", "SVD # In[30]: svd = surprise.SVD(random_state=0, n_factors=64, n_epochs=10, verbose=True) svd.fit(surprise_train) # ### Recommend", "movies recommendations that are also in the test set recs.append(top_k) # # Compare", "None) : user_new=val_map_users.get(user_old) # new value item_new=val_map_items.get(item_old) # new value rating=i['rating'] val_data_te_ratings.at[user_new,item_new]= rating", "value rating=i['rating'] test_data_te_ratings.at[user_new,item_new]= rating if (val_map_users.get(user_old) is not None) and (val_map_items.get(item_old) is not", "of users:', n_users) n_movies = combined['movieId'].nunique() print('Number of movies:', n_movies) # In[17]: #", "1 if a user reviewed that movie, 0 if they didn't). R =", "simply removing unnecessary operations. # # In conclusion, this demonstrates how far recommendation", "own individual notebooks where we go more indepth, especially LightGCN and NGCF, where", "kind='ratings') # use test_data_te_, with the original ratings # Create column with the", "how='left') # Reset index to 0-n_users. 
train_reindex['userId_new'] = train_reindex['userId'] - 1 train_reindex =", "for users that appear in both the train and test set common_users =", "18%, MAP by 12%, and NDCG by 35%**. # # NGCF is the", "\"reco-tut\"') get_ipython().system(u'git init') get_ipython().system(u'git remote add origin https://\"{mykeys.git_token}\":x-oauth-basic@github.com/\"{account}\"/\"{project_name}\".git') get_ipython().system(u'git pull origin \"{branch}\"') get_ipython().system(u'git", "# use test_data_te_, with the original ratings # Create column with the predicted", "how='left') # Reset index to 0-n_users. test_reindex['userId_new'] = test_reindex['userId'] - 1 test_reindex =", "# # where $k=10$ # # # # Imports # In[4]: get_ipython().system(u'pip install", "Create column with the predicted movie's rank for each user top_k = recommendations.copy()", "<NAME>, <NAME>, <NAME>, <NAME>, <NAME> & <NAME>, LightGCN: Simplifying and Powering Graph Convolution", "sys; sys.path.append(path) get_ipython().system(u'git config --global user.email \"<EMAIL>\"') get_ipython().system(u'git config --global user.name \"reco-tut\"') get_ipython().system(u'git", "35%**. # # NGCF is the older sister model to LightGCN, but only", "in test['userId'].unique()]) recs = [] for model in [light_model, ngcf_model]: recommendations = model.recommend(users,", "# covert to sparse tensor A_tilde = tf.SparseTensor(indices, coo.data, coo.shape) A_tilde # #", "svd]: predictions = [] users = train['userId'].unique() items = train['movieId'].unique() for user in", "generation for train, validation and test sets # use list of unique items", "= pd.DataFrame(predictions, columns=['userId', 'movieId', 'prediction']) # Remove movies already seen by users #", "NGCF by simply removing unnecessary operations. 
# # In conclusion, this demonstrates how", "model in [svdpp, svd]: predictions = [] users = train['userId'].unique() items = train['movieId'].unique()", "R[train_reindex['userId_new'], train_reindex['movieId_new']] = 1 # Create the adjaceny matrix with the user-item graph.", "= numpy_stratified_split(val_data, ratio=0.75, seed=123) test_data_tr, test_data_te = numpy_stratified_split(test_data, ratio=0.75, seed=123) # Binarize train,", "'movieId', 'rank') comparison.loc[len(comparison)] = [name, pak, rak, map, ndcg] # In[33]: comparison #", "prediction on the training part of test set top_k = svae_model.recommend_k_items(x=test_data_tr,k=10,remove_seen=True) # Convert", "Prepare data # In[9]: fp = os.path.join('./data/bronze', 'u.data') raw_data = pd.read_csv(fp, sep='\\t', names=['userId',", "encoding='iso-8859-1') print(f'Shape: {movie_titles.shape}') movie_titles.sample(10, random_state=123) # In[15]: train_size = 0.75 train, test =", "= stratified_split(raw_data, 'userId', train_size) print(f'Train Shape: {train.shape}') print(f'Test Shape: {test.shape}') print(f'Do they have", "train.append(test) n_users = combined['userId'].nunique() print('Number of users:', n_users) n_movies = combined['movieId'].nunique() print('Number of", "'userId', train_size) print(f'Train Shape: {train.shape}') print(f'Test Shape: {test.shape}') print(f'Do they have the same", "= os.path.join('./data/bronze', 'u.item') movie_titles = pd.read_csv(fp, sep='|', names=['movieId', 'title'], usecols = range(2), encoding='iso-8859-1')", "'NDCG']) # Convert test user ids to the new ids users = np.array([user2id[x]", "sets am_train = build_features.AffinityMatrix(df=train_set, items_list=unique_train_items) am_val = build_features.AffinityMatrix(df=val_set, items_list=unique_train_items) am_test = build_features.AffinityMatrix(df=test_set, items_list=unique_train_items)", "> 3.5, 1.0, 0.0) val_data = np.where(val_data > 3.5, 1.0, 0.0) test_data =", "MAP by 12%, and NDCG by 35%**. 
# # NGCF is the older", "predictions.append([user, item, model.predict(user, item).est]) predictions = pd.DataFrame(predictions, columns=['userId', 'movieId', 'prediction']) # Remove movies", "# Binarize test data: testing part (save non-binary version in the separate object,", "the data (only keep ratings >= 4) df_preferred = raw_data[raw_data['rating'] > 3.5] df_low_rating", "= False).size() itemcount = df[['movieId']].groupby('movieId', as_index = False).size() unique_users =sorted(df.userId.unique()) np.random.seed(123) unique_users =", "print(f'Train Shape: {train.shape}') print(f'Test Shape: {test.shape}') print(f'Do they have the same users?: {set(train.userId)", "n_items = n_movies, n_layers = 3) # In[22]: optimizer = tf.keras.optimizers.Adam(learning_rate=1e-2) light_model.fit(epochs=25, batch_size=1024,", "in the test set recs.append(top_k) # ## Standard Variational Autoencoder (SVAE) # In[26]:", "users = np.array([user2id[x] for x in test['userId'].unique()]) for rec, name in zip(recs, model_names):", "= numpy_stratified_split(test_data, ratio=0.75, seed=123) # Binarize train, validation and test data train_data =", "'./code') # --- # # Collaborative Filtering Comparison # # In this notebook", "account = \"sparsh-ai\" project_path = os.path.join('/content', project_name) # In[2]: if not os.path.exists(project_path): get_ipython().system(u'cp", "NGCF # # Prepare data # In[9]: fp = os.path.join('./data/bronze', 'u.data') raw_data =", "nonzero entries) D_values = np.array(adj_mat.sum(1)) # Square root and inverse. D_inv_values = np.power(D_values", "am_val.gen_affinity_matrix() test_data, test_map_users, test_map_items = am_test.gen_affinity_matrix() # Split validation and test data into", "test_map_users, test_map_items = am_test.gen_affinity_matrix() # Split validation and test data into training and", "= R.tolil() # Put together adjacency matrix. Movies and users are nodes/vertices. 
#", "np.where(test_data > 3.5, 1.0, 0.0) # Binarize validation data val_data_tr = np.where(val_data_tr >", "<NAME>, <NAME>, <NAME> & <NAME>, LightGCN: Simplifying and Powering Graph Convolution Network for", "that are also in the test set recs.append(top_k) # ## Singular Value Decomposition", "are NOT equal') print(f\"# of users in train and test respectively: {len(set(merged['userId']))}, {len(set(test['userId']))}\")", "dataset test_data_te_ratings=pd.DataFrame(test_data_te_ratings) val_data_te_ratings=pd.DataFrame(val_data_te_ratings) for index,i in df_low_rating.iterrows(): user_old= i['userId'] # old value item_old=i['movieId']", "keep ratings >= 4) df_preferred = raw_data[raw_data['rating'] > 3.5] df_low_rating = raw_data[raw_data['rating'] <=", "0.75 train, test = stratified_split(raw_data, 'userId', train_size) print(f'Train Shape: {train.shape}') print(f'Test Shape: {test.shape}')", "x in test['userId'].unique()]) for rec, name in zip(recs, model_names): tester = test_df if", "verbose=True) svdpp.fit(surprise_train) # ### SVD # In[30]: svd = surprise.SVD(random_state=0, n_factors=64, n_epochs=10, verbose=True)", "= set(test['userId']).intersection(set(predictions['userId'])) # Filter the test and predictions so they have the same", "DataFrame with reset index of 0-n_movies. movie_new = combined[['movieId']].drop_duplicates() movie_new['movieId_new'] = np.arange(len(movie_new)) train_reindex", "Reset index to 0-n_users. test_reindex['userId_new'] = test_reindex['userId'] - 1 test_reindex = test_reindex[['userId_new', 'movieId_new',", "set(test.userId)}') # In[16]: combined = train.append(test) n_users = combined['userId'].nunique() print('Number of users:', n_users)", "In[10]: # Load movie titles. fp = os.path.join('./data/bronze', 'u.item') movie_titles = pd.read_csv(fp, sep='|',", "test_reindex = pd.merge(test, movie_new, on='movieId', how='left') # Reset index to 0-n_users. 
test_reindex['userId_new'] =", "use list of unique items from training set for all sets am_train =", "SVD. Each model has their own individual notebooks where we go more indepth,", "random import requests import scipy.sparse as sp import surprise import tensorflow as tf", "seed=123, drop_encoder=0.5, drop_decoder=0.5, annealing=False, beta=1.0 ) svae_model.fit(x_train=train_data, x_valid=val_data, x_val_tr=val_data_tr, x_val_te=val_data_te_ratings, mapper=am_val ) #", "get_ipython().system(u'git remote add origin https://\"{mykeys.git_token}\":x-oauth-basic@github.com/\"{account}\"/\"{project_name}\".git') get_ipython().system(u'git pull origin \"{branch}\"') get_ipython().system(u'git checkout main') else:", "raw_data = pd.read_csv(fp, sep='\\t', names=['userId', 'movieId', 'rating', 'timestamp']) print(f'Shape: {raw_data.shape}') raw_data.sample(10, random_state=123) #", "degree matrix D (for every row count the number of nonzero entries) D_values", "0.0) # retrieve real ratings from initial dataset test_data_te_ratings=pd.DataFrame(test_data_te_ratings) val_data_te_ratings=pd.DataFrame(val_data_te_ratings) for index,i in", "ranking metrics: # # # * Precision@k # * Recall@k # * Mean", "and predictions so they have the same users between them test_common = test[test['userId'].isin(common_users)]", "for Recommendation, 2020, https://arxiv.org/abs/2002.02126 # 2. 
<NAME>, <NAME>, <NAME>, <NAME>, & <NAME>, Neural", "matrix D (for every row count the number of nonzero entries) D_values =", "new value rating=i['rating'] val_data_te_ratings.at[user_new,item_new]= rating val_data_te_ratings=val_data_te_ratings.to_numpy() test_data_te_ratings=test_data_te_ratings.to_numpy() # In[27]: disable_eager_execution() svae_model = SVAE.StandardVAE(n_users=train_data.shape[0],", "main') else: get_ipython().magic(u'cd \"{project_path}\"') # In[34]: get_ipython().system(u'git status') # In[35]: get_ipython().system(u'git add .", "itemcount = df[['movieId']].groupby('movieId', as_index = False).size() unique_users =sorted(df.userId.unique()) np.random.seed(123) unique_users = np.random.permutation(unique_users) HELDOUT_USERS", "by 29%, Recall@k by 18%, MAP by 12%, and NDCG by 35%**. #", "drop_encoder=0.5, drop_decoder=0.5, annealing=False, beta=1.0 ) svae_model.fit(x_train=train_data, x_valid=val_data, x_val_tr=val_data_tr, x_val_te=val_data_te_ratings, mapper=am_val ) # ###", "initial dataset test_data_te_ratings=pd.DataFrame(test_data_te_ratings) val_data_te_ratings=pd.DataFrame(val_data_te_ratings) for index,i in df_low_rating.iterrows(): user_old= i['userId'] # old value", "test_data_tr, test_data_te = numpy_stratified_split(test_data, ratio=0.75, seed=123) # Binarize train, validation and test data", "1.0, 0.0) # Binarize validation data val_data_tr = np.where(val_data_tr > 3.5, 1.0, 0.0)", "the new ids users = np.array([user2id[x] for x in test['userId'].unique()]) recs = []", "Filtering (NGCF) # In[23]: ngcf_model = NGCF(A_tilde, n_users = n_users, n_items = n_movies,", "top_movies = svd_pred_common.groupby('userId', as_index=False).apply(lambda x: x.nlargest(10, 'prediction')).reset_index(drop=True) top_movies['rank'] = top_movies.groupby('userId', sort=False).cumcount() + 1", "last cell compares the performance of the different models using ranking metrics: #", "= model.recommend(users, k=10) recommendations = 
recommendations.replace({'userId': id2user, 'movieId': id2item}) recommendations = recommendations.merge(movie_titles, how='left',", "tester, 'userId', 'movieId', 'rank') comparison.loc[len(comparison)] = [name, pak, rak, map, ndcg] # In[33]:", "demonstrates how far recommendation systems have advanced since 2009, and how new model", "SVD++. # # Models include in order are LightGCN, NGCF, SVAE, SVD++, and", "import tqdm from utils import stratified_split, numpy_stratified_split import build_features import metrics from models", "movies already seen by users # Create column of all 1s temp =", "to SVD++, a widely used algorithm during the Netflix Prize competition, LightGCN achieves", "training set for all sets am_train = build_features.AffinityMatrix(df=train_set, items_list=unique_train_items) am_val = build_features.AffinityMatrix(df=val_set, items_list=unique_train_items)", "item, model.predict(user, item).est]) predictions = pd.DataFrame(predictions, columns=['userId', 'movieId', 'prediction']) # Remove movies already", "'rating', 'timestamp']) print(f'Shape: {raw_data.shape}') raw_data.sample(10, random_state=123) # In[10]: # Load movie titles. fp", "In[23]: ngcf_model = NGCF(A_tilde, n_users = n_users, n_items = n_movies, n_layers = 3", "Graph Convolution Network for Recommendation, 2020, https://arxiv.org/abs/2002.02126 # 2. <NAME>, <NAME>, <NAME>, <NAME>,", "/content/drive/MyDrive/mykeys.py /content') import mykeys get_ipython().system(u'rm /content/mykeys.py') path = \"/content/\" + project_name; get_ipython().system(u'mkdir \"{path}\"')", "state-of-the-art model LightGCN vastly outperforms all other models. When compared to SVD++, a", ". 
&& git commit -m \\'commit\\' && git push origin \"{branch}\"') # In[7]:", "test_data_te_ratings=pd.DataFrame(test_data_te_ratings) val_data_te_ratings=pd.DataFrame(val_data_te_ratings) for index,i in df_low_rating.iterrows(): user_old= i['userId'] # old value item_old=i['movieId'] #", "and SVD # In[31]: for model in [svdpp, svd]: predictions = [] users", "user.email \"<EMAIL>\"') get_ipython().system(u'git config --global user.name \"reco-tut\"') get_ipython().system(u'git init') get_ipython().system(u'git remote add origin", "# ### Recommend with SVAE # In[28]: # Model prediction on the training", "= n_users, n_items = n_movies, n_layers = 3 ) ngcf_model.fit(epochs=25, batch_size=1024, optimizer=optimizer) #", "indices = np.mat([coo.row, coo.col]).transpose() # covert to sparse tensor A_tilde = tf.SparseTensor(indices, coo.data,", "Light Graph Convolution Network (LightGCN) # In[21]: light_model = LightGCN(A_tilde, n_users = n_users,", "where we go more indepth, especially LightGCN and NGCF, where we implemented them", "am_test.gen_affinity_matrix() # Split validation and test data into training and testing parts val_data_tr,", "implemented them from scratch in Tensorflow. # # The last cell compares the", "connected. adj_mat[:n_users, n_users:] = R adj_mat[n_users:, :n_users] = R.T adj_mat # In[19]: #", "train[['userId', 'movieId']].copy() temp['seen'] = 1 # Outer join and remove movies that have", "they didn't). 
R = sp.dok_matrix((n_users, n_movies), dtype=np.float32) R[train_reindex['userId_new'], train_reindex['movieId_new']] = 1 # Create", "import mykeys get_ipython().system(u'rm /content/mykeys.py') path = \"/content/\" + project_name; get_ipython().system(u'mkdir \"{path}\"') get_ipython().magic(u'cd \"{path}\"')", "= df.loc[df['userId'].isin(val_users)] test_set = df.loc[df['userId'].isin(test_users)] unique_train_items = pd.unique(train_set['movieId']) val_set = val_set.loc[val_set['movieId'].isin(unique_train_items)] test_set =", "train, test = stratified_split(raw_data, 'userId', train_size) print(f'Train Shape: {train.shape}') print(f'Test Shape: {test.shape}') print(f'Do", "0-n_movies. movie_new = combined[['movieId']].drop_duplicates() movie_new['movieId_new'] = np.arange(len(movie_new)) train_reindex = pd.merge(train, movie_new, on='movieId', how='left')", "3.5, 1.0, 0.0) # retrieve real ratings from initial dataset test_data_te_ratings=pd.DataFrame(test_data_te_ratings) val_data_te_ratings=pd.DataFrame(val_data_te_ratings) for", "= build_features.AffinityMatrix(df=val_set, items_list=unique_train_items) am_test = build_features.AffinityMatrix(df=test_set, items_list=unique_train_items) # Obtain the sparse matrix for", "NOT equal') print(f\"# of users in train and test respectively: {len(set(merged['userId']))}, {len(set(test['userId']))}\") print(f\"#", "new value rating=i['rating'] test_data_te_ratings.at[user_new,item_new]= rating if (val_map_users.get(user_old) is not None) and (val_map_items.get(item_old) is", "= np.where(val_data_tr > 3.5, 1.0, 0.0) val_data_te_ratings = val_data_te.copy() val_data_te = np.where(val_data_te >", "is not None) and (test_map_items.get(item_old) is not None) : user_new=test_map_users.get(user_old) # new value", "= np.mat([coo.row, coo.col]).transpose() # covert to sparse tensor A_tilde = tf.SparseTensor(indices, coo.data, coo.shape)", "commit -m \\'commit\\' && git push origin \"{branch}\"') # In[7]: import sys 
sys.path.insert(0,", "graph. adj_mat = sp.dok_matrix((n_users + n_movies, n_users + n_movies), dtype=np.float32) # List of", "train_users = unique_users[:(n_users - HELDOUT_USERS * 2)] val_users = unique_users[(n_users - HELDOUT_USERS *", "predicted movie's rank for each user top_k = recommendations.copy() top_k['rank'] = recommendations.groupby('userId', sort=False).cumcount()", "top k for each user, # not all the recommendations. # Extract the", "SVD++ # In[29]: surprise_train = surprise.Dataset.load_from_df(train.drop('timestamp', axis=1), reader=surprise.Reader('ml-100k')).build_full_trainset() svdpp = surprise.SVDpp(random_state=0, n_factors=64, n_epochs=10,", "ngcf_model = NGCF(A_tilde, n_users = n_users, n_items = n_movies, n_layers = 3 )", "top_k = top_movies.copy() top_k['rank'] = top_movies.groupby('userId', sort=False).cumcount() + 1 # For each user,", "recommendations. # Extract the top k recommendations from the predictions top_movies = svd_pred_common.groupby('userId',", "= np.array([user2id[x] for x in test['userId'].unique()]) recs = [] for model in [light_model,", "Compare performance # Looking at all 5 of our models, we can see", "'Precision@k', 'Recall@k', 'MAP', 'NDCG']) # Convert test user ids to the new ids", "and remove movies that have alread been seen (seen=1) merged = pd.merge(temp, predictions,", "they have the same users?: {set(train.userId) == set(test.userId)}') # In[16]: combined = train.append(test)", "model.recommend(users, k=10) recommendations = recommendations.replace({'userId': id2user, 'movieId': id2item}) recommendations = recommendations.merge(movie_titles, how='left', on='movieId'", "annealing=False, beta=1.0 ) svae_model.fit(x_train=train_data, x_valid=val_data, x_val_tr=val_data_tr, x_val_te=val_data_te_ratings, mapper=am_val ) # ### Recommend with", "name in zip(recs, model_names): tester = test_df if name == 'SVAE' else test", "Square root and inverse. 
D_inv_values = np.power(D_values + 1e-9, -0.5).flatten() D_inv_values[np.isinf(D_inv_values)] = 0.0", ": user_new=val_map_users.get(user_old) # new value item_new=val_map_items.get(item_old) # new value rating=i['rating'] val_data_te_ratings.at[user_new,item_new]= rating val_data_te_ratings=val_data_te_ratings.to_numpy()", "beta=1.0 ) svae_model.fit(x_train=train_data, x_valid=val_data, x_val_tr=val_data_tr, x_val_te=val_data_te_ratings, mapper=am_val ) # ### Recommend with SVAE", "ids to the new ids users = np.array([user2id[x] for x in test['userId'].unique()]) recs", "usercount = df[['userId']].groupby('userId', as_index = False).size() itemcount = df[['movieId']].groupby('movieId', as_index = False).size() unique_users", "= combined['userId'].nunique() print('Number of users:', n_users) n_movies = combined['movieId'].nunique() print('Number of movies:', n_movies)", "# Create the adjaceny matrix with the user-item graph. adj_mat = sp.dok_matrix((n_users +", "= metrics.precision_at_k(rec, tester, 'userId', 'movieId', 'rank') rak = metrics.recall_at_k(rec, tester, 'userId', 'movieId', 'rank')", "combined['movieId'].nunique() print('Number of movies:', n_movies) # In[17]: # Create DataFrame with reset index", "# Binarize the data (only keep ratings >= 4) df_preferred = raw_data[raw_data['rating'] >", "get_ipython().system(u'git checkout main') else: get_ipython().magic(u'cd \"{project_path}\"') # In[34]: get_ipython().system(u'git status') # In[35]: get_ipython().system(u'git", "# * Precision@k # * Recall@k # * Mean Average Precision (MAP) #", "else: get_ipython().magic(u'cd \"{project_path}\"') # In[34]: get_ipython().system(u'git status') # In[35]: get_ipython().system(u'git add . 
&&", "sys sys.path.insert(0, './code') # --- # # Collaborative Filtering Comparison # # In", "import os project_name = \"reco-tut-mlh\"; branch = \"main\"; account = \"sparsh-ai\" project_path =", "also in the test set recs.append(top_k) # ## Standard Variational Autoencoder (SVAE) #", "testing parts val_data_tr, val_data_te = numpy_stratified_split(val_data, ratio=0.75, seed=123) test_data_tr, test_data_te = numpy_stratified_split(test_data, ratio=0.75,", "val_set = df.loc[df['userId'].isin(val_users)] test_set = df.loc[df['userId'].isin(test_users)] unique_train_items = pd.unique(train_set['movieId']) val_set = val_set.loc[val_set['movieId'].isin(unique_train_items)] test_set", "matrix. Movies and users are nodes/vertices. # 1 if the movie and user", "Convoultional Networks (GCNs) # ### Light Graph Convolution Network (LightGCN) # In[21]: light_model", "svd_pred_common = merged[merged['userId'].isin(common_users)] if len(set(merged['userId'])) != len(set(test['userId'])): print('Number of users in train and", "# In[23]: ngcf_model = NGCF(A_tilde, n_users = n_users, n_items = n_movies, n_layers =", "real ratings from initial dataset test_data_te_ratings=pd.DataFrame(test_data_te_ratings) val_data_te_ratings=pd.DataFrame(val_data_te_ratings) for index,i in df_low_rating.iterrows(): user_old= i['userId']", "k recommendations from the predictions top_movies = svd_pred_common.groupby('userId', as_index=False).apply(lambda x: x.nlargest(10, 'prediction')).reset_index(drop=True) top_movies['rank']", "df.loc[df['userId'].isin(val_users)] test_set = df.loc[df['userId'].isin(test_users)] unique_train_items = pd.unique(train_set['movieId']) val_set = val_set.loc[val_set['movieId'].isin(unique_train_items)] test_set = test_set.loc[test_set['movieId'].isin(unique_train_items)]", "order are LightGCN, NGCF, SVAE, SVD++, and SVD. 
Each model has their own", "performance of the different models using ranking metrics: # # # * Precision@k", "# new value item_new=test_map_items.get(item_old) # new value rating=i['rating'] test_data_te_ratings.at[user_new,item_new]= rating if (val_map_users.get(user_old) is", "systems have advanced since 2009, and how new model architectures with notable performance", "index,i in df_low_rating.iterrows(): user_old= i['userId'] # old value item_old=i['movieId'] # old value if", "# # Imports # In[4]: get_ipython().system(u'pip install -q surprise') # In[8]: import math", "In[8]: import math import numpy as np import os import pandas as pd", "= build_features.AffinityMatrix(df=train_set, items_list=unique_train_items) am_val = build_features.AffinityMatrix(df=val_set, items_list=unique_train_items) am_test = build_features.AffinityMatrix(df=test_set, items_list=unique_train_items) # Obtain", "predictions = pd.DataFrame(predictions, columns=['userId', 'movieId', 'prediction']) # Remove movies already seen by users", "# old value if (test_map_users.get(user_old) is not None) and (test_map_items.get(item_old) is not None)", "https://\"{mykeys.git_token}\":x-oauth-basic@github.com/\"{account}\"/\"{project_name}\".git') get_ipython().system(u'git pull origin \"{branch}\"') get_ipython().system(u'git checkout main') else: get_ipython().magic(u'cd \"{project_path}\"') # In[34]:", "metrics.ndcg(rec, tester, 'userId', 'movieId', 'rank') comparison.loc[len(comparison)] = [name, pak, rak, map, ndcg] #", "'MAP', 'NDCG']) # Convert test user ids to the new ids users =", "of D^(-0.5) are the diagonals. D_inv_sq_root = sp.diags(D_inv_values) # Eval (D^-0.5 * A", "# List of lists. 
adj_mat.tolil() R = R.tolil() # Put together adjacency matrix.", "np.array([user2id[x] for x in test['userId'].unique()]) for rec, name in zip(recs, model_names): tester =", "# ### Recommend with SVD++ and SVD # In[31]: for model in [svdpp,", "unique_users = np.random.permutation(unique_users) HELDOUT_USERS = 200 # Create train/validation/test users n_users = len(unique_users)", "merged = pd.merge(temp, predictions, on=['userId', 'movieId'], how=\"outer\") merged = merged[merged['seen'].isnull()].drop('seen', axis=1) # Create", "sparse matrix with the values of D^(-0.5) are the diagonals. D_inv_sq_root = sp.diags(D_inv_values)", "-0.5).flatten() D_inv_values[np.isinf(D_inv_values)] = 0.0 # Create sparse matrix with the values of D^(-0.5)", "= dict(zip(movie_new['movieId'], movie_new['movieId_new'])) id2item = dict(zip(movie_new['movieId_new'], movie_new['movieId'])) user2id = dict(zip(train['userId'], train_reindex['userId_new'])) id2user =", "= am_test.map_back_sparse(test_data_te_ratings, kind='ratings') # use test_data_te_, with the original ratings # Create column", "+ 1e-9, -0.5).flatten() D_inv_values[np.isinf(D_inv_values)] = 0.0 # Create sparse matrix with the values", "test set recs.append(top_k) # # Compare performance # Looking at all 5 of" ]
[ "os import sys import time from contextlib import suppress from signal import SIGTERM", "sys import time from contextlib import suppress from signal import SIGTERM class Daemon:", "return int(fh.read().strip()) def error(self, message): sys.stderr.write(f\"{message}\\n\") sys.exit(1) def restart(self): self.stop() self.start() def run(self):", "try: while 1: os.kill(pid, SIGTERM) time.sleep(0.1) except OSError as err: e = str(err.args)", "os.umask(0) self.fork() atexit.register(self.delete_pidfile) self.create_pidfile() def fork(self): try: if os.fork() > 0: sys.exit(0) except", "as fh: fh.write(str(os.getpid()) + \"\\n\") def get_pidfile(self): with open(self.pidfile, \"r\") as fh: return", "process. Reason: {err}\\n\") def delete_pidfile(self): with suppress(FileNotFoundError): os.remove(self.pidfile) def create_pidfile(self): with open(self.pidfile, \"w+\")", "self.fork() atexit.register(self.delete_pidfile) self.create_pidfile() def fork(self): try: if os.fork() > 0: sys.exit(0) except OSError", "SIGTERM) time.sleep(0.1) except OSError as err: e = str(err.args) if e.find(\"No such process\")", "os.fork() > 0: sys.exit(0) except OSError as err: self.error(f\"failed to fork a child", "\"r\") as fh: return int(fh.read().strip()) def error(self, message): sys.stderr.write(f\"{message}\\n\") sys.exit(1) def restart(self): self.stop()", "time from contextlib import suppress from signal import SIGTERM class Daemon: def __init__(self,", "as fh: return int(fh.read().strip()) def error(self, message): sys.stderr.write(f\"{message}\\n\") sys.exit(1) def restart(self): self.stop() self.start()", "try: if os.fork() > 0: sys.exit(0) except OSError as err: self.error(f\"failed to fork", "while 1: os.kill(pid, SIGTERM) time.sleep(0.1) except OSError as err: e = str(err.args) if", "IOError: pass finally: self.daemonize() self.run() def stop(self): try: pid = self.get_pidfile() except IOError:", "to fork a child process. 
Reason: {err}\\n\") def delete_pidfile(self): with suppress(FileNotFoundError): os.remove(self.pidfile) def", "a child process. Reason: {err}\\n\") def delete_pidfile(self): with suppress(FileNotFoundError): os.remove(self.pidfile) def create_pidfile(self): with", "def error(self, message): sys.stderr.write(f\"{message}\\n\") sys.exit(1) def restart(self): self.stop() self.start() def run(self): raise NotImplementedError", "def start(self): try: self.get_pidfile() except IOError: pass finally: self.daemonize() self.run() def stop(self): try:", "err: e = str(err.args) if e.find(\"No such process\") > 0: self.delete_pidfile() else: sys.exit(1)", "except OSError as err: self.error(f\"failed to fork a child process. Reason: {err}\\n\") def", "os.kill(pid, SIGTERM) time.sleep(0.1) except OSError as err: e = str(err.args) if e.find(\"No such", "def delete_pidfile(self): with suppress(FileNotFoundError): os.remove(self.pidfile) def create_pidfile(self): with open(self.pidfile, \"w+\") as fh: fh.write(str(os.getpid())", "delete_pidfile(self): with suppress(FileNotFoundError): os.remove(self.pidfile) def create_pidfile(self): with open(self.pidfile, \"w+\") as fh: fh.write(str(os.getpid()) +", "> 0: self.delete_pidfile() else: sys.exit(1) def daemonize(self): self.fork() os.chdir(\"/\") os.setsid() os.umask(0) self.fork() atexit.register(self.delete_pidfile)", "self.error(f\"failed to fork a child process. 
Reason: {err}\\n\") def delete_pidfile(self): with suppress(FileNotFoundError): os.remove(self.pidfile)", "0: self.delete_pidfile() else: sys.exit(1) def daemonize(self): self.fork() os.chdir(\"/\") os.setsid() os.umask(0) self.fork() atexit.register(self.delete_pidfile) self.create_pidfile()", "\"\\n\") def get_pidfile(self): with open(self.pidfile, \"r\") as fh: return int(fh.read().strip()) def error(self, message):", "try: pid = self.get_pidfile() except IOError: return try: while 1: os.kill(pid, SIGTERM) time.sleep(0.1)", "class Daemon: def __init__(self, pidfile=None): self.pidfile = pidfile or os.path.join(\"/var/run/exhal.service\") def start(self): try:", "from signal import SIGTERM class Daemon: def __init__(self, pidfile=None): self.pidfile = pidfile or", "atexit.register(self.delete_pidfile) self.create_pidfile() def fork(self): try: if os.fork() > 0: sys.exit(0) except OSError as", "os.path.join(\"/var/run/exhal.service\") def start(self): try: self.get_pidfile() except IOError: pass finally: self.daemonize() self.run() def stop(self):", "def fork(self): try: if os.fork() > 0: sys.exit(0) except OSError as err: self.error(f\"failed", "child process. 
Reason: {err}\\n\") def delete_pidfile(self): with suppress(FileNotFoundError): os.remove(self.pidfile) def create_pidfile(self): with open(self.pidfile,", "import sys import time from contextlib import suppress from signal import SIGTERM class", "process\") > 0: self.delete_pidfile() else: sys.exit(1) def daemonize(self): self.fork() os.chdir(\"/\") os.setsid() os.umask(0) self.fork()", "self.run() def stop(self): try: pid = self.get_pidfile() except IOError: return try: while 1:", "except OSError as err: e = str(err.args) if e.find(\"No such process\") > 0:", "open(self.pidfile, \"r\") as fh: return int(fh.read().strip()) def error(self, message): sys.stderr.write(f\"{message}\\n\") sys.exit(1) def restart(self):", "pid = self.get_pidfile() except IOError: return try: while 1: os.kill(pid, SIGTERM) time.sleep(0.1) except", "self.pidfile = pidfile or os.path.join(\"/var/run/exhal.service\") def start(self): try: self.get_pidfile() except IOError: pass finally:", "e.find(\"No such process\") > 0: self.delete_pidfile() else: sys.exit(1) def daemonize(self): self.fork() os.chdir(\"/\") os.setsid()", "= str(err.args) if e.find(\"No such process\") > 0: self.delete_pidfile() else: sys.exit(1) def daemonize(self):", "\"w+\") as fh: fh.write(str(os.getpid()) + \"\\n\") def get_pidfile(self): with open(self.pidfile, \"r\") as fh:", "def get_pidfile(self): with open(self.pidfile, \"r\") as fh: return int(fh.read().strip()) def error(self, message): sys.stderr.write(f\"{message}\\n\")", "IOError: return try: while 1: os.kill(pid, SIGTERM) time.sleep(0.1) except OSError as err: e", "self.create_pidfile() def fork(self): try: if os.fork() > 0: sys.exit(0) except OSError as err:", "self.daemonize() self.run() def stop(self): try: pid = self.get_pidfile() except IOError: return try: while", "int(fh.read().strip()) def error(self, message): sys.stderr.write(f\"{message}\\n\") sys.exit(1) def restart(self): self.stop() self.start() def run(self): raise", "daemonize(self): 
self.fork() os.chdir(\"/\") os.setsid() os.umask(0) self.fork() atexit.register(self.delete_pidfile) self.create_pidfile() def fork(self): try: if os.fork()", "os.chdir(\"/\") os.setsid() os.umask(0) self.fork() atexit.register(self.delete_pidfile) self.create_pidfile() def fork(self): try: if os.fork() > 0:", "fh: fh.write(str(os.getpid()) + \"\\n\") def get_pidfile(self): with open(self.pidfile, \"r\") as fh: return int(fh.read().strip())", "pidfile or os.path.join(\"/var/run/exhal.service\") def start(self): try: self.get_pidfile() except IOError: pass finally: self.daemonize() self.run()", "SIGTERM class Daemon: def __init__(self, pidfile=None): self.pidfile = pidfile or os.path.join(\"/var/run/exhal.service\") def start(self):", "Reason: {err}\\n\") def delete_pidfile(self): with suppress(FileNotFoundError): os.remove(self.pidfile) def create_pidfile(self): with open(self.pidfile, \"w+\") as", "else: sys.exit(1) def daemonize(self): self.fork() os.chdir(\"/\") os.setsid() os.umask(0) self.fork() atexit.register(self.delete_pidfile) self.create_pidfile() def fork(self):", "atexit import os import sys import time from contextlib import suppress from signal", "os.remove(self.pidfile) def create_pidfile(self): with open(self.pidfile, \"w+\") as fh: fh.write(str(os.getpid()) + \"\\n\") def get_pidfile(self):", "OSError as err: self.error(f\"failed to fork a child process. 
Reason: {err}\\n\") def delete_pidfile(self):", "contextlib import suppress from signal import SIGTERM class Daemon: def __init__(self, pidfile=None): self.pidfile", "pass finally: self.daemonize() self.run() def stop(self): try: pid = self.get_pidfile() except IOError: return", "from contextlib import suppress from signal import SIGTERM class Daemon: def __init__(self, pidfile=None):", "with open(self.pidfile, \"r\") as fh: return int(fh.read().strip()) def error(self, message): sys.stderr.write(f\"{message}\\n\") sys.exit(1) def", "if e.find(\"No such process\") > 0: self.delete_pidfile() else: sys.exit(1) def daemonize(self): self.fork() os.chdir(\"/\")", "def daemonize(self): self.fork() os.chdir(\"/\") os.setsid() os.umask(0) self.fork() atexit.register(self.delete_pidfile) self.create_pidfile() def fork(self): try: if", "0: sys.exit(0) except OSError as err: self.error(f\"failed to fork a child process. Reason:", "if os.fork() > 0: sys.exit(0) except OSError as err: self.error(f\"failed to fork a", "err: self.error(f\"failed to fork a child process. Reason: {err}\\n\") def delete_pidfile(self): with suppress(FileNotFoundError):", "fh: return int(fh.read().strip()) def error(self, message): sys.stderr.write(f\"{message}\\n\") sys.exit(1) def restart(self): self.stop() self.start() def", "start(self): try: self.get_pidfile() except IOError: pass finally: self.daemonize() self.run() def stop(self): try: pid", "sys.exit(1) def daemonize(self): self.fork() os.chdir(\"/\") os.setsid() os.umask(0) self.fork() atexit.register(self.delete_pidfile) self.create_pidfile() def fork(self): try:", "fork a child process. 
Reason: {err}\\n\") def delete_pidfile(self): with suppress(FileNotFoundError): os.remove(self.pidfile) def create_pidfile(self):", "get_pidfile(self): with open(self.pidfile, \"r\") as fh: return int(fh.read().strip()) def error(self, message): sys.stderr.write(f\"{message}\\n\") sys.exit(1)", "import atexit import os import sys import time from contextlib import suppress from", "sys.exit(0) except OSError as err: self.error(f\"failed to fork a child process. Reason: {err}\\n\")", "pidfile=None): self.pidfile = pidfile or os.path.join(\"/var/run/exhal.service\") def start(self): try: self.get_pidfile() except IOError: pass", "as err: self.error(f\"failed to fork a child process. Reason: {err}\\n\") def delete_pidfile(self): with", "create_pidfile(self): with open(self.pidfile, \"w+\") as fh: fh.write(str(os.getpid()) + \"\\n\") def get_pidfile(self): with open(self.pidfile,", "self.get_pidfile() except IOError: return try: while 1: os.kill(pid, SIGTERM) time.sleep(0.1) except OSError as", "__init__(self, pidfile=None): self.pidfile = pidfile or os.path.join(\"/var/run/exhal.service\") def start(self): try: self.get_pidfile() except IOError:", "+ \"\\n\") def get_pidfile(self): with open(self.pidfile, \"r\") as fh: return int(fh.read().strip()) def error(self,", "def create_pidfile(self): with open(self.pidfile, \"w+\") as fh: fh.write(str(os.getpid()) + \"\\n\") def get_pidfile(self): with", "or os.path.join(\"/var/run/exhal.service\") def start(self): try: self.get_pidfile() except IOError: pass finally: self.daemonize() self.run() def", "as err: e = str(err.args) if e.find(\"No such process\") > 0: self.delete_pidfile() else:", "fork(self): try: if os.fork() > 0: sys.exit(0) except OSError as err: self.error(f\"failed to", "self.fork() os.chdir(\"/\") os.setsid() os.umask(0) self.fork() atexit.register(self.delete_pidfile) self.create_pidfile() def fork(self): try: if os.fork() >", "suppress(FileNotFoundError): os.remove(self.pidfile) def create_pidfile(self): 
with open(self.pidfile, \"w+\") as fh: fh.write(str(os.getpid()) + \"\\n\") def", "def stop(self): try: pid = self.get_pidfile() except IOError: return try: while 1: os.kill(pid,", "= self.get_pidfile() except IOError: return try: while 1: os.kill(pid, SIGTERM) time.sleep(0.1) except OSError", "def __init__(self, pidfile=None): self.pidfile = pidfile or os.path.join(\"/var/run/exhal.service\") def start(self): try: self.get_pidfile() except", "str(err.args) if e.find(\"No such process\") > 0: self.delete_pidfile() else: sys.exit(1) def daemonize(self): self.fork()", "signal import SIGTERM class Daemon: def __init__(self, pidfile=None): self.pidfile = pidfile or os.path.join(\"/var/run/exhal.service\")", "except IOError: return try: while 1: os.kill(pid, SIGTERM) time.sleep(0.1) except OSError as err:", "open(self.pidfile, \"w+\") as fh: fh.write(str(os.getpid()) + \"\\n\") def get_pidfile(self): with open(self.pidfile, \"r\") as", "time.sleep(0.1) except OSError as err: e = str(err.args) if e.find(\"No such process\") >", "except IOError: pass finally: self.daemonize() self.run() def stop(self): try: pid = self.get_pidfile() except", "import time from contextlib import suppress from signal import SIGTERM class Daemon: def", "Daemon: def __init__(self, pidfile=None): self.pidfile = pidfile or os.path.join(\"/var/run/exhal.service\") def start(self): try: self.get_pidfile()", "= pidfile or os.path.join(\"/var/run/exhal.service\") def start(self): try: self.get_pidfile() except IOError: pass finally: self.daemonize()", "return try: while 1: os.kill(pid, SIGTERM) time.sleep(0.1) except OSError as err: e =", "suppress from signal import SIGTERM class Daemon: def __init__(self, pidfile=None): self.pidfile = pidfile", "> 0: sys.exit(0) except OSError as err: self.error(f\"failed to fork a child process.", "self.get_pidfile() except IOError: pass finally: self.daemonize() self.run() def stop(self): try: pid = self.get_pidfile()", "with suppress(FileNotFoundError): 
os.remove(self.pidfile) def create_pidfile(self): with open(self.pidfile, \"w+\") as fh: fh.write(str(os.getpid()) + \"\\n\")", "such process\") > 0: self.delete_pidfile() else: sys.exit(1) def daemonize(self): self.fork() os.chdir(\"/\") os.setsid() os.umask(0)", "self.delete_pidfile() else: sys.exit(1) def daemonize(self): self.fork() os.chdir(\"/\") os.setsid() os.umask(0) self.fork() atexit.register(self.delete_pidfile) self.create_pidfile() def", "import os import sys import time from contextlib import suppress from signal import", "1: os.kill(pid, SIGTERM) time.sleep(0.1) except OSError as err: e = str(err.args) if e.find(\"No", "finally: self.daemonize() self.run() def stop(self): try: pid = self.get_pidfile() except IOError: return try:", "OSError as err: e = str(err.args) if e.find(\"No such process\") > 0: self.delete_pidfile()", "e = str(err.args) if e.find(\"No such process\") > 0: self.delete_pidfile() else: sys.exit(1) def", "fh.write(str(os.getpid()) + \"\\n\") def get_pidfile(self): with open(self.pidfile, \"r\") as fh: return int(fh.read().strip()) def", "try: self.get_pidfile() except IOError: pass finally: self.daemonize() self.run() def stop(self): try: pid =", "stop(self): try: pid = self.get_pidfile() except IOError: return try: while 1: os.kill(pid, SIGTERM)", "with open(self.pidfile, \"w+\") as fh: fh.write(str(os.getpid()) + \"\\n\") def get_pidfile(self): with open(self.pidfile, \"r\")", "os.setsid() os.umask(0) self.fork() atexit.register(self.delete_pidfile) self.create_pidfile() def fork(self): try: if os.fork() > 0: sys.exit(0)", "import suppress from signal import SIGTERM class Daemon: def __init__(self, pidfile=None): self.pidfile =", "import SIGTERM class Daemon: def __init__(self, pidfile=None): self.pidfile = pidfile or os.path.join(\"/var/run/exhal.service\") def", "{err}\\n\") def delete_pidfile(self): with suppress(FileNotFoundError): os.remove(self.pidfile) def create_pidfile(self): with open(self.pidfile, \"w+\") as fh:" ]
[ "im.shape[1]) > max_scale: f = float(max_scale) / max(im.shape[0], im.shape[1]) return cv2.resize(im, None, None,", "tensor_info_output_cls_prob = tf.saved_model.utils.build_tensor_info(output_tensor_cls_prob) tensor_info_output_box_pred = tf.saved_model.utils.build_tensor_info(output_tensor_box_pred) ''' #crop_resize_img,crop_resize_im_info = resize_im(cv2img, result_boxes) #crop_resize_img,crop_resize_im_info =", "cv2img) print('Creating boxes done') ''' No 2 Sess outf of 2:sess ''' with", "tf.reshape(jpegs, shape=[]) jpeg= preprocess_image(image_string) print('jpeg,jpeg.shape[]', jpeg, jpeg.shape) output_tensor_cls_prob,output_tensor_box_pred = tf.import_graph_def\\ (tf.get_default_graph().as_graph_def(), input_map={'Placeholder:0': jpeg},", "tensor_info_input = tf.saved_model.utils.build_tensor_info(raw_image) tensor_info_output_cls_prob = tf.saved_model.utils.build_tensor_info(output_tensor_cls_prob) tensor_info_output_box_pred = tf.saved_model.utils.build_tensor_info(output_tensor_box_pred) ''' #crop_resize_img,crop_resize_im_info = resize_im(cv2img,", "if max_scale != None and f * max(im.shape[0], im.shape[1]) > max_scale: f =", "for dtype float. image = tf.image.convert_image_dtype(image, dtype=tf.float32) # Crop the central region of", "the image with an area containing 87.5% of # the original image. 
image", "= crop_resize_image(imageplaceholder_info, result_boxes) # output_crop_resize_img = tf.saved_model.utils.build_tensor_info(crop_resize_img) #output_crop_resize_img_info = tf.saved_model.utils.build_tensor_info(crop_resize_im_info) #---------- ''' result_boxes=", "'crop_resize_img': output_crop_resize_img, # 'crop_resize_im_info': output_crop_resize_img_info,}, method_name=tf.saved_model.signature_constants.PREDICT_METHOD_NAME )) builder.add_meta_graph_and_variables( sess, [tf.saved_model.tag_constants.SERVING], signature_def_map={ # 'predict_images':prediction_signature,", "os.path.dirname(os.path.realpath(__file__)) sys.path.append(os.path.join(dir_path, '..')) from lib.networks.factory import get_network from lib.fast_rcnn.config import cfg, cfg_from_file from", "= tf.saved_model.utils.build_tensor_info(result_boxes) prediction_post_signature = ( tf.saved_model.signature_def_utils.build_signature_def( inputs={'images': tensor_info_input}, outputs={'detection_boxes': tensor_info_output_boxes}, #outputs={'detection_boxes': tensor_info_output_boxes, #", "'TEST', anchor_scales=cfg.ANCHOR_SCALES) print('rois, ', rois) scores = rois[:, 0] #print('scores, ', scores )", "rescale to [-1,1] instead of [0, 1) image = tf.subtract(image, 0.5) image =", "------------- | | ---------------- (xmax, ymax) \"\"\" # Specify input/output input_img = sess.graph.get_tensor_by_name('Placeholder:0')", "_ = proposal_layer(cls_prob, box_pred, blobs['im_info'], 'TEST', anchor_scales=cfg.ANCHOR_SCALES) print('rois, ', rois) scores = rois[:,", "import proposal_layer dir_path = os.path.dirname(os.path.realpath(__file__)) def resize_im(im, scale, max_scale=None): f = float(scale) /", "(max_x, max_y)]) print('rects.append, ', rects) return rects def export(): ''' No 1 Sess", "jpegs = tf_example['image/encoded'] image_string = tf.reshape(jpegs, shape=[]) jpeg= preprocess_image(image_string) print('jpeg,jpeg.shape[]', jpeg, jpeg.shape) output_tensor_cls_prob,output_tensor_box_pred", "f * 
max(im.shape[0], im.shape[1]) > max_scale: f = float(max_scale) / max(im.shape[0], im.shape[1]) return", "im_blob.shape[2], im_scales[0]]], dtype=np.float32) cls_prob, box_pred = sess.run([output_cls_box, output_box_pred], feed_dict={input_img: blobs['data']}) #print('cls_prob, ', cls_prob,", "# 'resize_im_info':im_info_output, # 'crop_resize_img': output_crop_resize_img, # 'crop_resize_im_info': output_crop_resize_img_info,}, method_name=tf.saved_model.signature_constants.PREDICT_METHOD_NAME )) builder.add_meta_graph_and_variables( sess, [tf.saved_model.tag_constants.SERVING],", "central region of the image with an area containing 87.5% of # the", "print('Exporting trained model ') raw_image = tf.placeholder(tf.string, name='tf_box') feature_configs = { 'image/encoded': tf.FixedLenFeature(", "get_network from lib.fast_rcnn.config import cfg, cfg_from_file from lib.fast_rcnn.test import test_ctpn from lib.utils.timer import", "output_crop_resize_img, # 'crop_resize_im_info': output_crop_resize_img_info,}, method_name=tf.saved_model.signature_constants.PREDICT_METHOD_NAME )) builder.add_meta_graph_and_variables( sess, [tf.saved_model.tag_constants.SERVING], signature_def_map={ # 'predict_images':prediction_signature, 'predict_images_post':", "test_ctpn from lib.utils.timer import Timer from lib.text_connector.detectors import TextDetector from lib.text_connector.text_connect_cfg import Config", "of [0, 1) image = tf.subtract(image, 0.5) image = tf.multiply(image, 2.0) return image", "original image. 
image = tf.image.central_crop(image, central_fraction=0.875) image = tf.expand_dims(image, 0) image = tf.squeeze(image,", "image = tf.image.convert_image_dtype(image, dtype=tf.float32) # Crop the central region of the image with", "= tf.expand_dims(image, 0) image = tf.squeeze(image, [0]) # Finally, rescale to [-1,1] instead", "resize_im(im, scale, max_scale=None): f = float(scale) / min(im.shape[0], im.shape[1]) if max_scale != None", "result_boxes= np.array(result_boxes, dtype=np.float32) result_boxes= tf.convert_to_tensor(result_boxes) tensor_info_output_boxes = tf.saved_model.utils.build_tensor_info(result_boxes) prediction_post_signature = ( tf.saved_model.signature_def_utils.build_signature_def( inputs={'images':", "preprocess_image(image_buffer): \"\"\"Preprocess JPEG encoded bytes to 3D float Tensor.\"\"\" # Decode the string", "gfile.FastGFile('../data/ctpn.pb', 'rb') as f: restored_graph_def = tf.GraphDef() restored_graph_def.ParseFromString(f.read()) tf.import_graph_def( restored_graph_def, input_map=None, return_elements=None, name=\"\"", "crop_resize_image(imageplaceholder_info, result_boxes) # output_crop_resize_img = tf.saved_model.utils.build_tensor_info(crop_resize_img) #output_crop_resize_img_info = tf.saved_model.utils.build_tensor_info(crop_resize_im_info) #---------- ''' result_boxes= np.array(result_boxes,", "channels=3) image.set_shape([256, 256, 256,3]) # self.img_pl = tf.placeholder(tf.string, name='input_image_as_bytes') # After this point,", "tensor_info_output_boxes = tf.saved_model.utils.build_tensor_info(result_boxes) prediction_post_signature = ( tf.saved_model.signature_def_utils.build_signature_def( inputs={'images': tensor_info_input}, outputs={'detection_boxes': tensor_info_output_boxes}, #outputs={'detection_boxes': tensor_info_output_boxes,", "output_box_pred], feed_dict={input_img: blobs['data']}) #print('cls_prob, ', cls_prob, box_pred ) print('box_pred, ', box_pred ) rois,", "rois, _ = proposal_layer(cls_prob, box_pred, 
blobs['im_info'], 'TEST', anchor_scales=cfg.ANCHOR_SCALES) print('rois, ', rois) scores =", "max(int(box[0]/scale), int(box[2]/scale), int(box[4]/scale), int(box[6]/scale)) max_y = max(int(box[1]/scale), int(box[3]/scale), int(box[5]/scale), int(box[7]/scale)) rects.append([(min_x, min_y), (max_x,", "cls_prob, box_pred ) print('box_pred, ', box_pred ) rois, _ = proposal_layer(cls_prob, box_pred, blobs['im_info'],", "set dynamically by decode_jpeg. In other words, the height # and width of", "outputs={'detection_boxes': tensor_info_output_boxes}, #outputs={'detection_boxes': tensor_info_output_boxes, # 'resize_im_info':im_info_output, # 'crop_resize_img': output_crop_resize_img, # 'crop_resize_im_info': output_crop_resize_img_info,}, method_name=tf.saved_model.signature_constants.PREDICT_METHOD_NAME", "= [] for box in boxes: min_x = min(int(box[0]/scale), int(box[2]/scale), int(box[4]/scale), int(box[6]/scale)) min_y", "No 2 Sess outf of 2:sess ''' with tf.Session() as sess: with gfile.FastGFile('../data/ctpn.pb',", "coordinates: [(xmin, ymin), (xmax, ymax)] (xmin, ymin) ------------- | | ---------------- (xmax, ymax)", "cfg.TEST.HAS_RPN: im_blob = blobs['data'] blobs['im_info'] = np.array([[im_blob.shape[1], im_blob.shape[2], im_scales[0]]], dtype=np.float32) cls_prob, box_pred =", "np.array(result_boxes, dtype=np.float32) result_boxes= tf.convert_to_tensor(result_boxes) tensor_info_output_boxes = tf.saved_model.utils.build_tensor_info(result_boxes) prediction_post_signature = ( tf.saved_model.signature_def_utils.build_signature_def( inputs={'images': tensor_info_input},", "inputs={'images': tensor_info_input}, outputs={'detection_boxes': tensor_info_output_boxes}, #outputs={'detection_boxes': tensor_info_output_boxes, # 'resize_im_info':im_info_output, # 'crop_resize_img': output_crop_resize_img, # 'crop_resize_im_info':", "sess, [tf.saved_model.tag_constants.SERVING], signature_def_map={ # 'predict_images':prediction_signature, 'predict_images_post': 
prediction_post_signature }) builder.save(as_text=False) if __name__ == '__main__':", "jpeg, jpeg.shape) output_tensor_cls_prob,output_tensor_box_pred = tf.import_graph_def\\ (tf.get_default_graph().as_graph_def(), input_map={'Placeholder:0': jpeg}, return_elements=['Reshape_2:0','rpn_bbox_pred/Reshape_1:0']) tensor_info_input = tf.saved_model.utils.build_tensor_info(raw_image) tensor_info_output_cls_prob", "__future__ import print_function import tensorflow as tf import numpy as np import os,", "1:5] / im_scales[0] #print('boxes=rois, ', boxes ) textdetector = TextDetector() print('textDetector, ', textdetector", "from lib.utils.timer import Timer from lib.text_connector.detectors import TextDetector from lib.text_connector.text_connect_cfg import Config as", "graph_def.ParseFromString(f.read()) ctpn_sess.graph.as_default() tf.import_graph_def(graph_def, name='') ctpn_sess.run(tf.global_variables_initializer()) cv2img = cv2.imread(\"../data/demo/006.jpg\", cv2.IMREAD_COLOR) result_boxes=query_ctpn(ctpn_sess, cv2img) print('Creating boxes", "tf.GraphDef() restored_graph_def.ParseFromString(f.read()) tf.import_graph_def( restored_graph_def, input_map=None, return_elements=None, name=\"\" ) ''' export_path_base = args.export_model_dir export_path", "import get_network from lib.fast_rcnn.config import cfg, cfg_from_file from lib.fast_rcnn.test import test_ctpn from lib.utils.timer", "tf.image.convert_image_dtype(image, dtype=tf.float32) # Crop the central region of the image with an area", "return cv2.resize(im, None, None, fx=f, fy=f, interpolation=cv2.INTER_LINEAR), f def preprocess_image(image_buffer): \"\"\"Preprocess JPEG encoded", "image with an area containing 87.5% of # the original image. image =", "to (-1, 1). 
The various # adjust_* ops all require this range for", "2.0) return image def query_ctpn(sess, cv2img): \"\"\"Args: sess: tensorflow session cfg: CTPN config", "lib.fast_rcnn.test import test_ctpn from lib.utils.timer import Timer from lib.text_connector.detectors import TextDetector from lib.text_connector.text_connect_cfg", "rects = [] for box in boxes: min_x = min(int(box[0]/scale), int(box[2]/scale), int(box[4]/scale), int(box[6]/scale))", "end, when they're rescaled to (-1, 1). The various # adjust_* ops all", "sys, cv2 from tensorflow.python.platform import gfile import glob import shutil dir_path = os.path.dirname(os.path.realpath(__file__))", "rescaled to (-1, 1). The various # adjust_* ops all require this range", "0.5) image = tf.multiply(image, 2.0) return image def query_ctpn(sess, cv2img): \"\"\"Args: sess: tensorflow", "region of the image with an area containing 87.5% of # the original", "min(int(box[1]/scale), int(box[3]/scale), int(box[5]/scale), int(box[7]/scale)) max_x = max(int(box[0]/scale), int(box[2]/scale), int(box[4]/scale), int(box[6]/scale)) max_y = max(int(box[1]/scale),", "config = tf.ConfigProto(allow_soft_placement=True) ctpn_sess = tf.Session(config=config) with ctpn_sess.as_default(): with tf.gfile.FastGFile('../data/ctpn.pb', 'rb') as f:", "prediction_post_signature = ( tf.saved_model.signature_def_utils.build_signature_def( inputs={'images': tensor_info_input}, outputs={'detection_boxes': tensor_info_output_boxes}, #outputs={'detection_boxes': tensor_info_output_boxes, # 'resize_im_info':im_info_output, #", "img, scale = resize_im(cv2img, scale=TextLineCfg.SCALE, max_scale=TextLineCfg.MAX_SCALE) blobs, im_scales = _get_blobs(img, None) if cfg.TEST.HAS_RPN:", "''' result_boxes= np.array(result_boxes, dtype=np.float32) result_boxes= tf.convert_to_tensor(result_boxes) tensor_info_output_boxes = tf.saved_model.utils.build_tensor_info(result_boxes) prediction_post_signature = ( tf.saved_model.signature_def_utils.build_signature_def(", 
"im_scales = _get_blobs(img, None) if cfg.TEST.HAS_RPN: im_blob = blobs['data'] blobs['im_info'] = np.array([[im_blob.shape[1], im_blob.shape[2],", "None, fx=f, fy=f, interpolation=cv2.INTER_LINEAR), f def preprocess_image(image_buffer): \"\"\"Preprocess JPEG encoded bytes to 3D", "= resize_im(cv2img, result_boxes) #crop_resize_img,crop_resize_im_info = crop_resize_image(imageplaceholder_info, result_boxes) # output_crop_resize_img = tf.saved_model.utils.build_tensor_info(crop_resize_img) #output_crop_resize_img_info =", "Timer from lib.text_connector.detectors import TextDetector from lib.text_connector.text_connect_cfg import Config as TextLineCfg from lib.fast_rcnn.test", "output_box_pred = sess.graph.get_tensor_by_name('rpn_bbox_pred/Reshape_1:0') #print('query_pb : img, ', img) img, scale = resize_im(cv2img, scale=TextLineCfg.SCALE,", "as f: restored_graph_def = tf.GraphDef() restored_graph_def.ParseFromString(f.read()) tf.import_graph_def( restored_graph_def, input_map=None, return_elements=None, name=\"\" ) '''", "for box in boxes: min_x = min(int(box[0]/scale), int(box[2]/scale), int(box[4]/scale), int(box[6]/scale)) min_y = min(int(box[1]/scale),", "tensor_info_output_box_pred = tf.saved_model.utils.build_tensor_info(output_tensor_box_pred) ''' #crop_resize_img,crop_resize_im_info = resize_im(cv2img, result_boxes) #crop_resize_img,crop_resize_im_info = crop_resize_image(imageplaceholder_info, result_boxes) #", "range for dtype float. image = tf.image.convert_image_dtype(image, dtype=tf.float32) # Crop the central region", "dtype float. 
image = tf.image.convert_image_dtype(image, dtype=tf.float32) # Crop the central region of the", "sess.run([output_cls_box, output_box_pred], feed_dict={input_img: blobs['data']}) #print('cls_prob, ', cls_prob, box_pred ) print('box_pred, ', box_pred )", "ymin) ------------- | | ---------------- (xmax, ymax) \"\"\" # Specify input/output input_img =", "Sess outf of 2:sess ''' with tf.Session() as sess: with gfile.FastGFile('../data/ctpn.pb', 'rb') as", "JPEG. # Note that the resulting image contains an unknown height and width", "image = tf.subtract(image, 0.5) image = tf.multiply(image, 2.0) return image def query_ctpn(sess, cv2img):", "> max_scale: f = float(max_scale) / max(im.shape[0], im.shape[1]) return cv2.resize(im, None, None, fx=f,", "os, sys, cv2 from tensorflow.python.platform import gfile import glob import shutil dir_path =", "#---------- ''' result_boxes= np.array(result_boxes, dtype=np.float32) result_boxes= tf.convert_to_tensor(result_boxes) tensor_info_output_boxes = tf.saved_model.utils.build_tensor_info(result_boxes) prediction_post_signature = (", "= tf.ConfigProto(allow_soft_placement=True) ctpn_sess = tf.Session(config=config) with ctpn_sess.as_default(): with tf.gfile.FastGFile('../data/ctpn.pb', 'rb') as f: graph_def", "adjust_* ops all require this range for dtype float. 
image = tf.image.convert_image_dtype(image, dtype=tf.float32)", "'resize_im_info':im_info_output, # 'crop_resize_img': output_crop_resize_img, # 'crop_resize_im_info': output_crop_resize_img_info,}, method_name=tf.saved_model.signature_constants.PREDICT_METHOD_NAME )) builder.add_meta_graph_and_variables( sess, [tf.saved_model.tag_constants.SERVING], signature_def_map={", "ctpn_sess.graph.as_default() tf.import_graph_def(graph_def, name='') ctpn_sess.run(tf.global_variables_initializer()) cv2img = cv2.imread(\"../data/demo/006.jpg\", cv2.IMREAD_COLOR) result_boxes=query_ctpn(ctpn_sess, cv2img) print('Creating boxes done')", "#crop_resize_img,crop_resize_im_info = crop_resize_image(imageplaceholder_info, result_boxes) # output_crop_resize_img = tf.saved_model.utils.build_tensor_info(crop_resize_img) #output_crop_resize_img_info = tf.saved_model.utils.build_tensor_info(crop_resize_im_info) #---------- '''", "the height # and width of image is unknown at compile-time. image =", "= tf.image.decode_image(image_buffer, channels=3) image.set_shape([256, 256, 256,3]) # self.img_pl = tf.placeholder(tf.string, name='input_image_as_bytes') # After", "session cfg: CTPN config img: numpy array image Returns: A list of detected", "input_map=None, return_elements=None, name=\"\" ) ''' export_path_base = args.export_model_dir export_path = os.path.join(tf.compat.as_bytes(export_path_base), tf.compat.as_bytes(str(args.model_version))) '''", "#print('boxes=rois, ', boxes ) textdetector = TextDetector() print('textDetector, ', textdetector ) boxes =", "tensorflow session cfg: CTPN config img: numpy array image Returns: A list of", "name='tf_box') feature_configs = { 'image/encoded': tf.FixedLenFeature( shape=[], dtype=tf.string), } tf_example = tf.parse_example(raw_image ,", "in boxes: min_x = min(int(box[0]/scale), int(box[2]/scale), int(box[4]/scale), int(box[6]/scale)) min_y = min(int(box[1]/scale), int(box[3]/scale), int(box[5]/scale),", "as f: graph_def = tf.GraphDef() 
graph_def.ParseFromString(f.read()) ctpn_sess.graph.as_default() tf.import_graph_def(graph_def, name='') ctpn_sess.run(tf.global_variables_initializer()) cv2img = cv2.imread(\"../data/demo/006.jpg\",", "ctpn_sess.as_default(): with tf.gfile.FastGFile('../data/ctpn.pb', 'rb') as f: graph_def = tf.GraphDef() graph_def.ParseFromString(f.read()) ctpn_sess.graph.as_default() tf.import_graph_def(graph_def, name='')", "int(box[5]/scale), int(box[7]/scale)) rects.append([(min_x, min_y), (max_x, max_y)]) print('rects.append, ', rects) return rects def export():", "unknown height and width # that is set dynamically by decode_jpeg. In other", "import gfile import glob import shutil dir_path = os.path.dirname(os.path.realpath(__file__)) sys.path.append(os.path.join(dir_path, '..')) from lib.networks.factory", "im_blob = blobs['data'] blobs['im_info'] = np.array([[im_blob.shape[1], im_blob.shape[2], im_scales[0]]], dtype=np.float32) cls_prob, box_pred = sess.run([output_cls_box,", "compile-time. image = tf.image.decode_image(image_buffer, channels=3) image.set_shape([256, 256, 256,3]) # self.img_pl = tf.placeholder(tf.string, name='input_image_as_bytes')", ") print('box_pred, ', box_pred ) rois, _ = proposal_layer(cls_prob, box_pred, blobs['im_info'], 'TEST', anchor_scales=cfg.ANCHOR_SCALES)", "float Tensor.\"\"\" # Decode the string as an RGB JPEG. # Note that", "all image pixels reside in [0,1) # until the very end, when they're", "by decode_jpeg. 
In other words, the height # and width of image is", "img, ', img) img, scale = resize_im(cv2img, scale=TextLineCfg.SCALE, max_scale=TextLineCfg.MAX_SCALE) blobs, im_scales = _get_blobs(img,", "min_x = min(int(box[0]/scale), int(box[2]/scale), int(box[4]/scale), int(box[6]/scale)) min_y = min(int(box[1]/scale), int(box[3]/scale), int(box[5]/scale), int(box[7]/scale)) max_x", "| ---------------- (xmax, ymax) \"\"\" # Specify input/output input_img = sess.graph.get_tensor_by_name('Placeholder:0') output_cls_box =", "tf.FixedLenFeature( shape=[], dtype=tf.string), } tf_example = tf.parse_example(raw_image , feature_configs) jpegs = tf_example['image/encoded'] image_string", ", feature_configs) jpegs = tf_example['image/encoded'] image_string = tf.reshape(jpegs, shape=[]) jpeg= preprocess_image(image_string) print('jpeg,jpeg.shape[]', jpeg,", "256, 256,3]) # self.img_pl = tf.placeholder(tf.string, name='input_image_as_bytes') # After this point, all image", "central_fraction=0.875) image = tf.expand_dims(image, 0) image = tf.squeeze(image, [0]) # Finally, rescale to", "rois) scores = rois[:, 0] #print('scores, ', scores ) boxes = rois[:, 1:5]", "int(box[3]/scale), int(box[5]/scale), int(box[7]/scale)) max_x = max(int(box[0]/scale), int(box[2]/scale), int(box[4]/scale), int(box[6]/scale)) max_y = max(int(box[1]/scale), int(box[3]/scale),", "tf.saved_model.builder.SavedModelBuilder('../exportPo/1') #print('Exporting trained model to', export_path) print('Exporting trained model ') raw_image = tf.placeholder(tf.string,", "list of detected bounding boxes, each bounding box have followed coordinates: [(xmin, ymin),", "# Finally, rescale to [-1,1] instead of [0, 1) image = tf.subtract(image, 0.5)", "= tf.saved_model.utils.build_tensor_info(raw_image) tensor_info_output_cls_prob = tf.saved_model.utils.build_tensor_info(output_tensor_cls_prob) tensor_info_output_box_pred = tf.saved_model.utils.build_tensor_info(output_tensor_box_pred) ''' #crop_resize_img,crop_resize_im_info = 
resize_im(cv2img, result_boxes)", "# that is set dynamically by decode_jpeg. In other words, the height #", "export(): ''' No 1 Sess outf of 2 : ctpn_sess ''' cfg_from_file(os.path.join(dir_path, 'text_post.yml'))", "= tf.image.central_crop(image, central_fraction=0.875) image = tf.expand_dims(image, 0) image = tf.squeeze(image, [0]) # Finally,", "= tf.saved_model.builder.SavedModelBuilder('../exportPo/1') #print('Exporting trained model to', export_path) print('Exporting trained model ') raw_image =", "boxes done') ''' No 2 Sess outf of 2:sess ''' with tf.Session() as", "# output_crop_resize_img = tf.saved_model.utils.build_tensor_info(crop_resize_img) #output_crop_resize_img_info = tf.saved_model.utils.build_tensor_info(crop_resize_im_info) #---------- ''' result_boxes= np.array(result_boxes, dtype=np.float32) result_boxes=", "im.shape[1]) return cv2.resize(im, None, None, fx=f, fy=f, interpolation=cv2.INTER_LINEAR), f def preprocess_image(image_buffer): \"\"\"Preprocess JPEG", "textdetector.detect(boxes, scores[:, np.newaxis], img.shape[:2]) print('boxes=textdetector, ', boxes ) # Convert boxes to bouding", "as an RGB JPEG. # Note that the resulting image contains an unknown", "the resulting image contains an unknown height and width # that is set", "print('rects.append, ', rects) return rects def export(): ''' No 1 Sess outf of", "width of image is unknown at compile-time. 
image = tf.image.decode_image(image_buffer, channels=3) image.set_shape([256, 256,", "!= None and f * max(im.shape[0], im.shape[1]) > max_scale: f = float(max_scale) /", "dtype=np.float32) cls_prob, box_pred = sess.run([output_cls_box, output_box_pred], feed_dict={input_img: blobs['data']}) #print('cls_prob, ', cls_prob, box_pred )", "sess.graph.get_tensor_by_name('rpn_bbox_pred/Reshape_1:0') #print('query_pb : img, ', img) img, scale = resize_im(cv2img, scale=TextLineCfg.SCALE, max_scale=TextLineCfg.MAX_SCALE) blobs,", "tf.gfile.FastGFile('../data/ctpn.pb', 'rb') as f: graph_def = tf.GraphDef() graph_def.ParseFromString(f.read()) ctpn_sess.graph.as_default() tf.import_graph_def(graph_def, name='') ctpn_sess.run(tf.global_variables_initializer()) cv2img", "interpolation=cv2.INTER_LINEAR), f def preprocess_image(image_buffer): \"\"\"Preprocess JPEG encoded bytes to 3D float Tensor.\"\"\" #", "that is set dynamically by decode_jpeg. In other words, the height # and", "this point, all image pixels reside in [0,1) # until the very end,", "jpeg.shape) output_tensor_cls_prob,output_tensor_box_pred = tf.import_graph_def\\ (tf.get_default_graph().as_graph_def(), input_map={'Placeholder:0': jpeg}, return_elements=['Reshape_2:0','rpn_bbox_pred/Reshape_1:0']) tensor_info_input = tf.saved_model.utils.build_tensor_info(raw_image) tensor_info_output_cls_prob =", "cfg_from_file(os.path.join(dir_path, 'text_post.yml')) config = tf.ConfigProto(allow_soft_placement=True) ctpn_sess = tf.Session(config=config) with ctpn_sess.as_default(): with tf.gfile.FastGFile('../data/ctpn.pb', 'rb')", "ymax)] (xmin, ymin) ------------- | | ---------------- (xmax, ymax) \"\"\" # Specify input/output", "#print('cls_prob, ', cls_prob, box_pred ) print('box_pred, ', box_pred ) rois, _ = proposal_layer(cls_prob,", "of the image with an area containing 87.5% of # the original image.", "print('rois, ', rois) scores = rois[:, 0] #print('scores, ', scores ) boxes =", "f = float(scale) / min(im.shape[0], 
im.shape[1]) if max_scale != None and f *", "cv2.resize(im, None, None, fx=f, fy=f, interpolation=cv2.INTER_LINEAR), f def preprocess_image(image_buffer): \"\"\"Preprocess JPEG encoded bytes", "height # and width of image is unknown at compile-time. image = tf.image.decode_image(image_buffer,", "blobs, im_scales = _get_blobs(img, None) if cfg.TEST.HAS_RPN: im_blob = blobs['data'] blobs['im_info'] = np.array([[im_blob.shape[1],", "feature_configs = { 'image/encoded': tf.FixedLenFeature( shape=[], dtype=tf.string), } tf_example = tf.parse_example(raw_image , feature_configs)", "decode_jpeg. In other words, the height # and width of image is unknown", "1). The various # adjust_* ops all require this range for dtype float.", "= _get_blobs(img, None) if cfg.TEST.HAS_RPN: im_blob = blobs['data'] blobs['im_info'] = np.array([[im_blob.shape[1], im_blob.shape[2], im_scales[0]]],", "anchor_scales=cfg.ANCHOR_SCALES) print('rois, ', rois) scores = rois[:, 0] #print('scores, ', scores ) boxes", "= tf.placeholder(tf.string, name='tf_box') feature_configs = { 'image/encoded': tf.FixedLenFeature( shape=[], dtype=tf.string), } tf_example =", "proposal_layer(cls_prob, box_pred, blobs['im_info'], 'TEST', anchor_scales=cfg.ANCHOR_SCALES) print('rois, ', rois) scores = rois[:, 0] #print('scores,", "Crop the central region of the image with an area containing 87.5% of", "\"\"\" # Specify input/output input_img = sess.graph.get_tensor_by_name('Placeholder:0') output_cls_box = sess.graph.get_tensor_by_name('Reshape_2:0') output_box_pred = sess.graph.get_tensor_by_name('rpn_bbox_pred/Reshape_1:0')", "'crop_resize_im_info': output_crop_resize_img_info,}, method_name=tf.saved_model.signature_constants.PREDICT_METHOD_NAME )) builder.add_meta_graph_and_variables( sess, [tf.saved_model.tag_constants.SERVING], signature_def_map={ # 'predict_images':prediction_signature, 'predict_images_post': prediction_post_signature })", "''' with tf.Session() as sess: with gfile.FastGFile('../data/ctpn.pb', 
'rb') as f: restored_graph_def = tf.GraphDef()", ") rois, _ = proposal_layer(cls_prob, box_pred, blobs['im_info'], 'TEST', anchor_scales=cfg.ANCHOR_SCALES) print('rois, ', rois) scores", "TextDetector from lib.text_connector.text_connect_cfg import Config as TextLineCfg from lib.fast_rcnn.test import _get_blobs from lib.rpn_msr.proposal_layer_tf", "gfile import glob import shutil dir_path = os.path.dirname(os.path.realpath(__file__)) sys.path.append(os.path.join(dir_path, '..')) from lib.networks.factory import", "this range for dtype float. image = tf.image.convert_image_dtype(image, dtype=tf.float32) # Crop the central", "= os.path.dirname(os.path.realpath(__file__)) sys.path.append(os.path.join(dir_path, '..')) from lib.networks.factory import get_network from lib.fast_rcnn.config import cfg, cfg_from_file", "jpeg= preprocess_image(image_string) print('jpeg,jpeg.shape[]', jpeg, jpeg.shape) output_tensor_cls_prob,output_tensor_box_pred = tf.import_graph_def\\ (tf.get_default_graph().as_graph_def(), input_map={'Placeholder:0': jpeg}, return_elements=['Reshape_2:0','rpn_bbox_pred/Reshape_1:0']) tensor_info_input", "scale=TextLineCfg.SCALE, max_scale=TextLineCfg.MAX_SCALE) blobs, im_scales = _get_blobs(img, None) if cfg.TEST.HAS_RPN: im_blob = blobs['data'] blobs['im_info']", "when they're rescaled to (-1, 1). The various # adjust_* ops all require", "rects def export(): ''' No 1 Sess outf of 2 : ctpn_sess '''", "f: restored_graph_def = tf.GraphDef() restored_graph_def.ParseFromString(f.read()) tf.import_graph_def( restored_graph_def, input_map=None, return_elements=None, name=\"\" ) ''' export_path_base", "image. 
image = tf.image.central_crop(image, central_fraction=0.875) image = tf.expand_dims(image, 0) image = tf.squeeze(image, [0])", "lib.networks.factory import get_network from lib.fast_rcnn.config import cfg, cfg_from_file from lib.fast_rcnn.test import test_ctpn from", "np.array([[im_blob.shape[1], im_blob.shape[2], im_scales[0]]], dtype=np.float32) cls_prob, box_pred = sess.run([output_cls_box, output_box_pred], feed_dict={input_img: blobs['data']}) #print('cls_prob, ',", "input/output input_img = sess.graph.get_tensor_by_name('Placeholder:0') output_cls_box = sess.graph.get_tensor_by_name('Reshape_2:0') output_box_pred = sess.graph.get_tensor_by_name('rpn_bbox_pred/Reshape_1:0') #print('query_pb : img,", "No 1 Sess outf of 2 : ctpn_sess ''' cfg_from_file(os.path.join(dir_path, 'text_post.yml')) config =", "tf.parse_example(raw_image , feature_configs) jpegs = tf_example['image/encoded'] image_string = tf.reshape(jpegs, shape=[]) jpeg= preprocess_image(image_string) print('jpeg,jpeg.shape[]',", "= float(scale) / min(im.shape[0], im.shape[1]) if max_scale != None and f * max(im.shape[0],", "as np import os, sys, cv2 from tensorflow.python.platform import gfile import glob import", "TextLineCfg from lib.fast_rcnn.test import _get_blobs from lib.rpn_msr.proposal_layer_tf import proposal_layer dir_path = os.path.dirname(os.path.realpath(__file__)) def", "boxes ) textdetector = TextDetector() print('textDetector, ', textdetector ) boxes = textdetector.detect(boxes, scores[:,", ") textdetector = TextDetector() print('textDetector, ', textdetector ) boxes = textdetector.detect(boxes, scores[:, np.newaxis],", "2 : ctpn_sess ''' cfg_from_file(os.path.join(dir_path, 'text_post.yml')) config = tf.ConfigProto(allow_soft_placement=True) ctpn_sess = tf.Session(config=config) with", "f def preprocess_image(image_buffer): \"\"\"Preprocess JPEG encoded bytes to 3D float Tensor.\"\"\" # Decode", "resize_im(cv2img, result_boxes) #crop_resize_img,crop_resize_im_info = 
crop_resize_image(imageplaceholder_info, result_boxes) # output_crop_resize_img = tf.saved_model.utils.build_tensor_info(crop_resize_img) #output_crop_resize_img_info = tf.saved_model.utils.build_tensor_info(crop_resize_im_info)", "name='input_image_as_bytes') # After this point, all image pixels reside in [0,1) # until", "with tf.gfile.FastGFile('../data/ctpn.pb', 'rb') as f: graph_def = tf.GraphDef() graph_def.ParseFromString(f.read()) ctpn_sess.graph.as_default() tf.import_graph_def(graph_def, name='') ctpn_sess.run(tf.global_variables_initializer())", "= float(max_scale) / max(im.shape[0], im.shape[1]) return cv2.resize(im, None, None, fx=f, fy=f, interpolation=cv2.INTER_LINEAR), f", "bytes to 3D float Tensor.\"\"\" # Decode the string as an RGB JPEG.", "print('jpeg,jpeg.shape[]', jpeg, jpeg.shape) output_tensor_cls_prob,output_tensor_box_pred = tf.import_graph_def\\ (tf.get_default_graph().as_graph_def(), input_map={'Placeholder:0': jpeg}, return_elements=['Reshape_2:0','rpn_bbox_pred/Reshape_1:0']) tensor_info_input = tf.saved_model.utils.build_tensor_info(raw_image)", "lib.fast_rcnn.test import _get_blobs from lib.rpn_msr.proposal_layer_tf import proposal_layer dir_path = os.path.dirname(os.path.realpath(__file__)) def resize_im(im, scale,", "', scores ) boxes = rois[:, 1:5] / im_scales[0] #print('boxes=rois, ', boxes )", "tf.placeholder(tf.string, name='tf_box') feature_configs = { 'image/encoded': tf.FixedLenFeature( shape=[], dtype=tf.string), } tf_example = tf.parse_example(raw_image", "with gfile.FastGFile('../data/ctpn.pb', 'rb') as f: restored_graph_def = tf.GraphDef() restored_graph_def.ParseFromString(f.read()) tf.import_graph_def( restored_graph_def, input_map=None, return_elements=None,", "config img: numpy array image Returns: A list of detected bounding boxes, each", "tf.image.decode_image(image_buffer, channels=3) image.set_shape([256, 256, 256,3]) # self.img_pl = tf.placeholder(tf.string, name='input_image_as_bytes') # After this", "scores = 
rois[:, 0] #print('scores, ', scores ) boxes = rois[:, 1:5] /", "tf.import_graph_def( restored_graph_def, input_map=None, return_elements=None, name=\"\" ) ''' export_path_base = args.export_model_dir export_path = os.path.join(tf.compat.as_bytes(export_path_base),", "from tensorflow.python.platform import gfile import glob import shutil dir_path = os.path.dirname(os.path.realpath(__file__)) sys.path.append(os.path.join(dir_path, '..'))", "sys.path.append(os.path.join(dir_path, '..')) from lib.networks.factory import get_network from lib.fast_rcnn.config import cfg, cfg_from_file from lib.fast_rcnn.test", "tf.saved_model.utils.build_tensor_info(raw_image) tensor_info_output_cls_prob = tf.saved_model.utils.build_tensor_info(output_tensor_cls_prob) tensor_info_output_box_pred = tf.saved_model.utils.build_tensor_info(output_tensor_box_pred) ''' #crop_resize_img,crop_resize_im_info = resize_im(cv2img, result_boxes) #crop_resize_img,crop_resize_im_info", "cfg, cfg_from_file from lib.fast_rcnn.test import test_ctpn from lib.utils.timer import Timer from lib.text_connector.detectors import", "raw_image = tf.placeholder(tf.string, name='tf_box') feature_configs = { 'image/encoded': tf.FixedLenFeature( shape=[], dtype=tf.string), } tf_example", "blobs['im_info'] = np.array([[im_blob.shape[1], im_blob.shape[2], im_scales[0]]], dtype=np.float32) cls_prob, box_pred = sess.run([output_cls_box, output_box_pred], feed_dict={input_img: blobs['data']})", "cfg: CTPN config img: numpy array image Returns: A list of detected bounding", "(xmax, ymax)] (xmin, ymin) ------------- | | ---------------- (xmax, ymax) \"\"\" # Specify", "as TextLineCfg from lib.fast_rcnn.test import _get_blobs from lib.rpn_msr.proposal_layer_tf import proposal_layer dir_path = os.path.dirname(os.path.realpath(__file__))", "# 'crop_resize_im_info': output_crop_resize_img_info,}, method_name=tf.saved_model.signature_constants.PREDICT_METHOD_NAME )) builder.add_meta_graph_and_variables( sess, 
[tf.saved_model.tag_constants.SERVING], signature_def_map={ # 'predict_images':prediction_signature, 'predict_images_post': prediction_post_signature", "', cls_prob, box_pred ) print('box_pred, ', box_pred ) rois, _ = proposal_layer(cls_prob, box_pred,", "/ im_scales[0] #print('boxes=rois, ', boxes ) textdetector = TextDetector() print('textDetector, ', textdetector )", "from lib.networks.factory import get_network from lib.fast_rcnn.config import cfg, cfg_from_file from lib.fast_rcnn.test import test_ctpn", "= tf.saved_model.utils.build_tensor_info(crop_resize_im_info) #---------- ''' result_boxes= np.array(result_boxes, dtype=np.float32) result_boxes= tf.convert_to_tensor(result_boxes) tensor_info_output_boxes = tf.saved_model.utils.build_tensor_info(result_boxes) prediction_post_signature", "np.newaxis], img.shape[:2]) print('boxes=textdetector, ', boxes ) # Convert boxes to bouding rectangles rects", "tf.saved_model.utils.build_tensor_info(crop_resize_img) #output_crop_resize_img_info = tf.saved_model.utils.build_tensor_info(crop_resize_im_info) #---------- ''' result_boxes= np.array(result_boxes, dtype=np.float32) result_boxes= tf.convert_to_tensor(result_boxes) tensor_info_output_boxes =", "int(box[7]/scale)) rects.append([(min_x, min_y), (max_x, max_y)]) print('rects.append, ', rects) return rects def export(): '''", "array image Returns: A list of detected bounding boxes, each bounding box have", "img.shape[:2]) print('boxes=textdetector, ', boxes ) # Convert boxes to bouding rectangles rects =", "= tf.GraphDef() restored_graph_def.ParseFromString(f.read()) tf.import_graph_def( restored_graph_def, input_map=None, return_elements=None, name=\"\" ) ''' export_path_base = args.export_model_dir", "import test_ctpn from lib.utils.timer import Timer from lib.text_connector.detectors import TextDetector from lib.text_connector.text_connect_cfg import", "int(box[6]/scale)) min_y = min(int(box[1]/scale), int(box[3]/scale), int(box[5]/scale), int(box[7]/scale)) max_x 
= max(int(box[0]/scale), int(box[2]/scale), int(box[4]/scale), int(box[6]/scale))", "numpy array image Returns: A list of detected bounding boxes, each bounding box", "The various # adjust_* ops all require this range for dtype float. image", "name=\"\" ) ''' export_path_base = args.export_model_dir export_path = os.path.join(tf.compat.as_bytes(export_path_base), tf.compat.as_bytes(str(args.model_version))) ''' builder =", "dir_path = os.path.dirname(os.path.realpath(__file__)) sys.path.append(os.path.join(dir_path, '..')) from lib.networks.factory import get_network from lib.fast_rcnn.config import cfg,", "min(im.shape[0], im.shape[1]) if max_scale != None and f * max(im.shape[0], im.shape[1]) > max_scale:", "jpeg}, return_elements=['Reshape_2:0','rpn_bbox_pred/Reshape_1:0']) tensor_info_input = tf.saved_model.utils.build_tensor_info(raw_image) tensor_info_output_cls_prob = tf.saved_model.utils.build_tensor_info(output_tensor_cls_prob) tensor_info_output_box_pred = tf.saved_model.utils.build_tensor_info(output_tensor_box_pred) ''' #crop_resize_img,crop_resize_im_info", "Decode the string as an RGB JPEG. 
# Note that the resulting image", "tensor_info_output_boxes, # 'resize_im_info':im_info_output, # 'crop_resize_img': output_crop_resize_img, # 'crop_resize_im_info': output_crop_resize_img_info,}, method_name=tf.saved_model.signature_constants.PREDICT_METHOD_NAME )) builder.add_meta_graph_and_variables( sess,", "import cfg, cfg_from_file from lib.fast_rcnn.test import test_ctpn from lib.utils.timer import Timer from lib.text_connector.detectors", "export_path = os.path.join(tf.compat.as_bytes(export_path_base), tf.compat.as_bytes(str(args.model_version))) ''' builder = tf.saved_model.builder.SavedModelBuilder('../exportPo/1') #print('Exporting trained model to', export_path)", "tf.import_graph_def\\ (tf.get_default_graph().as_graph_def(), input_map={'Placeholder:0': jpeg}, return_elements=['Reshape_2:0','rpn_bbox_pred/Reshape_1:0']) tensor_info_input = tf.saved_model.utils.build_tensor_info(raw_image) tensor_info_output_cls_prob = tf.saved_model.utils.build_tensor_info(output_tensor_cls_prob) tensor_info_output_box_pred =", "contains an unknown height and width # that is set dynamically by decode_jpeg.", "', boxes ) textdetector = TextDetector() print('textDetector, ', textdetector ) boxes = textdetector.detect(boxes,", "width # that is set dynamically by decode_jpeg. 
In other words, the height", "name='') ctpn_sess.run(tf.global_variables_initializer()) cv2img = cv2.imread(\"../data/demo/006.jpg\", cv2.IMREAD_COLOR) result_boxes=query_ctpn(ctpn_sess, cv2img) print('Creating boxes done') ''' No", "box in boxes: min_x = min(int(box[0]/scale), int(box[2]/scale), int(box[4]/scale), int(box[6]/scale)) min_y = min(int(box[1]/scale), int(box[3]/scale),", "bounding box have followed coordinates: [(xmin, ymin), (xmax, ymax)] (xmin, ymin) ------------- |", "= textdetector.detect(boxes, scores[:, np.newaxis], img.shape[:2]) print('boxes=textdetector, ', boxes ) # Convert boxes to", "im_scales[0]]], dtype=np.float32) cls_prob, box_pred = sess.run([output_cls_box, output_box_pred], feed_dict={input_img: blobs['data']}) #print('cls_prob, ', cls_prob, box_pred", "min_y = min(int(box[1]/scale), int(box[3]/scale), int(box[5]/scale), int(box[7]/scale)) max_x = max(int(box[0]/scale), int(box[2]/scale), int(box[4]/scale), int(box[6]/scale)) max_y", "def preprocess_image(image_buffer): \"\"\"Preprocess JPEG encoded bytes to 3D float Tensor.\"\"\" # Decode the", "at compile-time. 
image = tf.image.decode_image(image_buffer, channels=3) image.set_shape([256, 256, 256,3]) # self.img_pl = tf.placeholder(tf.string,", "ymin), (xmax, ymax)] (xmin, ymin) ------------- | | ---------------- (xmax, ymax) \"\"\" #", "') raw_image = tf.placeholder(tf.string, name='tf_box') feature_configs = { 'image/encoded': tf.FixedLenFeature( shape=[], dtype=tf.string), }", "= sess.graph.get_tensor_by_name('Reshape_2:0') output_box_pred = sess.graph.get_tensor_by_name('rpn_bbox_pred/Reshape_1:0') #print('query_pb : img, ', img) img, scale =", "instead of [0, 1) image = tf.subtract(image, 0.5) image = tf.multiply(image, 2.0) return", "print('textDetector, ', textdetector ) boxes = textdetector.detect(boxes, scores[:, np.newaxis], img.shape[:2]) print('boxes=textdetector, ', boxes", "def resize_im(im, scale, max_scale=None): f = float(scale) / min(im.shape[0], im.shape[1]) if max_scale !=", "rois[:, 0] #print('scores, ', scores ) boxes = rois[:, 1:5] / im_scales[0] #print('boxes=rois,", "tf import numpy as np import os, sys, cv2 from tensorflow.python.platform import gfile", "', rects) return rects def export(): ''' No 1 Sess outf of 2", "''' export_path_base = args.export_model_dir export_path = os.path.join(tf.compat.as_bytes(export_path_base), tf.compat.as_bytes(str(args.model_version))) ''' builder = tf.saved_model.builder.SavedModelBuilder('../exportPo/1') #print('Exporting", "img: numpy array image Returns: A list of detected bounding boxes, each bounding", "'image/encoded': tf.FixedLenFeature( shape=[], dtype=tf.string), } tf_example = tf.parse_example(raw_image , feature_configs) jpegs = tf_example['image/encoded']", ") boxes = rois[:, 1:5] / im_scales[0] #print('boxes=rois, ', boxes ) textdetector =", "textdetector ) boxes = textdetector.detect(boxes, scores[:, np.newaxis], img.shape[:2]) print('boxes=textdetector, ', boxes ) #", "shape=[], dtype=tf.string), } tf_example = tf.parse_example(raw_image , feature_configs) jpegs = tf_example['image/encoded'] 
image_string =", ": img, ', img) img, scale = resize_im(cv2img, scale=TextLineCfg.SCALE, max_scale=TextLineCfg.MAX_SCALE) blobs, im_scales =", "CTPN config img: numpy array image Returns: A list of detected bounding boxes,", "of 2 : ctpn_sess ''' cfg_from_file(os.path.join(dir_path, 'text_post.yml')) config = tf.ConfigProto(allow_soft_placement=True) ctpn_sess = tf.Session(config=config)", "string as an RGB JPEG. # Note that the resulting image contains an", "/ min(im.shape[0], im.shape[1]) if max_scale != None and f * max(im.shape[0], im.shape[1]) >", "= TextDetector() print('textDetector, ', textdetector ) boxes = textdetector.detect(boxes, scores[:, np.newaxis], img.shape[:2]) print('boxes=textdetector,", "Sess outf of 2 : ctpn_sess ''' cfg_from_file(os.path.join(dir_path, 'text_post.yml')) config = tf.ConfigProto(allow_soft_placement=True) ctpn_sess", "output_tensor_cls_prob,output_tensor_box_pred = tf.import_graph_def\\ (tf.get_default_graph().as_graph_def(), input_map={'Placeholder:0': jpeg}, return_elements=['Reshape_2:0','rpn_bbox_pred/Reshape_1:0']) tensor_info_input = tf.saved_model.utils.build_tensor_info(raw_image) tensor_info_output_cls_prob = tf.saved_model.utils.build_tensor_info(output_tensor_cls_prob)", "= tf.saved_model.utils.build_tensor_info(crop_resize_img) #output_crop_resize_img_info = tf.saved_model.utils.build_tensor_info(crop_resize_im_info) #---------- ''' result_boxes= np.array(result_boxes, dtype=np.float32) result_boxes= tf.convert_to_tensor(result_boxes) tensor_info_output_boxes", "max(int(box[1]/scale), int(box[3]/scale), int(box[5]/scale), int(box[7]/scale)) rects.append([(min_x, min_y), (max_x, max_y)]) print('rects.append, ', rects) return rects", "int(box[4]/scale), int(box[6]/scale)) min_y = min(int(box[1]/scale), int(box[3]/scale), int(box[5]/scale), int(box[7]/scale)) max_x = max(int(box[0]/scale), int(box[2]/scale), int(box[4]/scale),", "scores ) boxes = rois[:, 1:5] / im_scales[0] #print('boxes=rois, ', boxes ) 
textdetector", "---------------- (xmax, ymax) \"\"\" # Specify input/output input_img = sess.graph.get_tensor_by_name('Placeholder:0') output_cls_box = sess.graph.get_tensor_by_name('Reshape_2:0')", "with an area containing 87.5% of # the original image. image = tf.image.central_crop(image,", "an area containing 87.5% of # the original image. image = tf.image.central_crop(image, central_fraction=0.875)", "Tensor.\"\"\" # Decode the string as an RGB JPEG. # Note that the", "87.5% of # the original image. image = tf.image.central_crop(image, central_fraction=0.875) image = tf.expand_dims(image,", "Convert boxes to bouding rectangles rects = [] for box in boxes: min_x", "the central region of the image with an area containing 87.5% of #", "= cv2.imread(\"../data/demo/006.jpg\", cv2.IMREAD_COLOR) result_boxes=query_ctpn(ctpn_sess, cv2img) print('Creating boxes done') ''' No 2 Sess outf", "tf.placeholder(tf.string, name='input_image_as_bytes') # After this point, all image pixels reside in [0,1) #", "cfg_from_file from lib.fast_rcnn.test import test_ctpn from lib.utils.timer import Timer from lib.text_connector.detectors import TextDetector", "#print('scores, ', scores ) boxes = rois[:, 1:5] / im_scales[0] #print('boxes=rois, ', boxes", "import Config as TextLineCfg from lib.fast_rcnn.test import _get_blobs from lib.rpn_msr.proposal_layer_tf import proposal_layer dir_path", "various # adjust_* ops all require this range for dtype float. 
image =", "args.export_model_dir export_path = os.path.join(tf.compat.as_bytes(export_path_base), tf.compat.as_bytes(str(args.model_version))) ''' builder = tf.saved_model.builder.SavedModelBuilder('../exportPo/1') #print('Exporting trained model to',", "graph_def = tf.GraphDef() graph_def.ParseFromString(f.read()) ctpn_sess.graph.as_default() tf.import_graph_def(graph_def, name='') ctpn_sess.run(tf.global_variables_initializer()) cv2img = cv2.imread(\"../data/demo/006.jpg\", cv2.IMREAD_COLOR) result_boxes=query_ctpn(ctpn_sess,", "256,3]) # self.img_pl = tf.placeholder(tf.string, name='input_image_as_bytes') # After this point, all image pixels", "with ctpn_sess.as_default(): with tf.gfile.FastGFile('../data/ctpn.pb', 'rb') as f: graph_def = tf.GraphDef() graph_def.ParseFromString(f.read()) ctpn_sess.graph.as_default() tf.import_graph_def(graph_def,", "int(box[2]/scale), int(box[4]/scale), int(box[6]/scale)) max_y = max(int(box[1]/scale), int(box[3]/scale), int(box[5]/scale), int(box[7]/scale)) rects.append([(min_x, min_y), (max_x, max_y)])", "import tensorflow as tf import numpy as np import os, sys, cv2 from", "def query_ctpn(sess, cv2img): \"\"\"Args: sess: tensorflow session cfg: CTPN config img: numpy array", "require this range for dtype float. 
image = tf.image.convert_image_dtype(image, dtype=tf.float32) # Crop the", "', img) img, scale = resize_im(cv2img, scale=TextLineCfg.SCALE, max_scale=TextLineCfg.MAX_SCALE) blobs, im_scales = _get_blobs(img, None)", "= os.path.dirname(os.path.realpath(__file__)) def resize_im(im, scale, max_scale=None): f = float(scale) / min(im.shape[0], im.shape[1]) if", "= proposal_layer(cls_prob, box_pred, blobs['im_info'], 'TEST', anchor_scales=cfg.ANCHOR_SCALES) print('rois, ', rois) scores = rois[:, 0]", "feature_configs) jpegs = tf_example['image/encoded'] image_string = tf.reshape(jpegs, shape=[]) jpeg= preprocess_image(image_string) print('jpeg,jpeg.shape[]', jpeg, jpeg.shape)", "point, all image pixels reside in [0,1) # until the very end, when", "result_boxes= tf.convert_to_tensor(result_boxes) tensor_info_output_boxes = tf.saved_model.utils.build_tensor_info(result_boxes) prediction_post_signature = ( tf.saved_model.signature_def_utils.build_signature_def( inputs={'images': tensor_info_input}, outputs={'detection_boxes': tensor_info_output_boxes},", "tf.squeeze(image, [0]) # Finally, rescale to [-1,1] instead of [0, 1) image =", "encoded bytes to 3D float Tensor.\"\"\" # Decode the string as an RGB", "int(box[7]/scale)) max_x = max(int(box[0]/scale), int(box[2]/scale), int(box[4]/scale), int(box[6]/scale)) max_y = max(int(box[1]/scale), int(box[3]/scale), int(box[5]/scale), int(box[7]/scale))", "max_scale: f = float(max_scale) / max(im.shape[0], im.shape[1]) return cv2.resize(im, None, None, fx=f, fy=f,", "box have followed coordinates: [(xmin, ymin), (xmax, ymax)] (xmin, ymin) ------------- | |", "restored_graph_def, input_map=None, return_elements=None, name=\"\" ) ''' export_path_base = args.export_model_dir export_path = os.path.join(tf.compat.as_bytes(export_path_base), tf.compat.as_bytes(str(args.model_version)))", "from lib.fast_rcnn.test import _get_blobs from lib.rpn_msr.proposal_layer_tf import proposal_layer dir_path = 
os.path.dirname(os.path.realpath(__file__)) def resize_im(im,", "boxes = rois[:, 1:5] / im_scales[0] #print('boxes=rois, ', boxes ) textdetector = TextDetector()", "lib.text_connector.text_connect_cfg import Config as TextLineCfg from lib.fast_rcnn.test import _get_blobs from lib.rpn_msr.proposal_layer_tf import proposal_layer", "{ 'image/encoded': tf.FixedLenFeature( shape=[], dtype=tf.string), } tf_example = tf.parse_example(raw_image , feature_configs) jpegs =", "bouding rectangles rects = [] for box in boxes: min_x = min(int(box[0]/scale), int(box[2]/scale),", "tf.ConfigProto(allow_soft_placement=True) ctpn_sess = tf.Session(config=config) with ctpn_sess.as_default(): with tf.gfile.FastGFile('../data/ctpn.pb', 'rb') as f: graph_def =", "pixels reside in [0,1) # until the very end, when they're rescaled to", "RGB JPEG. # Note that the resulting image contains an unknown height and", "# After this point, all image pixels reside in [0,1) # until the", "return rects def export(): ''' No 1 Sess outf of 2 : ctpn_sess", "dtype=np.float32) result_boxes= tf.convert_to_tensor(result_boxes) tensor_info_output_boxes = tf.saved_model.utils.build_tensor_info(result_boxes) prediction_post_signature = ( tf.saved_model.signature_def_utils.build_signature_def( inputs={'images': tensor_info_input}, outputs={'detection_boxes':", "is set dynamically by decode_jpeg. In other words, the height # and width", "= resize_im(cv2img, scale=TextLineCfg.SCALE, max_scale=TextLineCfg.MAX_SCALE) blobs, im_scales = _get_blobs(img, None) if cfg.TEST.HAS_RPN: im_blob =", "area containing 87.5% of # the original image. 
image = tf.image.central_crop(image, central_fraction=0.875) image", "blobs['im_info'], 'TEST', anchor_scales=cfg.ANCHOR_SCALES) print('rois, ', rois) scores = rois[:, 0] #print('scores, ', scores", "', textdetector ) boxes = textdetector.detect(boxes, scores[:, np.newaxis], img.shape[:2]) print('boxes=textdetector, ', boxes )", "rects.append([(min_x, min_y), (max_x, max_y)]) print('rects.append, ', rects) return rects def export(): ''' No", "# adjust_* ops all require this range for dtype float. image = tf.image.convert_image_dtype(image,", "= tf.subtract(image, 0.5) image = tf.multiply(image, 2.0) return image def query_ctpn(sess, cv2img): \"\"\"Args:", "''' No 1 Sess outf of 2 : ctpn_sess ''' cfg_from_file(os.path.join(dir_path, 'text_post.yml')) config", "''' #crop_resize_img,crop_resize_im_info = resize_im(cv2img, result_boxes) #crop_resize_img,crop_resize_im_info = crop_resize_image(imageplaceholder_info, result_boxes) # output_crop_resize_img = tf.saved_model.utils.build_tensor_info(crop_resize_img)", "# Note that the resulting image contains an unknown height and width #", "# and width of image is unknown at compile-time. 
image = tf.image.decode_image(image_buffer, channels=3)", "cv2.imread(\"../data/demo/006.jpg\", cv2.IMREAD_COLOR) result_boxes=query_ctpn(ctpn_sess, cv2img) print('Creating boxes done') ''' No 2 Sess outf of", "tf.Session(config=config) with ctpn_sess.as_default(): with tf.gfile.FastGFile('../data/ctpn.pb', 'rb') as f: graph_def = tf.GraphDef() graph_def.ParseFromString(f.read()) ctpn_sess.graph.as_default()", "sess: with gfile.FastGFile('../data/ctpn.pb', 'rb') as f: restored_graph_def = tf.GraphDef() restored_graph_def.ParseFromString(f.read()) tf.import_graph_def( restored_graph_def, input_map=None,", "= min(int(box[1]/scale), int(box[3]/scale), int(box[5]/scale), int(box[7]/scale)) max_x = max(int(box[0]/scale), int(box[2]/scale), int(box[4]/scale), int(box[6]/scale)) max_y =", "max(im.shape[0], im.shape[1]) > max_scale: f = float(max_scale) / max(im.shape[0], im.shape[1]) return cv2.resize(im, None,", "JPEG encoded bytes to 3D float Tensor.\"\"\" # Decode the string as an", "to [-1,1] instead of [0, 1) image = tf.subtract(image, 0.5) image = tf.multiply(image,", "ctpn_sess ''' cfg_from_file(os.path.join(dir_path, 'text_post.yml')) config = tf.ConfigProto(allow_soft_placement=True) ctpn_sess = tf.Session(config=config) with ctpn_sess.as_default(): with", "model to', export_path) print('Exporting trained model ') raw_image = tf.placeholder(tf.string, name='tf_box') feature_configs =", "tensorflow.python.platform import gfile import glob import shutil dir_path = os.path.dirname(os.path.realpath(__file__)) sys.path.append(os.path.join(dir_path, '..')) from", "tf.compat.as_bytes(str(args.model_version))) ''' builder = tf.saved_model.builder.SavedModelBuilder('../exportPo/1') #print('Exporting trained model to', export_path) print('Exporting trained model", "boxes to bouding rectangles rects = [] for box in boxes: min_x =", "/ max(im.shape[0], im.shape[1]) return cv2.resize(im, None, None, fx=f, fy=f, interpolation=cv2.INTER_LINEAR), f def 
preprocess_image(image_buffer):", "import numpy as np import os, sys, cv2 from tensorflow.python.platform import gfile import", "scale, max_scale=None): f = float(scale) / min(im.shape[0], im.shape[1]) if max_scale != None and", "# Crop the central region of the image with an area containing 87.5%", "= tf.squeeze(image, [0]) # Finally, rescale to [-1,1] instead of [0, 1) image", "int(box[5]/scale), int(box[7]/scale)) max_x = max(int(box[0]/scale), int(box[2]/scale), int(box[4]/scale), int(box[6]/scale)) max_y = max(int(box[1]/scale), int(box[3]/scale), int(box[5]/scale),", "= ( tf.saved_model.signature_def_utils.build_signature_def( inputs={'images': tensor_info_input}, outputs={'detection_boxes': tensor_info_output_boxes}, #outputs={'detection_boxes': tensor_info_output_boxes, # 'resize_im_info':im_info_output, # 'crop_resize_img':", "rectangles rects = [] for box in boxes: min_x = min(int(box[0]/scale), int(box[2]/scale), int(box[4]/scale),", "max(im.shape[0], im.shape[1]) return cv2.resize(im, None, None, fx=f, fy=f, interpolation=cv2.INTER_LINEAR), f def preprocess_image(image_buffer): \"\"\"Preprocess", "tf.Session() as sess: with gfile.FastGFile('../data/ctpn.pb', 'rb') as f: restored_graph_def = tf.GraphDef() restored_graph_def.ParseFromString(f.read()) tf.import_graph_def(", "output_crop_resize_img_info,}, method_name=tf.saved_model.signature_constants.PREDICT_METHOD_NAME )) builder.add_meta_graph_and_variables( sess, [tf.saved_model.tag_constants.SERVING], signature_def_map={ # 'predict_images':prediction_signature, 'predict_images_post': prediction_post_signature }) builder.save(as_text=False)", "and width # that is set dynamically by decode_jpeg. 
In other words, the", "blobs['data'] blobs['im_info'] = np.array([[im_blob.shape[1], im_blob.shape[2], im_scales[0]]], dtype=np.float32) cls_prob, box_pred = sess.run([output_cls_box, output_box_pred], feed_dict={input_img:", "\"\"\"Args: sess: tensorflow session cfg: CTPN config img: numpy array image Returns: A", "In other words, the height # and width of image is unknown at", "= tf.reshape(jpegs, shape=[]) jpeg= preprocess_image(image_string) print('jpeg,jpeg.shape[]', jpeg, jpeg.shape) output_tensor_cls_prob,output_tensor_box_pred = tf.import_graph_def\\ (tf.get_default_graph().as_graph_def(), input_map={'Placeholder:0':", "[] for box in boxes: min_x = min(int(box[0]/scale), int(box[2]/scale), int(box[4]/scale), int(box[6]/scale)) min_y =", "( tf.saved_model.signature_def_utils.build_signature_def( inputs={'images': tensor_info_input}, outputs={'detection_boxes': tensor_info_output_boxes}, #outputs={'detection_boxes': tensor_info_output_boxes, # 'resize_im_info':im_info_output, # 'crop_resize_img': output_crop_resize_img,", "0) image = tf.squeeze(image, [0]) # Finally, rescale to [-1,1] instead of [0,", "cv2img = cv2.imread(\"../data/demo/006.jpg\", cv2.IMREAD_COLOR) result_boxes=query_ctpn(ctpn_sess, cv2img) print('Creating boxes done') ''' No 2 Sess", "rois[:, 1:5] / im_scales[0] #print('boxes=rois, ', boxes ) textdetector = TextDetector() print('textDetector, ',", "(xmin, ymin) ------------- | | ---------------- (xmax, ymax) \"\"\" # Specify input/output input_img", "export_path_base = args.export_model_dir export_path = os.path.join(tf.compat.as_bytes(export_path_base), tf.compat.as_bytes(str(args.model_version))) ''' builder = tf.saved_model.builder.SavedModelBuilder('../exportPo/1') #print('Exporting trained", "box_pred ) rois, _ = proposal_layer(cls_prob, box_pred, blobs['im_info'], 'TEST', anchor_scales=cfg.ANCHOR_SCALES) print('rois, ', rois)", "ops all require this range for dtype float. 
image = tf.image.convert_image_dtype(image, dtype=tf.float32) #", "= min(int(box[0]/scale), int(box[2]/scale), int(box[4]/scale), int(box[6]/scale)) min_y = min(int(box[1]/scale), int(box[3]/scale), int(box[5]/scale), int(box[7]/scale)) max_x =", "have followed coordinates: [(xmin, ymin), (xmax, ymax)] (xmin, ymin) ------------- | | ----------------", "lib.text_connector.detectors import TextDetector from lib.text_connector.text_connect_cfg import Config as TextLineCfg from lib.fast_rcnn.test import _get_blobs", "tensor_info_output_boxes}, #outputs={'detection_boxes': tensor_info_output_boxes, # 'resize_im_info':im_info_output, # 'crop_resize_img': output_crop_resize_img, # 'crop_resize_im_info': output_crop_resize_img_info,}, method_name=tf.saved_model.signature_constants.PREDICT_METHOD_NAME ))", "image Returns: A list of detected bounding boxes, each bounding box have followed", "= rois[:, 0] #print('scores, ', scores ) boxes = rois[:, 1:5] / im_scales[0]", "shape=[]) jpeg= preprocess_image(image_string) print('jpeg,jpeg.shape[]', jpeg, jpeg.shape) output_tensor_cls_prob,output_tensor_box_pred = tf.import_graph_def\\ (tf.get_default_graph().as_graph_def(), input_map={'Placeholder:0': jpeg}, return_elements=['Reshape_2:0','rpn_bbox_pred/Reshape_1:0'])", "| | ---------------- (xmax, ymax) \"\"\" # Specify input/output input_img = sess.graph.get_tensor_by_name('Placeholder:0') output_cls_box", "min_y), (max_x, max_y)]) print('rects.append, ', rects) return rects def export(): ''' No 1", "export_path) print('Exporting trained model ') raw_image = tf.placeholder(tf.string, name='tf_box') feature_configs = { 'image/encoded':", "max_y)]) print('rects.append, ', rects) return rects def export(): ''' No 1 Sess outf", "int(box[3]/scale), int(box[5]/scale), int(box[7]/scale)) rects.append([(min_x, min_y), (max_x, max_y)]) print('rects.append, ', rects) return rects def", "box_pred, blobs['im_info'], 'TEST', anchor_scales=cfg.ANCHOR_SCALES) print('rois, ', rois) scores = 
rois[:, 0] #print('scores, ',", "fy=f, interpolation=cv2.INTER_LINEAR), f def preprocess_image(image_buffer): \"\"\"Preprocess JPEG encoded bytes to 3D float Tensor.\"\"\"", "import glob import shutil dir_path = os.path.dirname(os.path.realpath(__file__)) sys.path.append(os.path.join(dir_path, '..')) from lib.networks.factory import get_network", "restored_graph_def.ParseFromString(f.read()) tf.import_graph_def( restored_graph_def, input_map=None, return_elements=None, name=\"\" ) ''' export_path_base = args.export_model_dir export_path =", "tf_example['image/encoded'] image_string = tf.reshape(jpegs, shape=[]) jpeg= preprocess_image(image_string) print('jpeg,jpeg.shape[]', jpeg, jpeg.shape) output_tensor_cls_prob,output_tensor_box_pred = tf.import_graph_def\\", "sess.graph.get_tensor_by_name('Reshape_2:0') output_box_pred = sess.graph.get_tensor_by_name('rpn_bbox_pred/Reshape_1:0') #print('query_pb : img, ', img) img, scale = resize_im(cv2img,", "dir_path = os.path.dirname(os.path.realpath(__file__)) def resize_im(im, scale, max_scale=None): f = float(scale) / min(im.shape[0], im.shape[1])", "_get_blobs(img, None) if cfg.TEST.HAS_RPN: im_blob = blobs['data'] blobs['im_info'] = np.array([[im_blob.shape[1], im_blob.shape[2], im_scales[0]]], dtype=np.float32)", "trained model ') raw_image = tf.placeholder(tf.string, name='tf_box') feature_configs = { 'image/encoded': tf.FixedLenFeature( shape=[],", "dynamically by decode_jpeg. 
In other words, the height # and width of image", "os.path.dirname(os.path.realpath(__file__)) def resize_im(im, scale, max_scale=None): f = float(scale) / min(im.shape[0], im.shape[1]) if max_scale", "_get_blobs from lib.rpn_msr.proposal_layer_tf import proposal_layer dir_path = os.path.dirname(os.path.realpath(__file__)) def resize_im(im, scale, max_scale=None): f", "tf.GraphDef() graph_def.ParseFromString(f.read()) ctpn_sess.graph.as_default() tf.import_graph_def(graph_def, name='') ctpn_sess.run(tf.global_variables_initializer()) cv2img = cv2.imread(\"../data/demo/006.jpg\", cv2.IMREAD_COLOR) result_boxes=query_ctpn(ctpn_sess, cv2img) print('Creating", "image_string = tf.reshape(jpegs, shape=[]) jpeg= preprocess_image(image_string) print('jpeg,jpeg.shape[]', jpeg, jpeg.shape) output_tensor_cls_prob,output_tensor_box_pred = tf.import_graph_def\\ (tf.get_default_graph().as_graph_def(),", "'..')) from lib.networks.factory import get_network from lib.fast_rcnn.config import cfg, cfg_from_file from lib.fast_rcnn.test import", "''' cfg_from_file(os.path.join(dir_path, 'text_post.yml')) config = tf.ConfigProto(allow_soft_placement=True) ctpn_sess = tf.Session(config=config) with ctpn_sess.as_default(): with tf.gfile.FastGFile('../data/ctpn.pb',", "from lib.text_connector.text_connect_cfg import Config as TextLineCfg from lib.fast_rcnn.test import _get_blobs from lib.rpn_msr.proposal_layer_tf import", "None, None, fx=f, fy=f, interpolation=cv2.INTER_LINEAR), f def preprocess_image(image_buffer): \"\"\"Preprocess JPEG encoded bytes to", "return_elements=['Reshape_2:0','rpn_bbox_pred/Reshape_1:0']) tensor_info_input = tf.saved_model.utils.build_tensor_info(raw_image) tensor_info_output_cls_prob = tf.saved_model.utils.build_tensor_info(output_tensor_cls_prob) tensor_info_output_box_pred = tf.saved_model.utils.build_tensor_info(output_tensor_box_pred) ''' #crop_resize_img,crop_resize_im_info =", "= tf.placeholder(tf.string, name='input_image_as_bytes') # After this point, 
all image pixels reside in [0,1)", "resize_im(cv2img, scale=TextLineCfg.SCALE, max_scale=TextLineCfg.MAX_SCALE) blobs, im_scales = _get_blobs(img, None) if cfg.TEST.HAS_RPN: im_blob = blobs['data']", ")) builder.add_meta_graph_and_variables( sess, [tf.saved_model.tag_constants.SERVING], signature_def_map={ # 'predict_images':prediction_signature, 'predict_images_post': prediction_post_signature }) builder.save(as_text=False) if __name__", "image = tf.multiply(image, 2.0) return image def query_ctpn(sess, cv2img): \"\"\"Args: sess: tensorflow session", "Finally, rescale to [-1,1] instead of [0, 1) image = tf.subtract(image, 0.5) image", "detected bounding boxes, each bounding box have followed coordinates: [(xmin, ymin), (xmax, ymax)]", "', rois) scores = rois[:, 0] #print('scores, ', scores ) boxes = rois[:,", "int(box[2]/scale), int(box[4]/scale), int(box[6]/scale)) min_y = min(int(box[1]/scale), int(box[3]/scale), int(box[5]/scale), int(box[7]/scale)) max_x = max(int(box[0]/scale), int(box[2]/scale),", "A list of detected bounding boxes, each bounding box have followed coordinates: [(xmin,", "tensorflow as tf import numpy as np import os, sys, cv2 from tensorflow.python.platform", "= os.path.join(tf.compat.as_bytes(export_path_base), tf.compat.as_bytes(str(args.model_version))) ''' builder = tf.saved_model.builder.SavedModelBuilder('../exportPo/1') #print('Exporting trained model to', export_path) print('Exporting", "np import os, sys, cv2 from tensorflow.python.platform import gfile import glob import shutil", "feed_dict={input_img: blobs['data']}) #print('cls_prob, ', cls_prob, box_pred ) print('box_pred, ', box_pred ) rois, _", "tf.saved_model.utils.build_tensor_info(output_tensor_box_pred) ''' #crop_resize_img,crop_resize_im_info = resize_im(cv2img, result_boxes) #crop_resize_img,crop_resize_im_info = crop_resize_image(imageplaceholder_info, result_boxes) # output_crop_resize_img =", "return_elements=None, name=\"\" ) ''' export_path_base = 
args.export_model_dir export_path = os.path.join(tf.compat.as_bytes(export_path_base), tf.compat.as_bytes(str(args.model_version))) ''' builder", "and width of image is unknown at compile-time. image = tf.image.decode_image(image_buffer, channels=3) image.set_shape([256,", "= max(int(box[1]/scale), int(box[3]/scale), int(box[5]/scale), int(box[7]/scale)) rects.append([(min_x, min_y), (max_x, max_y)]) print('rects.append, ', rects) return", "image is unknown at compile-time. image = tf.image.decode_image(image_buffer, channels=3) image.set_shape([256, 256, 256,3]) #", "output_cls_box = sess.graph.get_tensor_by_name('Reshape_2:0') output_box_pred = sess.graph.get_tensor_by_name('rpn_bbox_pred/Reshape_1:0') #print('query_pb : img, ', img) img, scale", "is unknown at compile-time. image = tf.image.decode_image(image_buffer, channels=3) image.set_shape([256, 256, 256,3]) # self.img_pl", "input_map={'Placeholder:0': jpeg}, return_elements=['Reshape_2:0','rpn_bbox_pred/Reshape_1:0']) tensor_info_input = tf.saved_model.utils.build_tensor_info(raw_image) tensor_info_output_cls_prob = tf.saved_model.utils.build_tensor_info(output_tensor_cls_prob) tensor_info_output_box_pred = tf.saved_model.utils.build_tensor_info(output_tensor_box_pred) '''", "int(box[6]/scale)) max_y = max(int(box[1]/scale), int(box[3]/scale), int(box[5]/scale), int(box[7]/scale)) rects.append([(min_x, min_y), (max_x, max_y)]) print('rects.append, ',", "tf.saved_model.signature_def_utils.build_signature_def( inputs={'images': tensor_info_input}, outputs={'detection_boxes': tensor_info_output_boxes}, #outputs={'detection_boxes': tensor_info_output_boxes, # 'resize_im_info':im_info_output, # 'crop_resize_img': output_crop_resize_img, #", "(tf.get_default_graph().as_graph_def(), input_map={'Placeholder:0': jpeg}, return_elements=['Reshape_2:0','rpn_bbox_pred/Reshape_1:0']) tensor_info_input = tf.saved_model.utils.build_tensor_info(raw_image) tensor_info_output_cls_prob = 
tf.saved_model.utils.build_tensor_info(output_tensor_cls_prob) tensor_info_output_box_pred = tf.saved_model.utils.build_tensor_info(output_tensor_box_pred)", "[tf.saved_model.tag_constants.SERVING], signature_def_map={ # 'predict_images':prediction_signature, 'predict_images_post': prediction_post_signature }) builder.save(as_text=False) if __name__ == '__main__': export()", "import _get_blobs from lib.rpn_msr.proposal_layer_tf import proposal_layer dir_path = os.path.dirname(os.path.realpath(__file__)) def resize_im(im, scale, max_scale=None):", "lib.rpn_msr.proposal_layer_tf import proposal_layer dir_path = os.path.dirname(os.path.realpath(__file__)) def resize_im(im, scale, max_scale=None): f = float(scale)", "ctpn_sess = tf.Session(config=config) with ctpn_sess.as_default(): with tf.gfile.FastGFile('../data/ctpn.pb', 'rb') as f: graph_def = tf.GraphDef()", "and f * max(im.shape[0], im.shape[1]) > max_scale: f = float(max_scale) / max(im.shape[0], im.shape[1])", "= tf.image.convert_image_dtype(image, dtype=tf.float32) # Crop the central region of the image with an", "print('box_pred, ', box_pred ) rois, _ = proposal_layer(cls_prob, box_pred, blobs['im_info'], 'TEST', anchor_scales=cfg.ANCHOR_SCALES) print('rois,", "from lib.rpn_msr.proposal_layer_tf import proposal_layer dir_path = os.path.dirname(os.path.realpath(__file__)) def resize_im(im, scale, max_scale=None): f =", "', boxes ) # Convert boxes to bouding rectangles rects = [] for", "lib.utils.timer import Timer from lib.text_connector.detectors import TextDetector from lib.text_connector.text_connect_cfg import Config as TextLineCfg", "rects) return rects def export(): ''' No 1 Sess outf of 2 :", "cv2.IMREAD_COLOR) result_boxes=query_ctpn(ctpn_sess, cv2img) print('Creating boxes done') ''' No 2 Sess outf of 2:sess", "#print('Exporting trained model to', export_path) print('Exporting trained model ') raw_image = tf.placeholder(tf.string, name='tf_box')", "model ') raw_image = tf.placeholder(tf.string, 
name='tf_box') feature_configs = { 'image/encoded': tf.FixedLenFeature( shape=[], dtype=tf.string),", "3D float Tensor.\"\"\" # Decode the string as an RGB JPEG. # Note", "# until the very end, when they're rescaled to (-1, 1). The various", "from lib.fast_rcnn.config import cfg, cfg_from_file from lib.fast_rcnn.test import test_ctpn from lib.utils.timer import Timer", "other words, the height # and width of image is unknown at compile-time.", "print('boxes=textdetector, ', boxes ) # Convert boxes to bouding rectangles rects = []", "followed coordinates: [(xmin, ymin), (xmax, ymax)] (xmin, ymin) ------------- | | ---------------- (xmax,", "tf.saved_model.utils.build_tensor_info(crop_resize_im_info) #---------- ''' result_boxes= np.array(result_boxes, dtype=np.float32) result_boxes= tf.convert_to_tensor(result_boxes) tensor_info_output_boxes = tf.saved_model.utils.build_tensor_info(result_boxes) prediction_post_signature =", "# the original image. image = tf.image.central_crop(image, central_fraction=0.875) image = tf.expand_dims(image, 0) image", ": ctpn_sess ''' cfg_from_file(os.path.join(dir_path, 'text_post.yml')) config = tf.ConfigProto(allow_soft_placement=True) ctpn_sess = tf.Session(config=config) with ctpn_sess.as_default():", "the original image. image = tf.image.central_crop(image, central_fraction=0.875) image = tf.expand_dims(image, 0) image =", "[0]) # Finally, rescale to [-1,1] instead of [0, 1) image = tf.subtract(image,", "if cfg.TEST.HAS_RPN: im_blob = blobs['data'] blobs['im_info'] = np.array([[im_blob.shape[1], im_blob.shape[2], im_scales[0]]], dtype=np.float32) cls_prob, box_pred", "return image def query_ctpn(sess, cv2img): \"\"\"Args: sess: tensorflow session cfg: CTPN config img:", "(-1, 1). 
The various # adjust_* ops all require this range for dtype", "tf.subtract(image, 0.5) image = tf.multiply(image, 2.0) return image def query_ctpn(sess, cv2img): \"\"\"Args: sess:", "= blobs['data'] blobs['im_info'] = np.array([[im_blob.shape[1], im_blob.shape[2], im_scales[0]]], dtype=np.float32) cls_prob, box_pred = sess.run([output_cls_box, output_box_pred],", "proposal_layer dir_path = os.path.dirname(os.path.realpath(__file__)) def resize_im(im, scale, max_scale=None): f = float(scale) / min(im.shape[0],", "to', export_path) print('Exporting trained model ') raw_image = tf.placeholder(tf.string, name='tf_box') feature_configs = {", "each bounding box have followed coordinates: [(xmin, ymin), (xmax, ymax)] (xmin, ymin) -------------", "# Decode the string as an RGB JPEG. # Note that the resulting", "Note that the resulting image contains an unknown height and width # that", "box_pred = sess.run([output_cls_box, output_box_pred], feed_dict={input_img: blobs['data']}) #print('cls_prob, ', cls_prob, box_pred ) print('box_pred, ',", "f = float(max_scale) / max(im.shape[0], im.shape[1]) return cv2.resize(im, None, None, fx=f, fy=f, interpolation=cv2.INTER_LINEAR),", "box_pred ) print('box_pred, ', box_pred ) rois, _ = proposal_layer(cls_prob, box_pred, blobs['im_info'], 'TEST',", "preprocess_image(image_string) print('jpeg,jpeg.shape[]', jpeg, jpeg.shape) output_tensor_cls_prob,output_tensor_box_pred = tf.import_graph_def\\ (tf.get_default_graph().as_graph_def(), input_map={'Placeholder:0': jpeg}, return_elements=['Reshape_2:0','rpn_bbox_pred/Reshape_1:0']) tensor_info_input =", "sess: tensorflow session cfg: CTPN config img: numpy array image Returns: A list", "TextDetector() print('textDetector, ', textdetector ) boxes = textdetector.detect(boxes, scores[:, np.newaxis], img.shape[:2]) print('boxes=textdetector, ',", "} tf_example = tf.parse_example(raw_image , feature_configs) jpegs = tf_example['image/encoded'] image_string = tf.reshape(jpegs, shape=[])", "2 Sess 
outf of 2:sess ''' with tf.Session() as sess: with gfile.FastGFile('../data/ctpn.pb', 'rb')", "[-1,1] instead of [0, 1) image = tf.subtract(image, 0.5) image = tf.multiply(image, 2.0)", "tf.saved_model.utils.build_tensor_info(result_boxes) prediction_post_signature = ( tf.saved_model.signature_def_utils.build_signature_def( inputs={'images': tensor_info_input}, outputs={'detection_boxes': tensor_info_output_boxes}, #outputs={'detection_boxes': tensor_info_output_boxes, # 'resize_im_info':im_info_output,", "an RGB JPEG. # Note that the resulting image contains an unknown height", "to bouding rectangles rects = [] for box in boxes: min_x = min(int(box[0]/scale),", "boxes ) # Convert boxes to bouding rectangles rects = [] for box", "bounding boxes, each bounding box have followed coordinates: [(xmin, ymin), (xmax, ymax)] (xmin,", "'text_post.yml')) config = tf.ConfigProto(allow_soft_placement=True) ctpn_sess = tf.Session(config=config) with ctpn_sess.as_default(): with tf.gfile.FastGFile('../data/ctpn.pb', 'rb') as", "input_img = sess.graph.get_tensor_by_name('Placeholder:0') output_cls_box = sess.graph.get_tensor_by_name('Reshape_2:0') output_box_pred = sess.graph.get_tensor_by_name('rpn_bbox_pred/Reshape_1:0') #print('query_pb : img, ',", "# 'crop_resize_img': output_crop_resize_img, # 'crop_resize_im_info': output_crop_resize_img_info,}, method_name=tf.saved_model.signature_constants.PREDICT_METHOD_NAME )) builder.add_meta_graph_and_variables( sess, [tf.saved_model.tag_constants.SERVING], signature_def_map={ #", "glob import shutil dir_path = os.path.dirname(os.path.realpath(__file__)) sys.path.append(os.path.join(dir_path, '..')) from lib.networks.factory import get_network from", "= rois[:, 1:5] / im_scales[0] #print('boxes=rois, ', boxes ) textdetector = TextDetector() print('textDetector,", "with tf.Session() as sess: with gfile.FastGFile('../data/ctpn.pb', 'rb') as f: restored_graph_def = tf.GraphDef() restored_graph_def.ParseFromString(f.read())", "boxes, 
each bounding box have followed coordinates: [(xmin, ymin), (xmax, ymax)] (xmin, ymin)", ") ''' export_path_base = args.export_model_dir export_path = os.path.join(tf.compat.as_bytes(export_path_base), tf.compat.as_bytes(str(args.model_version))) ''' builder = tf.saved_model.builder.SavedModelBuilder('../exportPo/1')", "float(scale) / min(im.shape[0], im.shape[1]) if max_scale != None and f * max(im.shape[0], im.shape[1])", "the string as an RGB JPEG. # Note that the resulting image contains", "= tf.multiply(image, 2.0) return image def query_ctpn(sess, cv2img): \"\"\"Args: sess: tensorflow session cfg:", "= tf.saved_model.utils.build_tensor_info(output_tensor_box_pred) ''' #crop_resize_img,crop_resize_im_info = resize_im(cv2img, result_boxes) #crop_resize_img,crop_resize_im_info = crop_resize_image(imageplaceholder_info, result_boxes) # output_crop_resize_img", "f: graph_def = tf.GraphDef() graph_def.ParseFromString(f.read()) ctpn_sess.graph.as_default() tf.import_graph_def(graph_def, name='') ctpn_sess.run(tf.global_variables_initializer()) cv2img = cv2.imread(\"../data/demo/006.jpg\", cv2.IMREAD_COLOR)", "= tf.saved_model.utils.build_tensor_info(output_tensor_cls_prob) tensor_info_output_box_pred = tf.saved_model.utils.build_tensor_info(output_tensor_box_pred) ''' #crop_resize_img,crop_resize_im_info = resize_im(cv2img, result_boxes) #crop_resize_img,crop_resize_im_info = crop_resize_image(imageplaceholder_info,", "None and f * max(im.shape[0], im.shape[1]) > max_scale: f = float(max_scale) / max(im.shape[0],", "from __future__ import print_function import tensorflow as tf import numpy as np import", "= np.array([[im_blob.shape[1], im_blob.shape[2], im_scales[0]]], dtype=np.float32) cls_prob, box_pred = sess.run([output_cls_box, output_box_pred], feed_dict={input_img: blobs['data']}) #print('cls_prob,", "image = tf.squeeze(image, [0]) # Finally, rescale to [-1,1] instead of [0, 1)", "= max(int(box[0]/scale), int(box[2]/scale), int(box[4]/scale), 
int(box[6]/scale)) max_y = max(int(box[1]/scale), int(box[3]/scale), int(box[5]/scale), int(box[7]/scale)) rects.append([(min_x, min_y),", "unknown at compile-time. image = tf.image.decode_image(image_buffer, channels=3) image.set_shape([256, 256, 256,3]) # self.img_pl =", "= tf.Session(config=config) with ctpn_sess.as_default(): with tf.gfile.FastGFile('../data/ctpn.pb', 'rb') as f: graph_def = tf.GraphDef() graph_def.ParseFromString(f.read())", "#outputs={'detection_boxes': tensor_info_output_boxes, # 'resize_im_info':im_info_output, # 'crop_resize_img': output_crop_resize_img, # 'crop_resize_im_info': output_crop_resize_img_info,}, method_name=tf.saved_model.signature_constants.PREDICT_METHOD_NAME )) builder.add_meta_graph_and_variables(", "print('Creating boxes done') ''' No 2 Sess outf of 2:sess ''' with tf.Session()", "import print_function import tensorflow as tf import numpy as np import os, sys,", "resulting image contains an unknown height and width # that is set dynamically", "Config as TextLineCfg from lib.fast_rcnn.test import _get_blobs from lib.rpn_msr.proposal_layer_tf import proposal_layer dir_path =", "= tf.GraphDef() graph_def.ParseFromString(f.read()) ctpn_sess.graph.as_default() tf.import_graph_def(graph_def, name='') ctpn_sess.run(tf.global_variables_initializer()) cv2img = cv2.imread(\"../data/demo/006.jpg\", cv2.IMREAD_COLOR) result_boxes=query_ctpn(ctpn_sess, cv2img)", "(xmax, ymax) \"\"\" # Specify input/output input_img = sess.graph.get_tensor_by_name('Placeholder:0') output_cls_box = sess.graph.get_tensor_by_name('Reshape_2:0') output_box_pred", "scores[:, np.newaxis], img.shape[:2]) print('boxes=textdetector, ', boxes ) # Convert boxes to bouding rectangles", "'rb') as f: graph_def = tf.GraphDef() graph_def.ParseFromString(f.read()) ctpn_sess.graph.as_default() tf.import_graph_def(graph_def, name='') ctpn_sess.run(tf.global_variables_initializer()) cv2img =", "done') ''' No 2 Sess outf of 2:sess ''' with tf.Session() as sess:", "as 
sess: with gfile.FastGFile('../data/ctpn.pb', 'rb') as f: restored_graph_def = tf.GraphDef() restored_graph_def.ParseFromString(f.read()) tf.import_graph_def( restored_graph_def,", "tf.expand_dims(image, 0) image = tf.squeeze(image, [0]) # Finally, rescale to [-1,1] instead of", "tf.image.central_crop(image, central_fraction=0.875) image = tf.expand_dims(image, 0) image = tf.squeeze(image, [0]) # Finally, rescale", "= tf.parse_example(raw_image , feature_configs) jpegs = tf_example['image/encoded'] image_string = tf.reshape(jpegs, shape=[]) jpeg= preprocess_image(image_string)", "After this point, all image pixels reside in [0,1) # until the very", "fx=f, fy=f, interpolation=cv2.INTER_LINEAR), f def preprocess_image(image_buffer): \"\"\"Preprocess JPEG encoded bytes to 3D float", "\"\"\"Preprocess JPEG encoded bytes to 3D float Tensor.\"\"\" # Decode the string as", "image = tf.image.decode_image(image_buffer, channels=3) image.set_shape([256, 256, 256,3]) # self.img_pl = tf.placeholder(tf.string, name='input_image_as_bytes') #", "in [0,1) # until the very end, when they're rescaled to (-1, 1).", "outf of 2 : ctpn_sess ''' cfg_from_file(os.path.join(dir_path, 'text_post.yml')) config = tf.ConfigProto(allow_soft_placement=True) ctpn_sess =", "until the very end, when they're rescaled to (-1, 1). 
The various #", "= { 'image/encoded': tf.FixedLenFeature( shape=[], dtype=tf.string), } tf_example = tf.parse_example(raw_image , feature_configs) jpegs", "#crop_resize_img,crop_resize_im_info = resize_im(cv2img, result_boxes) #crop_resize_img,crop_resize_im_info = crop_resize_image(imageplaceholder_info, result_boxes) # output_crop_resize_img = tf.saved_model.utils.build_tensor_info(crop_resize_img) #output_crop_resize_img_info", "dtype=tf.float32) # Crop the central region of the image with an area containing", "sess.graph.get_tensor_by_name('Placeholder:0') output_cls_box = sess.graph.get_tensor_by_name('Reshape_2:0') output_box_pred = sess.graph.get_tensor_by_name('rpn_bbox_pred/Reshape_1:0') #print('query_pb : img, ', img) img,", "im_scales[0] #print('boxes=rois, ', boxes ) textdetector = TextDetector() print('textDetector, ', textdetector ) boxes", "'rb') as f: restored_graph_def = tf.GraphDef() restored_graph_def.ParseFromString(f.read()) tf.import_graph_def( restored_graph_def, input_map=None, return_elements=None, name=\"\" )", "tf.import_graph_def(graph_def, name='') ctpn_sess.run(tf.global_variables_initializer()) cv2img = cv2.imread(\"../data/demo/006.jpg\", cv2.IMREAD_COLOR) result_boxes=query_ctpn(ctpn_sess, cv2img) print('Creating boxes done') '''", "max_scale=None): f = float(scale) / min(im.shape[0], im.shape[1]) if max_scale != None and f", "image = tf.image.central_crop(image, central_fraction=0.875) image = tf.expand_dims(image, 0) image = tf.squeeze(image, [0]) #", "import os, sys, cv2 from tensorflow.python.platform import gfile import glob import shutil dir_path", "= args.export_model_dir export_path = os.path.join(tf.compat.as_bytes(export_path_base), tf.compat.as_bytes(str(args.model_version))) ''' builder = tf.saved_model.builder.SavedModelBuilder('../exportPo/1') #print('Exporting trained model", "words, the height # and width of image is unknown at compile-time. 
image", "Specify input/output input_img = sess.graph.get_tensor_by_name('Placeholder:0') output_cls_box = sess.graph.get_tensor_by_name('Reshape_2:0') output_box_pred = sess.graph.get_tensor_by_name('rpn_bbox_pred/Reshape_1:0') #print('query_pb :", "builder.add_meta_graph_and_variables( sess, [tf.saved_model.tag_constants.SERVING], signature_def_map={ # 'predict_images':prediction_signature, 'predict_images_post': prediction_post_signature }) builder.save(as_text=False) if __name__ ==", "ymax) \"\"\" # Specify input/output input_img = sess.graph.get_tensor_by_name('Placeholder:0') output_cls_box = sess.graph.get_tensor_by_name('Reshape_2:0') output_box_pred =", "all require this range for dtype float. image = tf.image.convert_image_dtype(image, dtype=tf.float32) # Crop", "scale = resize_im(cv2img, scale=TextLineCfg.SCALE, max_scale=TextLineCfg.MAX_SCALE) blobs, im_scales = _get_blobs(img, None) if cfg.TEST.HAS_RPN: im_blob", "the very end, when they're rescaled to (-1, 1). The various # adjust_*", "[0,1) # until the very end, when they're rescaled to (-1, 1). The", "to 3D float Tensor.\"\"\" # Decode the string as an RGB JPEG. 
#", "import shutil dir_path = os.path.dirname(os.path.realpath(__file__)) sys.path.append(os.path.join(dir_path, '..')) from lib.networks.factory import get_network from lib.fast_rcnn.config", "from lib.text_connector.detectors import TextDetector from lib.text_connector.text_connect_cfg import Config as TextLineCfg from lib.fast_rcnn.test import", "outf of 2:sess ''' with tf.Session() as sess: with gfile.FastGFile('../data/ctpn.pb', 'rb') as f:", "method_name=tf.saved_model.signature_constants.PREDICT_METHOD_NAME )) builder.add_meta_graph_and_variables( sess, [tf.saved_model.tag_constants.SERVING], signature_def_map={ # 'predict_images':prediction_signature, 'predict_images_post': prediction_post_signature }) builder.save(as_text=False) if", "2:sess ''' with tf.Session() as sess: with gfile.FastGFile('../data/ctpn.pb', 'rb') as f: restored_graph_def =", "reside in [0,1) # until the very end, when they're rescaled to (-1,", "result_boxes) # output_crop_resize_img = tf.saved_model.utils.build_tensor_info(crop_resize_img) #output_crop_resize_img_info = tf.saved_model.utils.build_tensor_info(crop_resize_im_info) #---------- ''' result_boxes= np.array(result_boxes, dtype=np.float32)", "of 2:sess ''' with tf.Session() as sess: with gfile.FastGFile('../data/ctpn.pb', 'rb') as f: restored_graph_def", "= sess.graph.get_tensor_by_name('rpn_bbox_pred/Reshape_1:0') #print('query_pb : img, ', img) img, scale = resize_im(cv2img, scale=TextLineCfg.SCALE, max_scale=TextLineCfg.MAX_SCALE)", "restored_graph_def = tf.GraphDef() restored_graph_def.ParseFromString(f.read()) tf.import_graph_def( restored_graph_def, input_map=None, return_elements=None, name=\"\" ) ''' export_path_base =", "# Specify input/output input_img = sess.graph.get_tensor_by_name('Placeholder:0') output_cls_box = sess.graph.get_tensor_by_name('Reshape_2:0') output_box_pred = sess.graph.get_tensor_by_name('rpn_bbox_pred/Reshape_1:0') #print('query_pb", "image pixels reside in [0,1) # until the very end, when 
they're rescaled", "max_x = max(int(box[0]/scale), int(box[2]/scale), int(box[4]/scale), int(box[6]/scale)) max_y = max(int(box[1]/scale), int(box[3]/scale), int(box[5]/scale), int(box[7]/scale)) rects.append([(min_x,", "from lib.fast_rcnn.test import test_ctpn from lib.utils.timer import Timer from lib.text_connector.detectors import TextDetector from", "that the resulting image contains an unknown height and width # that is", "image def query_ctpn(sess, cv2img): \"\"\"Args: sess: tensorflow session cfg: CTPN config img: numpy", "numpy as np import os, sys, cv2 from tensorflow.python.platform import gfile import glob", "very end, when they're rescaled to (-1, 1). The various # adjust_* ops", "of detected bounding boxes, each bounding box have followed coordinates: [(xmin, ymin), (xmax,", "1) image = tf.subtract(image, 0.5) image = tf.multiply(image, 2.0) return image def query_ctpn(sess,", ") # Convert boxes to bouding rectangles rects = [] for box in", "', box_pred ) rois, _ = proposal_layer(cls_prob, box_pred, blobs['im_info'], 'TEST', anchor_scales=cfg.ANCHOR_SCALES) print('rois, ',", "tensor_info_input}, outputs={'detection_boxes': tensor_info_output_boxes}, #outputs={'detection_boxes': tensor_info_output_boxes, # 'resize_im_info':im_info_output, # 'crop_resize_img': output_crop_resize_img, # 'crop_resize_im_info': output_crop_resize_img_info,},", "of image is unknown at compile-time. 
image = tf.image.decode_image(image_buffer, channels=3) image.set_shape([256, 256, 256,3])", "os.path.join(tf.compat.as_bytes(export_path_base), tf.compat.as_bytes(str(args.model_version))) ''' builder = tf.saved_model.builder.SavedModelBuilder('../exportPo/1') #print('Exporting trained model to', export_path) print('Exporting trained", "= tf_example['image/encoded'] image_string = tf.reshape(jpegs, shape=[]) jpeg= preprocess_image(image_string) print('jpeg,jpeg.shape[]', jpeg, jpeg.shape) output_tensor_cls_prob,output_tensor_box_pred =", "tf.multiply(image, 2.0) return image def query_ctpn(sess, cv2img): \"\"\"Args: sess: tensorflow session cfg: CTPN", "#print('query_pb : img, ', img) img, scale = resize_im(cv2img, scale=TextLineCfg.SCALE, max_scale=TextLineCfg.MAX_SCALE) blobs, im_scales", "''' builder = tf.saved_model.builder.SavedModelBuilder('../exportPo/1') #print('Exporting trained model to', export_path) print('Exporting trained model ')", "# self.img_pl = tf.placeholder(tf.string, name='input_image_as_bytes') # After this point, all image pixels reside", "float(max_scale) / max(im.shape[0], im.shape[1]) return cv2.resize(im, None, None, fx=f, fy=f, interpolation=cv2.INTER_LINEAR), f def", "blobs['data']}) #print('cls_prob, ', cls_prob, box_pred ) print('box_pred, ', box_pred ) rois, _ =", "they're rescaled to (-1, 1). 
The various # adjust_* ops all require this", "def export(): ''' No 1 Sess outf of 2 : ctpn_sess ''' cfg_from_file(os.path.join(dir_path,", "max_scale != None and f * max(im.shape[0], im.shape[1]) > max_scale: f = float(max_scale)", "cv2 from tensorflow.python.platform import gfile import glob import shutil dir_path = os.path.dirname(os.path.realpath(__file__)) sys.path.append(os.path.join(dir_path,", "max_scale=TextLineCfg.MAX_SCALE) blobs, im_scales = _get_blobs(img, None) if cfg.TEST.HAS_RPN: im_blob = blobs['data'] blobs['im_info'] =", "boxes: min_x = min(int(box[0]/scale), int(box[2]/scale), int(box[4]/scale), int(box[6]/scale)) min_y = min(int(box[1]/scale), int(box[3]/scale), int(box[5]/scale), int(box[7]/scale))", "cv2img): \"\"\"Args: sess: tensorflow session cfg: CTPN config img: numpy array image Returns:", "result_boxes) #crop_resize_img,crop_resize_im_info = crop_resize_image(imageplaceholder_info, result_boxes) # output_crop_resize_img = tf.saved_model.utils.build_tensor_info(crop_resize_img) #output_crop_resize_img_info = tf.saved_model.utils.build_tensor_info(crop_resize_im_info) #----------", "import Timer from lib.text_connector.detectors import TextDetector from lib.text_connector.text_connect_cfg import Config as TextLineCfg from", "''' No 2 Sess outf of 2:sess ''' with tf.Session() as sess: with", "# Convert boxes to bouding rectangles rects = [] for box in boxes:", "image.set_shape([256, 256, 256,3]) # self.img_pl = tf.placeholder(tf.string, name='input_image_as_bytes') # After this point, all", "trained model to', export_path) print('Exporting trained model ') raw_image = tf.placeholder(tf.string, name='tf_box') feature_configs", "tf.convert_to_tensor(result_boxes) tensor_info_output_boxes = tf.saved_model.utils.build_tensor_info(result_boxes) prediction_post_signature = ( tf.saved_model.signature_def_utils.build_signature_def( inputs={'images': tensor_info_input}, outputs={'detection_boxes': tensor_info_output_boxes}, 
#outputs={'detection_boxes':", "ctpn_sess.run(tf.global_variables_initializer()) cv2img = cv2.imread(\"../data/demo/006.jpg\", cv2.IMREAD_COLOR) result_boxes=query_ctpn(ctpn_sess, cv2img) print('Creating boxes done') ''' No 2", "boxes = textdetector.detect(boxes, scores[:, np.newaxis], img.shape[:2]) print('boxes=textdetector, ', boxes ) # Convert boxes", "result_boxes=query_ctpn(ctpn_sess, cv2img) print('Creating boxes done') ''' No 2 Sess outf of 2:sess '''", "[0, 1) image = tf.subtract(image, 0.5) image = tf.multiply(image, 2.0) return image def", "cls_prob, box_pred = sess.run([output_cls_box, output_box_pred], feed_dict={input_img: blobs['data']}) #print('cls_prob, ', cls_prob, box_pred ) print('box_pred,", "im.shape[1]) if max_scale != None and f * max(im.shape[0], im.shape[1]) > max_scale: f", "1 Sess outf of 2 : ctpn_sess ''' cfg_from_file(os.path.join(dir_path, 'text_post.yml')) config = tf.ConfigProto(allow_soft_placement=True)", "0] #print('scores, ', scores ) boxes = rois[:, 1:5] / im_scales[0] #print('boxes=rois, ',", "shutil dir_path = os.path.dirname(os.path.realpath(__file__)) sys.path.append(os.path.join(dir_path, '..')) from lib.networks.factory import get_network from lib.fast_rcnn.config import", "img) img, scale = resize_im(cv2img, scale=TextLineCfg.SCALE, max_scale=TextLineCfg.MAX_SCALE) blobs, im_scales = _get_blobs(img, None) if", "None) if cfg.TEST.HAS_RPN: im_blob = blobs['data'] blobs['im_info'] = np.array([[im_blob.shape[1], im_blob.shape[2], im_scales[0]]], dtype=np.float32) cls_prob,", "output_crop_resize_img = tf.saved_model.utils.build_tensor_info(crop_resize_img) #output_crop_resize_img_info = tf.saved_model.utils.build_tensor_info(crop_resize_im_info) #---------- ''' result_boxes= np.array(result_boxes, dtype=np.float32) result_boxes= tf.convert_to_tensor(result_boxes)", "= sess.graph.get_tensor_by_name('Placeholder:0') output_cls_box = sess.graph.get_tensor_by_name('Reshape_2:0') output_box_pred = 
sess.graph.get_tensor_by_name('rpn_bbox_pred/Reshape_1:0') #print('query_pb : img, ', img)", "<reponame>kspook/text-detection-ctpn01 from __future__ import print_function import tensorflow as tf import numpy as np", "int(box[4]/scale), int(box[6]/scale)) max_y = max(int(box[1]/scale), int(box[3]/scale), int(box[5]/scale), int(box[7]/scale)) rects.append([(min_x, min_y), (max_x, max_y)]) print('rects.append,", "print_function import tensorflow as tf import numpy as np import os, sys, cv2", "#output_crop_resize_img_info = tf.saved_model.utils.build_tensor_info(crop_resize_im_info) #---------- ''' result_boxes= np.array(result_boxes, dtype=np.float32) result_boxes= tf.convert_to_tensor(result_boxes) tensor_info_output_boxes = tf.saved_model.utils.build_tensor_info(result_boxes)", "containing 87.5% of # the original image. image = tf.image.central_crop(image, central_fraction=0.875) image =", "lib.fast_rcnn.config import cfg, cfg_from_file from lib.fast_rcnn.test import test_ctpn from lib.utils.timer import Timer from", "an unknown height and width # that is set dynamically by decode_jpeg. In", "image = tf.expand_dims(image, 0) image = tf.squeeze(image, [0]) # Finally, rescale to [-1,1]", "max_y = max(int(box[1]/scale), int(box[3]/scale), int(box[5]/scale), int(box[7]/scale)) rects.append([(min_x, min_y), (max_x, max_y)]) print('rects.append, ', rects)", "height and width # that is set dynamically by decode_jpeg. 
In other words,", "builder = tf.saved_model.builder.SavedModelBuilder('../exportPo/1') #print('Exporting trained model to', export_path) print('Exporting trained model ') raw_image", "image contains an unknown height and width # that is set dynamically by", "textdetector = TextDetector() print('textDetector, ', textdetector ) boxes = textdetector.detect(boxes, scores[:, np.newaxis], img.shape[:2])", "min(int(box[0]/scale), int(box[2]/scale), int(box[4]/scale), int(box[6]/scale)) min_y = min(int(box[1]/scale), int(box[3]/scale), int(box[5]/scale), int(box[7]/scale)) max_x = max(int(box[0]/scale),", "[(xmin, ymin), (xmax, ymax)] (xmin, ymin) ------------- | | ---------------- (xmax, ymax) \"\"\"", "tf_example = tf.parse_example(raw_image , feature_configs) jpegs = tf_example['image/encoded'] image_string = tf.reshape(jpegs, shape=[]) jpeg=", "= tf.import_graph_def\\ (tf.get_default_graph().as_graph_def(), input_map={'Placeholder:0': jpeg}, return_elements=['Reshape_2:0','rpn_bbox_pred/Reshape_1:0']) tensor_info_input = tf.saved_model.utils.build_tensor_info(raw_image) tensor_info_output_cls_prob = tf.saved_model.utils.build_tensor_info(output_tensor_cls_prob) tensor_info_output_box_pred", "* max(im.shape[0], im.shape[1]) > max_scale: f = float(max_scale) / max(im.shape[0], im.shape[1]) return cv2.resize(im,", "as tf import numpy as np import os, sys, cv2 from tensorflow.python.platform import", "of # the original image. image = tf.image.central_crop(image, central_fraction=0.875) image = tf.expand_dims(image, 0)", "Returns: A list of detected bounding boxes, each bounding box have followed coordinates:", ") boxes = textdetector.detect(boxes, scores[:, np.newaxis], img.shape[:2]) print('boxes=textdetector, ', boxes ) # Convert", "float. 
image = tf.image.convert_image_dtype(image, dtype=tf.float32) # Crop the central region of the image", "query_ctpn(sess, cv2img): \"\"\"Args: sess: tensorflow session cfg: CTPN config img: numpy array image", "= sess.run([output_cls_box, output_box_pred], feed_dict={input_img: blobs['data']}) #print('cls_prob, ', cls_prob, box_pred ) print('box_pred, ', box_pred", "import TextDetector from lib.text_connector.text_connect_cfg import Config as TextLineCfg from lib.fast_rcnn.test import _get_blobs from", "dtype=tf.string), } tf_example = tf.parse_example(raw_image , feature_configs) jpegs = tf_example['image/encoded'] image_string = tf.reshape(jpegs,", "tf.saved_model.utils.build_tensor_info(output_tensor_cls_prob) tensor_info_output_box_pred = tf.saved_model.utils.build_tensor_info(output_tensor_box_pred) ''' #crop_resize_img,crop_resize_im_info = resize_im(cv2img, result_boxes) #crop_resize_img,crop_resize_im_info = crop_resize_image(imageplaceholder_info, result_boxes)", "self.img_pl = tf.placeholder(tf.string, name='input_image_as_bytes') # After this point, all image pixels reside in" ]
[ "obj.__hdl_converter__.ops2str(ops) def get_MemfunctionCalls(obj): return obj.__hdl_converter__.get_MemfunctionCalls(obj) def FlagFor_TemplateMissing(obj): obj.__hdl_converter__.FlagFor_TemplateMissing(obj) def reset_TemplateMissing(obj): obj.__hdl_converter__.reset_TemplateMissing(obj) def isTemplateMissing(obj):", "return obj.__hdl_converter__.get_packet_file_content(obj) def get_enity_file_content(obj): return obj.__hdl_converter__.get_enity_file_content(obj) def get_entity_file_name(obj): return obj.__hdl_converter__.get_entity_file_name(obj) def get_type_simple(obj): return", "FilesDone): return obj.__hdl_converter__.convert_all_packages(obj, ouputFolder, x, FilesDone) def convert_all_entities(obj, ouputFolder, x, FilesDone): return obj.__hdl_converter__.convert_all_entities(obj,", "obj.__hdl_converter__.get_primary_object(obj) def get_packet_file_name(obj): return obj.__hdl_converter__.get_packet_file_name(obj) def get_packet_file_content(obj): return obj.__hdl_converter__.get_packet_file_content(obj) def get_enity_file_content(obj): return obj.__hdl_converter__.get_enity_file_content(obj)", "return obj.__hdl_converter__.get_Name_array(obj) def length(obj): return obj.__hdl_converter__.length(obj) def to_arglist(obj, name, parent, withDefault=False, astParser=None): return", "ouputFolder, FilesDone): return obj.__hdl_converter__.convert_all_impl(obj, ouputFolder, FilesDone) def convert_all(obj, ouputFolder): return obj.__hdl_converter__.convert_all(obj, ouputFolder) def", "def impl_add(obj,args): return obj.__hdl_converter__.impl_add(obj, args) def impl_sub(obj,args): return obj.__hdl_converter__.impl_sub(obj, args) def impl_multi(obj,args): return", "prepare_for_conversion(obj): return obj.__hdl_converter__.prepare_for_conversion(obj) def get_HDL_name(obj, parent,suffix): return obj.__hdl_converter__.get_HDL_name(obj,parent,suffix) def impl_get_init_values(obj,parent=None, InOut_Filter=None, VaribleSignalFilter =", 
"obj.__hdl_converter__.get_type_simple_template(obj) def impl_constructor(obj): return obj.__hdl_converter__.impl_constructor(obj) def parse_file(obj): return obj.__hdl_converter__.parse_file(obj) def impl_includes(obj, name, parent):", "name, parent): return obj.__hdl_converter__.def_includes(obj, name, parent) def def_record_Member(obj, name, parent, Inout=None): return obj.__hdl_converter__.def_record_Member(obj,", "return obj.__hdl_converter__.prepare_for_conversion(obj) def get_HDL_name(obj, parent,suffix): return obj.__hdl_converter__.get_HDL_name(obj,parent,suffix) def impl_get_init_values(obj,parent=None, InOut_Filter=None, VaribleSignalFilter = None,ForceExpand=False):", "impl_reasign_type(obj): return obj.__hdl_converter__.impl_reasign_type(obj) def impl_reasign(obj, rhs, astParser=None, context_str=None): return obj.__hdl_converter__.impl_reasign(obj, rhs, astParser, context_str)", "func_arg, arg) def impl_get_attribute(obj, attName,parent = None): return obj.__hdl_converter__.impl_get_attribute(obj, attName, parent) def impl_slice(obj,", "VarSymb) def impl_architecture_header(obj): prepare_for_conversion(obj) return obj.__hdl_converter__.impl_architecture_header(obj) def impl_architecture_body(obj): return obj.__hdl_converter__.impl_architecture_body(obj) def impl_add(obj,args): return", "obj.__hdl_converter__.impl_entity_port(obj, name) def impl_function_argument(obj, func_arg, arg): return obj.__hdl_converter__.impl_function_argument(obj, func_arg, arg) def impl_get_attribute(obj, attName,parent", "get_type_simple(obj): return obj.__hdl_converter__.get_type_simple(obj) def get_type_simple_template(obj): return obj.__hdl_converter__.get_type_simple_template(obj) def impl_constructor(obj): return obj.__hdl_converter__.impl_constructor(obj) def parse_file(obj):", "return obj.__hdl_converter__.impl_architecture_header(obj) def impl_architecture_body(obj): return obj.__hdl_converter__.impl_architecture_body(obj) def impl_add(obj,args): return 
obj.__hdl_converter__.impl_add(obj, args) def impl_sub(obj,args):", "None): return obj.__hdl_converter__.impl_get_attribute(obj, attName, parent) def impl_slice(obj, sl, astParser=None): return obj.__hdl_converter__.impl_slice(obj, sl, astParser)", "obj.__hdl_converter__.impl_get_value(obj, ReturnToObj, astParser) def impl_reasign_type(obj): return obj.__hdl_converter__.impl_reasign_type(obj) def impl_reasign(obj, rhs, astParser=None, context_str=None): return", "astParser=None): return obj.__hdl_converter__.impl_compare(obj, ops, rhs, astParser) def impl_add(obj, args): return obj.__hdl_converter__.impl_add(obj, args) def", "arg) def impl_get_attribute(obj, attName,parent = None): return obj.__hdl_converter__.impl_get_attribute(obj, attName, parent) def impl_slice(obj, sl,", "def function_name_modifier(obj, name, varSigSuffix): return obj.__hdl_converter__.function_name_modifier(obj, name, varSigSuffix) def impl_get_value(obj, ReturnToObj=None, astParser=None): return", "def get_HDL_name(obj, parent,suffix): return obj.__hdl_converter__.get_HDL_name(obj,parent,suffix) def impl_get_init_values(obj,parent=None, InOut_Filter=None, VaribleSignalFilter = None,ForceExpand=False): return obj.__hdl_converter__.impl_get_init_values(obj,", "astParser=None): return obj.__hdl_converter__.impl_function_call(obj=obj, name=name, args=args, astParser=astParser) def impl_symbol_instantiation(obj, VarSymb=\"variable\"): return obj.__hdl_converter__.impl_symbol_instantiation(obj, VarSymb) def", "obj.__hdl_converter__.impl_add(obj, args) def impl_sub(obj,args): return obj.__hdl_converter__.impl_sub(obj, args) def impl_multi(obj,args): return obj.__hdl_converter__.impl_multi(obj, args) def", "impl_get_init_values(obj,parent=None, InOut_Filter=None, VaribleSignalFilter = None,ForceExpand=False): return obj.__hdl_converter__.impl_get_init_values(obj, parent, InOut_Filter, VaribleSignalFilter ,ForceExpand) def get_extractedTypes(obj):", "def_record_Member(obj, name, parent, Inout=None): 
return obj.__hdl_converter__.def_record_Member(obj, name, parent, Inout) def def_record_Member_Default(obj, name, parent,", "rhs, astParser) def function_name_modifier(obj, name, varSigSuffix): return obj.__hdl_converter__.function_name_modifier(obj, name, varSigSuffix) def impl_get_value(obj, ReturnToObj=None,", "get_HDL_name(obj, parent,suffix): return obj.__hdl_converter__.get_HDL_name(obj,parent,suffix) def impl_get_init_values(obj,parent=None, InOut_Filter=None, VaribleSignalFilter = None,ForceExpand=False): return obj.__hdl_converter__.impl_get_init_values(obj, parent,", "def impl_enter_rising_edge(obj): return obj.__hdl_converter__.impl_enter_rising_edge(obj) def impl_exit_rising_edge(obj): return obj.__hdl_converter__.impl_exit_rising_edge(obj) def get_assiment_op(obj): return obj.__hdl_converter__.get_assiment_op(obj) def", "impl_symbol_instantiation(obj, VarSymb=\"variable\"): return obj.__hdl_converter__.impl_symbol_instantiation(obj, VarSymb) def impl_architecture_header(obj): prepare_for_conversion(obj) return obj.__hdl_converter__.impl_architecture_header(obj) def impl_architecture_body(obj): return", "obj.__hdl_converter__.length(obj) def to_arglist(obj, name, parent, withDefault=False, astParser=None): return obj.__hdl_converter__.to_arglist(obj, name, parent, withDefault, astParser)", "def get_assiment_op(obj): return obj.__hdl_converter__.get_assiment_op(obj) def get_Inout(obj,parent): return obj.__hdl_converter__.get_Inout(obj,parent) def InOut_t2str2(obj, inOut): return obj.__hdl_converter__.InOut_t2str2(inOut)", "astParser) def impl_reasign_type(obj): return obj.__hdl_converter__.impl_reasign_type(obj) def impl_reasign(obj, rhs, astParser=None, context_str=None): return obj.__hdl_converter__.impl_reasign(obj, rhs,", "args) def impl_function_call(obj, name, args, astParser=None): return obj.__hdl_converter__.impl_function_call(obj=obj, name=name, args=args, astParser=astParser) def impl_symbol_instantiation(obj,", 
"obj.__hdl_converter__.get_entity_file_name(obj) def get_type_simple(obj): return obj.__hdl_converter__.get_type_simple(obj) def get_type_simple_template(obj): return obj.__hdl_converter__.get_type_simple_template(obj) def impl_constructor(obj): return obj.__hdl_converter__.impl_constructor(obj)", "impl_sub(obj, args): return obj.__hdl_converter__.impl_sub(obj, args) def impl_to_bool(obj, astParser): return obj.__hdl_converter__.impl_to_bool(obj, astParser) def impl_bit_and(obj,", "exclude_class_type=None, filter_inout=None): return obj.__hdl_converter__.extract_conversion_types(obj, exclude_class_type, filter_inout) def get_Name_array(obj): return obj.__hdl_converter__.get_Name_array(obj) def length(obj): return", "astParser) def function_name_modifier(obj, name, varSigSuffix): return obj.__hdl_converter__.function_name_modifier(obj, name, varSigSuffix) def impl_get_value(obj, ReturnToObj=None, astParser=None):", "impl_function_call(obj, name, args, astParser=None): return obj.__hdl_converter__.impl_function_call(obj=obj, name=name, args=args, astParser=astParser) def impl_symbol_instantiation(obj, VarSymb=\"variable\"): return", "astParser=astParser) def impl_symbol_instantiation(obj, VarSymb=\"variable\"): return obj.__hdl_converter__.impl_symbol_instantiation(obj, VarSymb) def impl_architecture_header(obj): prepare_for_conversion(obj) return obj.__hdl_converter__.impl_architecture_header(obj) def", "return obj.__hdl_converter__.get_assiment_op(obj) def get_Inout(obj,parent): return obj.__hdl_converter__.get_Inout(obj,parent) def InOut_t2str2(obj, inOut): return obj.__hdl_converter__.InOut_t2str2(inOut) def InOut_t2str(obj):", "FilesDone) def convert_all_impl(obj, ouputFolder, FilesDone): return obj.__hdl_converter__.convert_all_impl(obj, ouputFolder, FilesDone) def convert_all(obj, ouputFolder): return", "return obj.__hdl_converter__.get_Inout(obj,parent) def InOut_t2str2(obj, inOut): return obj.__hdl_converter__.InOut_t2str2(inOut) def InOut_t2str(obj): return 
obj.__hdl_converter__.InOut_t2str(obj) def get_default_value(obj):", "name, parent, withDefault, astParser) def get_inout_type_recursive(obj): return obj.__hdl_converter__.get_inout_type_recursive(obj) def Has_pushpull_function(obj, pushpull): return obj.__hdl_converter__.Has_pushpull_function(obj,", "return obj.__hdl_converter__.convert_all_packages(obj, ouputFolder, x, FilesDone) def convert_all_entities(obj, ouputFolder, x, FilesDone): return obj.__hdl_converter__.convert_all_entities(obj, ouputFolder,", "def length(obj): return obj.__hdl_converter__.length(obj) def to_arglist(obj, name, parent, withDefault=False, astParser=None): return obj.__hdl_converter__.to_arglist(obj, name,", "get_Inout(obj,parent): return obj.__hdl_converter__.get_Inout(obj,parent) def InOut_t2str2(obj, inOut): return obj.__hdl_converter__.InOut_t2str2(inOut) def InOut_t2str(obj): return obj.__hdl_converter__.InOut_t2str(obj) def", "rhs, astParser, context_str) def impl_reasign_rshift_(obj, rhs, astParser=None, context_str=None): return obj.__hdl_converter__.impl_reasign_rshift_(obj, rhs, astParser, context_str)", "parent, withDefault=False, astParser=None): return obj.__hdl_converter__.to_arglist(obj, name, parent, withDefault, astParser) def get_inout_type_recursive(obj): return obj.__hdl_converter__.get_inout_type_recursive(obj)", "parent, Inout) def def_record_Member_Default(obj, name, parent, Inout=None): return obj.__hdl_converter__.def_record_Member_Default(obj, name, parent, Inout) def", "def get_component_suffix(obj, Inout_type, varsignal_type): return obj.__hdl_converter__.get_component_suffix(obj, Inout_type, varsignal_type) def prepare_for_conversion(obj): return obj.__hdl_converter__.prepare_for_conversion(obj) def", "name): return obj.__hdl_converter__.impl_entity_port(obj, name) def impl_function_argument(obj, func_arg, arg): return obj.__hdl_converter__.impl_function_argument(obj, func_arg, arg) def", "parent_list) def get_component_suffix(obj, Inout_type, 
varsignal_type): return obj.__hdl_converter__.get_component_suffix(obj, Inout_type, varsignal_type) def prepare_for_conversion(obj): return obj.__hdl_converter__.prepare_for_conversion(obj)", "obj.__hdl_converter__.get_inout_type_recursive(obj) def Has_pushpull_function(obj, pushpull): return obj.__hdl_converter__.Has_pushpull_function(obj, pushpull) def get_free_symbols(obj, name, parent_list=[]): return obj.__hdl_converter__.get_free_symbols(obj,name,", "get_call_member_function(obj, name, args): return obj.__hdl_converter__.get_call_member_function(obj, name, args) def impl_function_call(obj, name, args, astParser=None): return", "def get_dependency_objects(obj, dep_list): return obj.__hdl_converter__.get_dependency_objects(obj, dep_list) def ops2str(obj, ops): return obj.__hdl_converter__.ops2str(ops) def get_MemfunctionCalls(obj):", "context_str=None): return obj.__hdl_converter__.impl_reasign(obj, rhs, astParser, context_str) def impl_reasign_rshift_(obj, rhs, astParser=None, context_str=None): return obj.__hdl_converter__.impl_reasign_rshift_(obj,", "context_str=None): return obj.__hdl_converter__.impl_reasign_rshift_(obj, rhs, astParser, context_str) def get_call_member_function(obj, name, args): return obj.__hdl_converter__.get_call_member_function(obj, name,", "obj.__hdl_converter__.impl_compare(obj, ops, rhs, astParser) def impl_add(obj, args): return obj.__hdl_converter__.impl_add(obj, args) def impl_sub(obj, args):", "astParser) def impl_add(obj, args): return obj.__hdl_converter__.impl_add(obj, args) def impl_sub(obj, args): return obj.__hdl_converter__.impl_sub(obj, args)", "return obj.__hdl_converter__.Has_pushpull_function(obj, pushpull) def get_free_symbols(obj, name, parent_list=[]): return obj.__hdl_converter__.get_free_symbols(obj,name, parent_list) def get_component_suffix(obj, Inout_type,", "name, parent) def impl_entity_port(obj, name): return obj.__hdl_converter__.impl_entity_port(obj, name) def impl_function_argument(obj, func_arg, arg): 
return", "def impl_process_sensitivity_list(obj): return obj.__hdl_converter__.impl_process_sensitivity_list(obj) def impl_process_pull(obj,clk): return obj.__hdl_converter__.impl_process_pull(obj,clk) def impl_process_push(obj,clk): return obj.__hdl_converter__.impl_process_push(obj,clk) def", "name, parent, Inout) def def_record_Member_Default(obj, name, parent, Inout=None): return obj.__hdl_converter__.def_record_Member_Default(obj, name, parent, Inout)", "impl_enter_rising_edge(obj): return obj.__hdl_converter__.impl_enter_rising_edge(obj) def impl_exit_rising_edge(obj): return obj.__hdl_converter__.impl_exit_rising_edge(obj) def get_assiment_op(obj): return obj.__hdl_converter__.get_assiment_op(obj) def get_Inout(obj,parent):", "return obj.__hdl_converter__.get_component_suffix(obj, Inout_type, varsignal_type) def prepare_for_conversion(obj): return obj.__hdl_converter__.prepare_for_conversion(obj) def get_HDL_name(obj, parent,suffix): return obj.__hdl_converter__.get_HDL_name(obj,parent,suffix)", "obj.__hdl_converter__.impl_add(obj, args) def impl_sub(obj, args): return obj.__hdl_converter__.impl_sub(obj, args) def impl_to_bool(obj, astParser): return obj.__hdl_converter__.impl_to_bool(obj,", "obj.__hdl_converter__.impl_function_call(obj=obj, name=name, args=args, astParser=astParser) def impl_symbol_instantiation(obj, VarSymb=\"variable\"): return obj.__hdl_converter__.impl_symbol_instantiation(obj, VarSymb) def impl_architecture_header(obj): prepare_for_conversion(obj)", "return obj.__hdl_converter__.to_arglist(obj, name, parent, withDefault, astParser) def get_inout_type_recursive(obj): return obj.__hdl_converter__.get_inout_type_recursive(obj) def Has_pushpull_function(obj, pushpull):", "obj.__hdl_converter__.impl_architecture_header(obj) def impl_architecture_body(obj): return obj.__hdl_converter__.impl_architecture_body(obj) def impl_add(obj,args): return obj.__hdl_converter__.impl_add(obj, args) def impl_sub(obj,args): return", 
"<reponame>HardwareDesignWithPython/HDPython def get_dependency_objects(obj, dep_list): return obj.__hdl_converter__.get_dependency_objects(obj, dep_list) def ops2str(obj, ops): return obj.__hdl_converter__.ops2str(ops) def", "return obj.__hdl_converter__.isTemplateMissing(obj) def IsSucessfullConverted(obj): return obj.__hdl_converter__.IsSucessfullConverted(obj) def convert_all_packages(obj, ouputFolder, x, FilesDone): return obj.__hdl_converter__.convert_all_packages(obj,", "def get_Inout(obj,parent): return obj.__hdl_converter__.get_Inout(obj,parent) def InOut_t2str2(obj, inOut): return obj.__hdl_converter__.InOut_t2str2(inOut) def InOut_t2str(obj): return obj.__hdl_converter__.InOut_t2str(obj)", "def_entity_port(obj): prepare_for_conversion(obj) return obj.__hdl_converter__.def_entity_port(obj) def impl_process_header(obj): return obj.__hdl_converter__.impl_process_header(obj) def impl_process_sensitivity_list(obj): return obj.__hdl_converter__.impl_process_sensitivity_list(obj) def", "return obj.__hdl_converter__.get_HDL_name(obj,parent,suffix) def impl_get_init_values(obj,parent=None, InOut_Filter=None, VaribleSignalFilter = None,ForceExpand=False): return obj.__hdl_converter__.impl_get_init_values(obj, parent, InOut_Filter, VaribleSignalFilter", "name, parent): return obj.__hdl_converter__.def_packet_header(obj, name, parent) def def_packet_body(obj, name, parent): return obj.__hdl_converter__.def_packet_body(obj, name,", "name, args) def impl_function_call(obj, name, args, astParser=None): return obj.__hdl_converter__.impl_function_call(obj=obj, name=name, args=args, astParser=astParser) def", "isTemplateMissing(obj): return obj.__hdl_converter__.isTemplateMissing(obj) def IsSucessfullConverted(obj): return obj.__hdl_converter__.IsSucessfullConverted(obj) def convert_all_packages(obj, ouputFolder, x, FilesDone): return", "return obj.__hdl_converter__.function_name_modifier(obj, name, varSigSuffix) def impl_get_value(obj, ReturnToObj=None, astParser=None): 
return obj.__hdl_converter__.impl_get_value(obj, ReturnToObj, astParser) def", "obj.__hdl_converter__.convert_all_impl(obj, ouputFolder, FilesDone) def convert_all(obj, ouputFolder): return obj.__hdl_converter__.convert_all(obj, ouputFolder) def get_primary_object(obj): return obj.__hdl_converter__.get_primary_object(obj)", "length(obj): return obj.__hdl_converter__.length(obj) def to_arglist(obj, name, parent, withDefault=False, astParser=None): return obj.__hdl_converter__.to_arglist(obj, name, parent,", "varsignal_type) def prepare_for_conversion(obj): return obj.__hdl_converter__.prepare_for_conversion(obj) def get_HDL_name(obj, parent,suffix): return obj.__hdl_converter__.get_HDL_name(obj,parent,suffix) def impl_get_init_values(obj,parent=None, InOut_Filter=None,", "ouputFolder, x, FilesDone) def convert_all_entities(obj, ouputFolder, x, FilesDone): return obj.__hdl_converter__.convert_all_entities(obj, ouputFolder, x, FilesDone)", "get_inout_type_recursive(obj): return obj.__hdl_converter__.get_inout_type_recursive(obj) def Has_pushpull_function(obj, pushpull): return obj.__hdl_converter__.Has_pushpull_function(obj, pushpull) def get_free_symbols(obj, name, parent_list=[]):", "impl_add(obj,args): return obj.__hdl_converter__.impl_add(obj, args) def impl_sub(obj,args): return obj.__hdl_converter__.impl_sub(obj, args) def impl_multi(obj,args): return obj.__hdl_converter__.impl_multi(obj,", "astParser, context_str) def impl_reasign_rshift_(obj, rhs, astParser=None, context_str=None): return obj.__hdl_converter__.impl_reasign_rshift_(obj, rhs, astParser, context_str) def", "impl_slice(obj, sl, astParser=None): return obj.__hdl_converter__.impl_slice(obj, sl, astParser) def impl_compare(obj, ops, rhs, astParser=None): return", "rhs, astParser): return obj.__hdl_converter__.impl_bit_and(obj, rhs, astParser) def function_name_modifier(obj, name, varSigSuffix): return obj.__hdl_converter__.function_name_modifier(obj, name,", "get_component_suffix(obj, Inout_type, 
varsignal_type): return obj.__hdl_converter__.get_component_suffix(obj, Inout_type, varsignal_type) def prepare_for_conversion(obj): return obj.__hdl_converter__.prepare_for_conversion(obj) def get_HDL_name(obj,", "return obj.__hdl_converter__.ops2str(ops) def get_MemfunctionCalls(obj): return obj.__hdl_converter__.get_MemfunctionCalls(obj) def FlagFor_TemplateMissing(obj): obj.__hdl_converter__.FlagFor_TemplateMissing(obj) def reset_TemplateMissing(obj): obj.__hdl_converter__.reset_TemplateMissing(obj) def", "def convert_all_impl(obj, ouputFolder, FilesDone): return obj.__hdl_converter__.convert_all_impl(obj, ouputFolder, FilesDone) def convert_all(obj, ouputFolder): return obj.__hdl_converter__.convert_all(obj,", "get_free_symbols(obj, name, parent_list=[]): return obj.__hdl_converter__.get_free_symbols(obj,name, parent_list) def get_component_suffix(obj, Inout_type, varsignal_type): return obj.__hdl_converter__.get_component_suffix(obj, Inout_type,", "def impl_sub(obj, args): return obj.__hdl_converter__.impl_sub(obj, args) def impl_to_bool(obj, astParser): return obj.__hdl_converter__.impl_to_bool(obj, astParser) def", "context_str) def get_call_member_function(obj, name, args): return obj.__hdl_converter__.get_call_member_function(obj, name, args) def impl_function_call(obj, name, args,", "name, parent, Inout=None): return obj.__hdl_converter__.def_record_Member_Default(obj, name, parent, Inout) def def_packet_header(obj, name, parent): return", "def impl_function_argument(obj, func_arg, arg): return obj.__hdl_converter__.impl_function_argument(obj, func_arg, arg) def impl_get_attribute(obj, attName,parent = None):", "obj.__hdl_converter__.get_component_suffix(obj, Inout_type, varsignal_type) def prepare_for_conversion(obj): return obj.__hdl_converter__.prepare_for_conversion(obj) def get_HDL_name(obj, parent,suffix): return obj.__hdl_converter__.get_HDL_name(obj,parent,suffix) def", "obj.__hdl_converter__.impl_symbol_instantiation(obj, VarSymb) def 
impl_architecture_header(obj): prepare_for_conversion(obj) return obj.__hdl_converter__.impl_architecture_header(obj) def impl_architecture_body(obj): return obj.__hdl_converter__.impl_architecture_body(obj) def impl_add(obj,args):", "attName, parent) def impl_slice(obj, sl, astParser=None): return obj.__hdl_converter__.impl_slice(obj, sl, astParser) def impl_compare(obj, ops,", "rhs, astParser=None, context_str=None): return obj.__hdl_converter__.impl_reasign(obj, rhs, astParser, context_str) def impl_reasign_rshift_(obj, rhs, astParser=None, context_str=None):", "def impl_compare(obj, ops, rhs, astParser=None): return obj.__hdl_converter__.impl_compare(obj, ops, rhs, astParser) def impl_add(obj, args):", "return obj.__hdl_converter__.get_primary_object(obj) def get_packet_file_name(obj): return obj.__hdl_converter__.get_packet_file_name(obj) def get_packet_file_content(obj): return obj.__hdl_converter__.get_packet_file_content(obj) def get_enity_file_content(obj): return", "return obj.__hdl_converter__.impl_slice(obj, sl, astParser) def impl_compare(obj, ops, rhs, astParser=None): return obj.__hdl_converter__.impl_compare(obj, ops, rhs,", "parent) def def_record_Member(obj, name, parent, Inout=None): return obj.__hdl_converter__.def_record_Member(obj, name, parent, Inout) def def_record_Member_Default(obj,", "args=args, astParser=astParser) def impl_symbol_instantiation(obj, VarSymb=\"variable\"): return obj.__hdl_converter__.impl_symbol_instantiation(obj, VarSymb) def impl_architecture_header(obj): prepare_for_conversion(obj) return obj.__hdl_converter__.impl_architecture_header(obj)", "def IsSucessfullConverted(obj): return obj.__hdl_converter__.IsSucessfullConverted(obj) def convert_all_packages(obj, ouputFolder, x, FilesDone): return obj.__hdl_converter__.convert_all_packages(obj, ouputFolder, x,", "def def_record_Member_Default(obj, name, parent, Inout=None): return obj.__hdl_converter__.def_record_Member_Default(obj, name, parent, Inout) def 
def_packet_header(obj, name,", "return obj.__hdl_converter__.def_entity_port(obj) def impl_process_header(obj): return obj.__hdl_converter__.impl_process_header(obj) def impl_process_sensitivity_list(obj): return obj.__hdl_converter__.impl_process_sensitivity_list(obj) def impl_process_pull(obj,clk): return", "impl_entity_port(obj, name): return obj.__hdl_converter__.impl_entity_port(obj, name) def impl_function_argument(obj, func_arg, arg): return obj.__hdl_converter__.impl_function_argument(obj, func_arg, arg)", "obj.__hdl_converter__.convert_all_entities(obj, ouputFolder, x, FilesDone) def convert_all_impl(obj, ouputFolder, FilesDone): return obj.__hdl_converter__.convert_all_impl(obj, ouputFolder, FilesDone) def", "name, parent, withDefault=False, astParser=None): return obj.__hdl_converter__.to_arglist(obj, name, parent, withDefault, astParser) def get_inout_type_recursive(obj): return", "impl_reasign(obj, rhs, astParser=None, context_str=None): return obj.__hdl_converter__.impl_reasign(obj, rhs, astParser, context_str) def impl_reasign_rshift_(obj, rhs, astParser=None,", "astParser, context_str) def get_call_member_function(obj, name, args): return obj.__hdl_converter__.get_call_member_function(obj, name, args) def impl_function_call(obj, name,", "FilesDone) def convert_all(obj, ouputFolder): return obj.__hdl_converter__.convert_all(obj, ouputFolder) def get_primary_object(obj): return obj.__hdl_converter__.get_primary_object(obj) def get_packet_file_name(obj):", "return obj.__hdl_converter__.get_type_simple_template(obj) def impl_constructor(obj): return obj.__hdl_converter__.impl_constructor(obj) def parse_file(obj): return obj.__hdl_converter__.parse_file(obj) def impl_includes(obj, name,", "obj.__hdl_converter__.impl_multi(obj, args) def def_entity_port(obj): prepare_for_conversion(obj) return obj.__hdl_converter__.def_entity_port(obj) def impl_process_header(obj): return obj.__hdl_converter__.impl_process_header(obj) def 
impl_process_sensitivity_list(obj):", "return obj.__hdl_converter__.get_free_symbols(obj,name, parent_list) def get_component_suffix(obj, Inout_type, varsignal_type): return obj.__hdl_converter__.get_component_suffix(obj, Inout_type, varsignal_type) def prepare_for_conversion(obj):", "pushpull): return obj.__hdl_converter__.Has_pushpull_function(obj, pushpull) def get_free_symbols(obj, name, parent_list=[]): return obj.__hdl_converter__.get_free_symbols(obj,name, parent_list) def get_component_suffix(obj,", "obj.__hdl_converter__.get_type_simple(obj) def get_type_simple_template(obj): return obj.__hdl_converter__.get_type_simple_template(obj) def impl_constructor(obj): return obj.__hdl_converter__.impl_constructor(obj) def parse_file(obj): return obj.__hdl_converter__.parse_file(obj)", "def parse_file(obj): return obj.__hdl_converter__.parse_file(obj) def impl_includes(obj, name, parent): return obj.__hdl_converter__.impl_includes(obj, name, parent) def", "VaribleSignalFilter = None,ForceExpand=False): return obj.__hdl_converter__.impl_get_init_values(obj, parent, InOut_Filter, VaribleSignalFilter ,ForceExpand) def get_extractedTypes(obj): primary =", "name, varSigSuffix): return obj.__hdl_converter__.function_name_modifier(obj, name, varSigSuffix) def impl_get_value(obj, ReturnToObj=None, astParser=None): return obj.__hdl_converter__.impl_get_value(obj, ReturnToObj,", "obj.__hdl_converter__.Has_pushpull_function(obj, pushpull) def get_free_symbols(obj, name, parent_list=[]): return obj.__hdl_converter__.get_free_symbols(obj,name, parent_list) def get_component_suffix(obj, Inout_type, varsignal_type):", "def isTemplateMissing(obj): return obj.__hdl_converter__.isTemplateMissing(obj) def IsSucessfullConverted(obj): return obj.__hdl_converter__.IsSucessfullConverted(obj) def convert_all_packages(obj, ouputFolder, x, FilesDone):", "obj.__hdl_converter__.extract_conversion_types(obj, exclude_class_type, filter_inout) def get_Name_array(obj): return 
obj.__hdl_converter__.get_Name_array(obj) def length(obj): return obj.__hdl_converter__.length(obj) def to_arglist(obj,", "def get_packet_file_content(obj): return obj.__hdl_converter__.get_packet_file_content(obj) def get_enity_file_content(obj): return obj.__hdl_converter__.get_enity_file_content(obj) def get_entity_file_name(obj): return obj.__hdl_converter__.get_entity_file_name(obj) def", "obj.__hdl_converter__.def_packet_body(obj, name, parent) def impl_entity_port(obj, name): return obj.__hdl_converter__.impl_entity_port(obj, name) def impl_function_argument(obj, func_arg, arg):", "obj.__hdl_converter__.get_call_member_function(obj, name, args) def impl_function_call(obj, name, args, astParser=None): return obj.__hdl_converter__.impl_function_call(obj=obj, name=name, args=args, astParser=astParser)", "name) def impl_function_argument(obj, func_arg, arg): return obj.__hdl_converter__.impl_function_argument(obj, func_arg, arg) def impl_get_attribute(obj, attName,parent =", "def impl_get_attribute(obj, attName,parent = None): return obj.__hdl_converter__.impl_get_attribute(obj, attName, parent) def impl_slice(obj, sl, astParser=None):", "return obj.__hdl_converter__.get_call_member_function(obj, name, args) def impl_function_call(obj, name, args, astParser=None): return obj.__hdl_converter__.impl_function_call(obj=obj, name=name, args=args,", "Inout_type, varsignal_type): return obj.__hdl_converter__.get_component_suffix(obj, Inout_type, varsignal_type) def prepare_for_conversion(obj): return obj.__hdl_converter__.prepare_for_conversion(obj) def get_HDL_name(obj, parent,suffix):", "parent, withDefault, astParser) def get_inout_type_recursive(obj): return obj.__hdl_converter__.get_inout_type_recursive(obj) def Has_pushpull_function(obj, pushpull): return obj.__hdl_converter__.Has_pushpull_function(obj, pushpull)", "impl_sub(obj,args): return obj.__hdl_converter__.impl_sub(obj, args) def impl_multi(obj,args): return obj.__hdl_converter__.impl_multi(obj, args) def 
def_entity_port(obj): prepare_for_conversion(obj) return", "sl, astParser) def impl_compare(obj, ops, rhs, astParser=None): return obj.__hdl_converter__.impl_compare(obj, ops, rhs, astParser) def", "ReturnToObj, astParser) def impl_reasign_type(obj): return obj.__hdl_converter__.impl_reasign_type(obj) def impl_reasign(obj, rhs, astParser=None, context_str=None): return obj.__hdl_converter__.impl_reasign(obj,", "args): return obj.__hdl_converter__.get_call_member_function(obj, name, args) def impl_function_call(obj, name, args, astParser=None): return obj.__hdl_converter__.impl_function_call(obj=obj, name=name,", "varsignal_type): return obj.__hdl_converter__.get_component_suffix(obj, Inout_type, varsignal_type) def prepare_for_conversion(obj): return obj.__hdl_converter__.prepare_for_conversion(obj) def get_HDL_name(obj, parent,suffix): return", "impl_includes(obj, name, parent): return obj.__hdl_converter__.impl_includes(obj, name, parent) def def_includes(obj, name, parent): return obj.__hdl_converter__.def_includes(obj,", "def def_packet_header(obj, name, parent): return obj.__hdl_converter__.def_packet_header(obj, name, parent) def def_packet_body(obj, name, parent): return", "obj.__hdl_converter__.def_includes(obj, name, parent) def def_record_Member(obj, name, parent, Inout=None): return obj.__hdl_converter__.def_record_Member(obj, name, parent, Inout)", "get_dependency_objects(obj, dep_list): return obj.__hdl_converter__.get_dependency_objects(obj, dep_list) def ops2str(obj, ops): return obj.__hdl_converter__.ops2str(ops) def get_MemfunctionCalls(obj): return", "return obj.__hdl_converter__.impl_reasign(obj, rhs, astParser, context_str) def impl_reasign_rshift_(obj, rhs, astParser=None, context_str=None): return obj.__hdl_converter__.impl_reasign_rshift_(obj, rhs,", "return obj.__hdl_converter__.impl_enter_rising_edge(obj) def impl_exit_rising_edge(obj): return obj.__hdl_converter__.impl_exit_rising_edge(obj) def get_assiment_op(obj): return 
obj.__hdl_converter__.get_assiment_op(obj) def get_Inout(obj,parent): return", "return obj.__hdl_converter__.get_default_value(obj) def extract_conversion_types(obj, exclude_class_type=None, filter_inout=None): return obj.__hdl_converter__.extract_conversion_types(obj, exclude_class_type, filter_inout) def get_Name_array(obj): return", "ops): return obj.__hdl_converter__.ops2str(ops) def get_MemfunctionCalls(obj): return obj.__hdl_converter__.get_MemfunctionCalls(obj) def FlagFor_TemplateMissing(obj): obj.__hdl_converter__.FlagFor_TemplateMissing(obj) def reset_TemplateMissing(obj): obj.__hdl_converter__.reset_TemplateMissing(obj)", "impl_process_push(obj,clk): return obj.__hdl_converter__.impl_process_push(obj,clk) def impl_enter_rising_edge(obj): return obj.__hdl_converter__.impl_enter_rising_edge(obj) def impl_exit_rising_edge(obj): return obj.__hdl_converter__.impl_exit_rising_edge(obj) def get_assiment_op(obj):", "parent): return obj.__hdl_converter__.def_packet_body(obj, name, parent) def impl_entity_port(obj, name): return obj.__hdl_converter__.impl_entity_port(obj, name) def impl_function_argument(obj,", "return obj.__hdl_converter__.impl_add(obj, args) def impl_sub(obj, args): return obj.__hdl_converter__.impl_sub(obj, args) def impl_to_bool(obj, astParser): return", "obj.__hdl_converter__.impl_process_push(obj,clk) def impl_enter_rising_edge(obj): return obj.__hdl_converter__.impl_enter_rising_edge(obj) def impl_exit_rising_edge(obj): return obj.__hdl_converter__.impl_exit_rising_edge(obj) def get_assiment_op(obj): return obj.__hdl_converter__.get_assiment_op(obj)", "name, parent_list=[]): return obj.__hdl_converter__.get_free_symbols(obj,name, parent_list) def get_component_suffix(obj, Inout_type, varsignal_type): return obj.__hdl_converter__.get_component_suffix(obj, Inout_type, varsignal_type)", "return obj.__hdl_converter__.IsSucessfullConverted(obj) def convert_all_packages(obj, ouputFolder, x, FilesDone): return 
obj.__hdl_converter__.convert_all_packages(obj, ouputFolder, x, FilesDone) def", "obj.__hdl_converter__.impl_sub(obj, args) def impl_to_bool(obj, astParser): return obj.__hdl_converter__.impl_to_bool(obj, astParser) def impl_bit_and(obj, rhs, astParser): return", "exclude_class_type, filter_inout) def get_Name_array(obj): return obj.__hdl_converter__.get_Name_array(obj) def length(obj): return obj.__hdl_converter__.length(obj) def to_arglist(obj, name,", "def impl_multi(obj,args): return obj.__hdl_converter__.impl_multi(obj, args) def def_entity_port(obj): prepare_for_conversion(obj) return obj.__hdl_converter__.def_entity_port(obj) def impl_process_header(obj): return", "return obj.__hdl_converter__.impl_includes(obj, name, parent) def def_includes(obj, name, parent): return obj.__hdl_converter__.def_includes(obj, name, parent) def", "def def_includes(obj, name, parent): return obj.__hdl_converter__.def_includes(obj, name, parent) def def_record_Member(obj, name, parent, Inout=None):", "dep_list): return obj.__hdl_converter__.get_dependency_objects(obj, dep_list) def ops2str(obj, ops): return obj.__hdl_converter__.ops2str(ops) def get_MemfunctionCalls(obj): return obj.__hdl_converter__.get_MemfunctionCalls(obj)", "obj.__hdl_converter__.isTemplateMissing(obj) def IsSucessfullConverted(obj): return obj.__hdl_converter__.IsSucessfullConverted(obj) def convert_all_packages(obj, ouputFolder, x, FilesDone): return obj.__hdl_converter__.convert_all_packages(obj, ouputFolder,", "return obj.__hdl_converter__.impl_multi(obj, args) def def_entity_port(obj): prepare_for_conversion(obj) return obj.__hdl_converter__.def_entity_port(obj) def impl_process_header(obj): return obj.__hdl_converter__.impl_process_header(obj) def", "ops, rhs, astParser=None): return obj.__hdl_converter__.impl_compare(obj, ops, rhs, astParser) def impl_add(obj, args): return obj.__hdl_converter__.impl_add(obj,", "args) def impl_to_bool(obj, astParser): return obj.__hdl_converter__.impl_to_bool(obj, 
astParser) def impl_bit_and(obj, rhs, astParser): return obj.__hdl_converter__.impl_bit_and(obj,", "astParser) def impl_bit_and(obj, rhs, astParser): return obj.__hdl_converter__.impl_bit_and(obj, rhs, astParser) def function_name_modifier(obj, name, varSigSuffix):", "name, parent): return obj.__hdl_converter__.def_packet_body(obj, name, parent) def impl_entity_port(obj, name): return obj.__hdl_converter__.impl_entity_port(obj, name) def", "return obj.__hdl_converter__.get_inout_type_recursive(obj) def Has_pushpull_function(obj, pushpull): return obj.__hdl_converter__.Has_pushpull_function(obj, pushpull) def get_free_symbols(obj, name, parent_list=[]): return", "def impl_symbol_instantiation(obj, VarSymb=\"variable\"): return obj.__hdl_converter__.impl_symbol_instantiation(obj, VarSymb) def impl_architecture_header(obj): prepare_for_conversion(obj) return obj.__hdl_converter__.impl_architecture_header(obj) def impl_architecture_body(obj):", "obj.__hdl_converter__.prepare_for_conversion(obj) def get_HDL_name(obj, parent,suffix): return obj.__hdl_converter__.get_HDL_name(obj,parent,suffix) def impl_get_init_values(obj,parent=None, InOut_Filter=None, VaribleSignalFilter = None,ForceExpand=False): return", "withDefault=False, astParser=None): return obj.__hdl_converter__.to_arglist(obj, name, parent, withDefault, astParser) def get_inout_type_recursive(obj): return obj.__hdl_converter__.get_inout_type_recursive(obj) def", "return obj.__hdl_converter__.convert_all(obj, ouputFolder) def get_primary_object(obj): return obj.__hdl_converter__.get_primary_object(obj) def get_packet_file_name(obj): return obj.__hdl_converter__.get_packet_file_name(obj) def get_packet_file_content(obj):", "ouputFolder, x, FilesDone): return obj.__hdl_converter__.convert_all_packages(obj, ouputFolder, x, FilesDone) def convert_all_entities(obj, ouputFolder, x, FilesDone):", "ouputFolder, x, FilesDone): return obj.__hdl_converter__.convert_all_entities(obj, ouputFolder, x, FilesDone) def 
convert_all_impl(obj, ouputFolder, FilesDone): return", "return obj.__hdl_converter__.get_enity_file_content(obj) def get_entity_file_name(obj): return obj.__hdl_converter__.get_entity_file_name(obj) def get_type_simple(obj): return obj.__hdl_converter__.get_type_simple(obj) def get_type_simple_template(obj): return", "obj.__hdl_converter__.impl_enter_rising_edge(obj) def impl_exit_rising_edge(obj): return obj.__hdl_converter__.impl_exit_rising_edge(obj) def get_assiment_op(obj): return obj.__hdl_converter__.get_assiment_op(obj) def get_Inout(obj,parent): return obj.__hdl_converter__.get_Inout(obj,parent)", "def impl_reasign(obj, rhs, astParser=None, context_str=None): return obj.__hdl_converter__.impl_reasign(obj, rhs, astParser, context_str) def impl_reasign_rshift_(obj, rhs,", "args): return obj.__hdl_converter__.impl_sub(obj, args) def impl_to_bool(obj, astParser): return obj.__hdl_converter__.impl_to_bool(obj, astParser) def impl_bit_and(obj, rhs,", "return obj.__hdl_converter__.impl_entity_port(obj, name) def impl_function_argument(obj, func_arg, arg): return obj.__hdl_converter__.impl_function_argument(obj, func_arg, arg) def impl_get_attribute(obj,", "name, parent, Inout) def def_packet_header(obj, name, parent): return obj.__hdl_converter__.def_packet_header(obj, name, parent) def def_packet_body(obj,", "IsSucessfullConverted(obj): return obj.__hdl_converter__.IsSucessfullConverted(obj) def convert_all_packages(obj, ouputFolder, x, FilesDone): return obj.__hdl_converter__.convert_all_packages(obj, ouputFolder, x, FilesDone)", "extract_conversion_types(obj, exclude_class_type=None, filter_inout=None): return obj.__hdl_converter__.extract_conversion_types(obj, exclude_class_type, filter_inout) def get_Name_array(obj): return obj.__hdl_converter__.get_Name_array(obj) def length(obj):", "rhs, astParser=None, context_str=None): return obj.__hdl_converter__.impl_reasign_rshift_(obj, rhs, astParser, context_str) def get_call_member_function(obj, name, args): 
return", "args) def impl_sub(obj, args): return obj.__hdl_converter__.impl_sub(obj, args) def impl_to_bool(obj, astParser): return obj.__hdl_converter__.impl_to_bool(obj, astParser)", "return obj.__hdl_converter__.impl_sub(obj, args) def impl_to_bool(obj, astParser): return obj.__hdl_converter__.impl_to_bool(obj, astParser) def impl_bit_and(obj, rhs, astParser):", "obj.__hdl_converter__.impl_get_init_values(obj, parent, InOut_Filter, VaribleSignalFilter ,ForceExpand) def get_extractedTypes(obj): primary = get_primary_object(obj) prepare_for_conversion(primary) return primary.__hdl_converter__.extractedTypes", "impl_bit_and(obj, rhs, astParser): return obj.__hdl_converter__.impl_bit_and(obj, rhs, astParser) def function_name_modifier(obj, name, varSigSuffix): return obj.__hdl_converter__.function_name_modifier(obj,", "convert_all_entities(obj, ouputFolder, x, FilesDone): return obj.__hdl_converter__.convert_all_entities(obj, ouputFolder, x, FilesDone) def convert_all_impl(obj, ouputFolder, FilesDone):", "sl, astParser=None): return obj.__hdl_converter__.impl_slice(obj, sl, astParser) def impl_compare(obj, ops, rhs, astParser=None): return obj.__hdl_converter__.impl_compare(obj,", "args, astParser=None): return obj.__hdl_converter__.impl_function_call(obj=obj, name=name, args=args, astParser=astParser) def impl_symbol_instantiation(obj, VarSymb=\"variable\"): return obj.__hdl_converter__.impl_symbol_instantiation(obj, VarSymb)", "get_Name_array(obj): return obj.__hdl_converter__.get_Name_array(obj) def length(obj): return obj.__hdl_converter__.length(obj) def to_arglist(obj, name, parent, withDefault=False, astParser=None):", "def get_MemfunctionCalls(obj): return obj.__hdl_converter__.get_MemfunctionCalls(obj) def FlagFor_TemplateMissing(obj): obj.__hdl_converter__.FlagFor_TemplateMissing(obj) def reset_TemplateMissing(obj): obj.__hdl_converter__.reset_TemplateMissing(obj) def isTemplateMissing(obj): return", "return obj.__hdl_converter__.impl_add(obj, args) def 
impl_sub(obj,args): return obj.__hdl_converter__.impl_sub(obj, args) def impl_multi(obj,args): return obj.__hdl_converter__.impl_multi(obj, args)", "parent) def def_packet_body(obj, name, parent): return obj.__hdl_converter__.def_packet_body(obj, name, parent) def impl_entity_port(obj, name): return", "VarSymb=\"variable\"): return obj.__hdl_converter__.impl_symbol_instantiation(obj, VarSymb) def impl_architecture_header(obj): prepare_for_conversion(obj) return obj.__hdl_converter__.impl_architecture_header(obj) def impl_architecture_body(obj): return obj.__hdl_converter__.impl_architecture_body(obj)", "def InOut_t2str2(obj, inOut): return obj.__hdl_converter__.InOut_t2str2(inOut) def InOut_t2str(obj): return obj.__hdl_converter__.InOut_t2str(obj) def get_default_value(obj): return obj.__hdl_converter__.get_default_value(obj)", "def impl_exit_rising_edge(obj): return obj.__hdl_converter__.impl_exit_rising_edge(obj) def get_assiment_op(obj): return obj.__hdl_converter__.get_assiment_op(obj) def get_Inout(obj,parent): return obj.__hdl_converter__.get_Inout(obj,parent) def", "None,ForceExpand=False): return obj.__hdl_converter__.impl_get_init_values(obj, parent, InOut_Filter, VaribleSignalFilter ,ForceExpand) def get_extractedTypes(obj): primary = get_primary_object(obj) prepare_for_conversion(primary)", "rhs, astParser) def impl_add(obj, args): return obj.__hdl_converter__.impl_add(obj, args) def impl_sub(obj, args): return obj.__hdl_converter__.impl_sub(obj,", "obj.__hdl_converter__.impl_architecture_body(obj) def impl_add(obj,args): return obj.__hdl_converter__.impl_add(obj, args) def impl_sub(obj,args): return obj.__hdl_converter__.impl_sub(obj, args) def impl_multi(obj,args):", "get_MemfunctionCalls(obj): return obj.__hdl_converter__.get_MemfunctionCalls(obj) def FlagFor_TemplateMissing(obj): obj.__hdl_converter__.FlagFor_TemplateMissing(obj) def reset_TemplateMissing(obj): obj.__hdl_converter__.reset_TemplateMissing(obj) def isTemplateMissing(obj): return 
obj.__hdl_converter__.isTemplateMissing(obj)", "impl_add(obj, args): return obj.__hdl_converter__.impl_add(obj, args) def impl_sub(obj, args): return obj.__hdl_converter__.impl_sub(obj, args) def impl_to_bool(obj,", "obj.__hdl_converter__.convert_all(obj, ouputFolder) def get_primary_object(obj): return obj.__hdl_converter__.get_primary_object(obj) def get_packet_file_name(obj): return obj.__hdl_converter__.get_packet_file_name(obj) def get_packet_file_content(obj): return", "def_record_Member_Default(obj, name, parent, Inout=None): return obj.__hdl_converter__.def_record_Member_Default(obj, name, parent, Inout) def def_packet_header(obj, name, parent):", "obj.__hdl_converter__.impl_to_bool(obj, astParser) def impl_bit_and(obj, rhs, astParser): return obj.__hdl_converter__.impl_bit_and(obj, rhs, astParser) def function_name_modifier(obj, name,", "obj.__hdl_converter__.get_dependency_objects(obj, dep_list) def ops2str(obj, ops): return obj.__hdl_converter__.ops2str(ops) def get_MemfunctionCalls(obj): return obj.__hdl_converter__.get_MemfunctionCalls(obj) def FlagFor_TemplateMissing(obj):", "return obj.__hdl_converter__.convert_all_impl(obj, ouputFolder, FilesDone) def convert_all(obj, ouputFolder): return obj.__hdl_converter__.convert_all(obj, ouputFolder) def get_primary_object(obj): return", "def_packet_body(obj, name, parent): return obj.__hdl_converter__.def_packet_body(obj, name, parent) def impl_entity_port(obj, name): return obj.__hdl_converter__.impl_entity_port(obj, name)", "obj.__hdl_converter__.function_name_modifier(obj, name, varSigSuffix) def impl_get_value(obj, ReturnToObj=None, astParser=None): return obj.__hdl_converter__.impl_get_value(obj, ReturnToObj, astParser) def impl_reasign_type(obj):", "dep_list) def ops2str(obj, ops): return obj.__hdl_converter__.ops2str(ops) def get_MemfunctionCalls(obj): return obj.__hdl_converter__.get_MemfunctionCalls(obj) def FlagFor_TemplateMissing(obj): obj.__hdl_converter__.FlagFor_TemplateMissing(obj)", "return 
obj.__hdl_converter__.impl_sub(obj, args) def impl_multi(obj,args): return obj.__hdl_converter__.impl_multi(obj, args) def def_entity_port(obj): prepare_for_conversion(obj) return obj.__hdl_converter__.def_entity_port(obj)", "def get_entity_file_name(obj): return obj.__hdl_converter__.get_entity_file_name(obj) def get_type_simple(obj): return obj.__hdl_converter__.get_type_simple(obj) def get_type_simple_template(obj): return obj.__hdl_converter__.get_type_simple_template(obj) def", "return obj.__hdl_converter__.impl_architecture_body(obj) def impl_add(obj,args): return obj.__hdl_converter__.impl_add(obj, args) def impl_sub(obj,args): return obj.__hdl_converter__.impl_sub(obj, args) def", "obj.__hdl_converter__.get_Inout(obj,parent) def InOut_t2str2(obj, inOut): return obj.__hdl_converter__.InOut_t2str2(inOut) def InOut_t2str(obj): return obj.__hdl_converter__.InOut_t2str(obj) def get_default_value(obj): return", "obj.__hdl_converter__.impl_includes(obj, name, parent) def def_includes(obj, name, parent): return obj.__hdl_converter__.def_includes(obj, name, parent) def def_record_Member(obj,", "def convert_all_packages(obj, ouputFolder, x, FilesDone): return obj.__hdl_converter__.convert_all_packages(obj, ouputFolder, x, FilesDone) def convert_all_entities(obj, ouputFolder,", "def get_enity_file_content(obj): return obj.__hdl_converter__.get_enity_file_content(obj) def get_entity_file_name(obj): return obj.__hdl_converter__.get_entity_file_name(obj) def get_type_simple(obj): return obj.__hdl_converter__.get_type_simple(obj) def", "def impl_to_bool(obj, astParser): return obj.__hdl_converter__.impl_to_bool(obj, astParser) def impl_bit_and(obj, rhs, astParser): return obj.__hdl_converter__.impl_bit_and(obj, rhs,", "get_default_value(obj): return obj.__hdl_converter__.get_default_value(obj) def extract_conversion_types(obj, exclude_class_type=None, filter_inout=None): return obj.__hdl_converter__.extract_conversion_types(obj, exclude_class_type, filter_inout) def 
get_Name_array(obj):", "varSigSuffix) def impl_get_value(obj, ReturnToObj=None, astParser=None): return obj.__hdl_converter__.impl_get_value(obj, ReturnToObj, astParser) def impl_reasign_type(obj): return obj.__hdl_converter__.impl_reasign_type(obj)", "parent) def impl_slice(obj, sl, astParser=None): return obj.__hdl_converter__.impl_slice(obj, sl, astParser) def impl_compare(obj, ops, rhs,", "obj.__hdl_converter__.impl_reasign(obj, rhs, astParser, context_str) def impl_reasign_rshift_(obj, rhs, astParser=None, context_str=None): return obj.__hdl_converter__.impl_reasign_rshift_(obj, rhs, astParser,", "name, args): return obj.__hdl_converter__.get_call_member_function(obj, name, args) def impl_function_call(obj, name, args, astParser=None): return obj.__hdl_converter__.impl_function_call(obj=obj,", "convert_all(obj, ouputFolder): return obj.__hdl_converter__.convert_all(obj, ouputFolder) def get_primary_object(obj): return obj.__hdl_converter__.get_primary_object(obj) def get_packet_file_name(obj): return obj.__hdl_converter__.get_packet_file_name(obj)", "astParser=None, context_str=None): return obj.__hdl_converter__.impl_reasign(obj, rhs, astParser, context_str) def impl_reasign_rshift_(obj, rhs, astParser=None, context_str=None): return", "args): return obj.__hdl_converter__.impl_add(obj, args) def impl_sub(obj, args): return obj.__hdl_converter__.impl_sub(obj, args) def impl_to_bool(obj, astParser):", "impl_multi(obj,args): return obj.__hdl_converter__.impl_multi(obj, args) def def_entity_port(obj): prepare_for_conversion(obj) return obj.__hdl_converter__.def_entity_port(obj) def impl_process_header(obj): return obj.__hdl_converter__.impl_process_header(obj)", "pushpull) def get_free_symbols(obj, name, parent_list=[]): return obj.__hdl_converter__.get_free_symbols(obj,name, parent_list) def get_component_suffix(obj, Inout_type, varsignal_type): return", "obj.__hdl_converter__.FlagFor_TemplateMissing(obj) def reset_TemplateMissing(obj): 
obj.__hdl_converter__.reset_TemplateMissing(obj) def isTemplateMissing(obj): return obj.__hdl_converter__.isTemplateMissing(obj) def IsSucessfullConverted(obj): return obj.__hdl_converter__.IsSucessfullConverted(obj) def", "def ops2str(obj, ops): return obj.__hdl_converter__.ops2str(ops) def get_MemfunctionCalls(obj): return obj.__hdl_converter__.get_MemfunctionCalls(obj) def FlagFor_TemplateMissing(obj): obj.__hdl_converter__.FlagFor_TemplateMissing(obj) def", "return obj.__hdl_converter__.extract_conversion_types(obj, exclude_class_type, filter_inout) def get_Name_array(obj): return obj.__hdl_converter__.get_Name_array(obj) def length(obj): return obj.__hdl_converter__.length(obj) def", "name, varSigSuffix) def impl_get_value(obj, ReturnToObj=None, astParser=None): return obj.__hdl_converter__.impl_get_value(obj, ReturnToObj, astParser) def impl_reasign_type(obj): return", "parent) def impl_entity_port(obj, name): return obj.__hdl_converter__.impl_entity_port(obj, name) def impl_function_argument(obj, func_arg, arg): return obj.__hdl_converter__.impl_function_argument(obj,", "Inout) def def_packet_header(obj, name, parent): return obj.__hdl_converter__.def_packet_header(obj, name, parent) def def_packet_body(obj, name, parent):", "obj.__hdl_converter__.impl_constructor(obj) def parse_file(obj): return obj.__hdl_converter__.parse_file(obj) def impl_includes(obj, name, parent): return obj.__hdl_converter__.impl_includes(obj, name, parent)", "filter_inout) def get_Name_array(obj): return obj.__hdl_converter__.get_Name_array(obj) def length(obj): return obj.__hdl_converter__.length(obj) def to_arglist(obj, name, parent,", "impl_function_argument(obj, func_arg, arg): return obj.__hdl_converter__.impl_function_argument(obj, func_arg, arg) def impl_get_attribute(obj, attName,parent = None): return", "return obj.__hdl_converter__.impl_constructor(obj) def parse_file(obj): return obj.__hdl_converter__.parse_file(obj) def impl_includes(obj, name, parent): return 
obj.__hdl_converter__.impl_includes(obj, name,", "ReturnToObj=None, astParser=None): return obj.__hdl_converter__.impl_get_value(obj, ReturnToObj, astParser) def impl_reasign_type(obj): return obj.__hdl_converter__.impl_reasign_type(obj) def impl_reasign(obj, rhs,", "def InOut_t2str(obj): return obj.__hdl_converter__.InOut_t2str(obj) def get_default_value(obj): return obj.__hdl_converter__.get_default_value(obj) def extract_conversion_types(obj, exclude_class_type=None, filter_inout=None): return", "FilesDone): return obj.__hdl_converter__.convert_all_impl(obj, ouputFolder, FilesDone) def convert_all(obj, ouputFolder): return obj.__hdl_converter__.convert_all(obj, ouputFolder) def get_primary_object(obj):", "FlagFor_TemplateMissing(obj): obj.__hdl_converter__.FlagFor_TemplateMissing(obj) def reset_TemplateMissing(obj): obj.__hdl_converter__.reset_TemplateMissing(obj) def isTemplateMissing(obj): return obj.__hdl_converter__.isTemplateMissing(obj) def IsSucessfullConverted(obj): return obj.__hdl_converter__.IsSucessfullConverted(obj)", "def impl_function_call(obj, name, args, astParser=None): return obj.__hdl_converter__.impl_function_call(obj=obj, name=name, args=args, astParser=astParser) def impl_symbol_instantiation(obj, VarSymb=\"variable\"):", "function_name_modifier(obj, name, varSigSuffix): return obj.__hdl_converter__.function_name_modifier(obj, name, varSigSuffix) def impl_get_value(obj, ReturnToObj=None, astParser=None): return obj.__hdl_converter__.impl_get_value(obj,", "astParser): return obj.__hdl_converter__.impl_bit_and(obj, rhs, astParser) def function_name_modifier(obj, name, varSigSuffix): return obj.__hdl_converter__.function_name_modifier(obj, name, varSigSuffix)", "def impl_process_header(obj): return obj.__hdl_converter__.impl_process_header(obj) def impl_process_sensitivity_list(obj): return obj.__hdl_converter__.impl_process_sensitivity_list(obj) def impl_process_pull(obj,clk): return obj.__hdl_converter__.impl_process_pull(obj,clk) def", 
"attName,parent = None): return obj.__hdl_converter__.impl_get_attribute(obj, attName, parent) def impl_slice(obj, sl, astParser=None): return obj.__hdl_converter__.impl_slice(obj,", "prepare_for_conversion(obj) return obj.__hdl_converter__.def_entity_port(obj) def impl_process_header(obj): return obj.__hdl_converter__.impl_process_header(obj) def impl_process_sensitivity_list(obj): return obj.__hdl_converter__.impl_process_sensitivity_list(obj) def impl_process_pull(obj,clk):", "return obj.__hdl_converter__.impl_exit_rising_edge(obj) def get_assiment_op(obj): return obj.__hdl_converter__.get_assiment_op(obj) def get_Inout(obj,parent): return obj.__hdl_converter__.get_Inout(obj,parent) def InOut_t2str2(obj, inOut):", "def get_free_symbols(obj, name, parent_list=[]): return obj.__hdl_converter__.get_free_symbols(obj,name, parent_list) def get_component_suffix(obj, Inout_type, varsignal_type): return obj.__hdl_converter__.get_component_suffix(obj,", "def get_inout_type_recursive(obj): return obj.__hdl_converter__.get_inout_type_recursive(obj) def Has_pushpull_function(obj, pushpull): return obj.__hdl_converter__.Has_pushpull_function(obj, pushpull) def get_free_symbols(obj, name,", "InOut_t2str2(obj, inOut): return obj.__hdl_converter__.InOut_t2str2(inOut) def InOut_t2str(obj): return obj.__hdl_converter__.InOut_t2str(obj) def get_default_value(obj): return obj.__hdl_converter__.get_default_value(obj) def", "ouputFolder): return obj.__hdl_converter__.convert_all(obj, ouputFolder) def get_primary_object(obj): return obj.__hdl_converter__.get_primary_object(obj) def get_packet_file_name(obj): return obj.__hdl_converter__.get_packet_file_name(obj) def", "impl_architecture_body(obj): return obj.__hdl_converter__.impl_architecture_body(obj) def impl_add(obj,args): return obj.__hdl_converter__.impl_add(obj, args) def impl_sub(obj,args): return obj.__hdl_converter__.impl_sub(obj, args)", "obj.__hdl_converter__.impl_sub(obj, args) def impl_multi(obj,args): return 
obj.__hdl_converter__.impl_multi(obj, args) def def_entity_port(obj): prepare_for_conversion(obj) return obj.__hdl_converter__.def_entity_port(obj) def", "obj.__hdl_converter__.get_free_symbols(obj,name, parent_list) def get_component_suffix(obj, Inout_type, varsignal_type): return obj.__hdl_converter__.get_component_suffix(obj, Inout_type, varsignal_type) def prepare_for_conversion(obj): return", "reset_TemplateMissing(obj): obj.__hdl_converter__.reset_TemplateMissing(obj) def isTemplateMissing(obj): return obj.__hdl_converter__.isTemplateMissing(obj) def IsSucessfullConverted(obj): return obj.__hdl_converter__.IsSucessfullConverted(obj) def convert_all_packages(obj, ouputFolder,", "return obj.__hdl_converter__.def_record_Member(obj, name, parent, Inout) def def_record_Member_Default(obj, name, parent, Inout=None): return obj.__hdl_converter__.def_record_Member_Default(obj, name,", "InOut_Filter=None, VaribleSignalFilter = None,ForceExpand=False): return obj.__hdl_converter__.impl_get_init_values(obj, parent, InOut_Filter, VaribleSignalFilter ,ForceExpand) def get_extractedTypes(obj): primary", "def impl_get_init_values(obj,parent=None, InOut_Filter=None, VaribleSignalFilter = None,ForceExpand=False): return obj.__hdl_converter__.impl_get_init_values(obj, parent, InOut_Filter, VaribleSignalFilter ,ForceExpand) def", "x, FilesDone): return obj.__hdl_converter__.convert_all_packages(obj, ouputFolder, x, FilesDone) def convert_all_entities(obj, ouputFolder, x, FilesDone): return", "return obj.__hdl_converter__.InOut_t2str(obj) def get_default_value(obj): return obj.__hdl_converter__.get_default_value(obj) def extract_conversion_types(obj, exclude_class_type=None, filter_inout=None): return obj.__hdl_converter__.extract_conversion_types(obj, exclude_class_type,", "obj.__hdl_converter__.InOut_t2str(obj) def get_default_value(obj): return obj.__hdl_converter__.get_default_value(obj) def extract_conversion_types(obj, exclude_class_type=None, filter_inout=None): return 
obj.__hdl_converter__.extract_conversion_types(obj, exclude_class_type, filter_inout)", "ouputFolder, FilesDone) def convert_all(obj, ouputFolder): return obj.__hdl_converter__.convert_all(obj, ouputFolder) def get_primary_object(obj): return obj.__hdl_converter__.get_primary_object(obj) def", "obj.__hdl_converter__.impl_function_argument(obj, func_arg, arg) def impl_get_attribute(obj, attName,parent = None): return obj.__hdl_converter__.impl_get_attribute(obj, attName, parent) def", "astParser=None): return obj.__hdl_converter__.impl_slice(obj, sl, astParser) def impl_compare(obj, ops, rhs, astParser=None): return obj.__hdl_converter__.impl_compare(obj, ops,", "obj.__hdl_converter__.InOut_t2str2(inOut) def InOut_t2str(obj): return obj.__hdl_converter__.InOut_t2str(obj) def get_default_value(obj): return obj.__hdl_converter__.get_default_value(obj) def extract_conversion_types(obj, exclude_class_type=None, filter_inout=None):", "obj.__hdl_converter__.get_assiment_op(obj) def get_Inout(obj,parent): return obj.__hdl_converter__.get_Inout(obj,parent) def InOut_t2str2(obj, inOut): return obj.__hdl_converter__.InOut_t2str2(inOut) def InOut_t2str(obj): return", "def Has_pushpull_function(obj, pushpull): return obj.__hdl_converter__.Has_pushpull_function(obj, pushpull) def get_free_symbols(obj, name, parent_list=[]): return obj.__hdl_converter__.get_free_symbols(obj,name, parent_list)", "obj.__hdl_converter__.get_packet_file_content(obj) def get_enity_file_content(obj): return obj.__hdl_converter__.get_enity_file_content(obj) def get_entity_file_name(obj): return obj.__hdl_converter__.get_entity_file_name(obj) def get_type_simple(obj): return obj.__hdl_converter__.get_type_simple(obj)", "return obj.__hdl_converter__.impl_reasign_rshift_(obj, rhs, astParser, context_str) def get_call_member_function(obj, name, args): return obj.__hdl_converter__.get_call_member_function(obj, name, args)", "obj.__hdl_converter__.parse_file(obj) def impl_includes(obj, name, parent): return 
obj.__hdl_converter__.impl_includes(obj, name, parent) def def_includes(obj, name, parent):", "return obj.__hdl_converter__.impl_compare(obj, ops, rhs, astParser) def impl_add(obj, args): return obj.__hdl_converter__.impl_add(obj, args) def impl_sub(obj,", "def impl_sub(obj,args): return obj.__hdl_converter__.impl_sub(obj, args) def impl_multi(obj,args): return obj.__hdl_converter__.impl_multi(obj, args) def def_entity_port(obj): prepare_for_conversion(obj)", "obj.__hdl_converter__.get_HDL_name(obj,parent,suffix) def impl_get_init_values(obj,parent=None, InOut_Filter=None, VaribleSignalFilter = None,ForceExpand=False): return obj.__hdl_converter__.impl_get_init_values(obj, parent, InOut_Filter, VaribleSignalFilter ,ForceExpand)", "def def_record_Member(obj, name, parent, Inout=None): return obj.__hdl_converter__.def_record_Member(obj, name, parent, Inout) def def_record_Member_Default(obj, name,", "astParser): return obj.__hdl_converter__.impl_to_bool(obj, astParser) def impl_bit_and(obj, rhs, astParser): return obj.__hdl_converter__.impl_bit_and(obj, rhs, astParser) def", "return obj.__hdl_converter__.def_packet_body(obj, name, parent) def impl_entity_port(obj, name): return obj.__hdl_converter__.impl_entity_port(obj, name) def impl_function_argument(obj, func_arg,", "obj.__hdl_converter__.def_record_Member_Default(obj, name, parent, Inout) def def_packet_header(obj, name, parent): return obj.__hdl_converter__.def_packet_header(obj, name, parent) def", "ops2str(obj, ops): return obj.__hdl_converter__.ops2str(ops) def get_MemfunctionCalls(obj): return obj.__hdl_converter__.get_MemfunctionCalls(obj) def FlagFor_TemplateMissing(obj): obj.__hdl_converter__.FlagFor_TemplateMissing(obj) def reset_TemplateMissing(obj):", "Has_pushpull_function(obj, pushpull): return obj.__hdl_converter__.Has_pushpull_function(obj, pushpull) def get_free_symbols(obj, name, parent_list=[]): return obj.__hdl_converter__.get_free_symbols(obj,name, parent_list) def", "return 
obj.__hdl_converter__.impl_function_call(obj=obj, name=name, args=args, astParser=astParser) def impl_symbol_instantiation(obj, VarSymb=\"variable\"): return obj.__hdl_converter__.impl_symbol_instantiation(obj, VarSymb) def impl_architecture_header(obj):", "parent): return obj.__hdl_converter__.impl_includes(obj, name, parent) def def_includes(obj, name, parent): return obj.__hdl_converter__.def_includes(obj, name, parent)", "return obj.__hdl_converter__.impl_to_bool(obj, astParser) def impl_bit_and(obj, rhs, astParser): return obj.__hdl_converter__.impl_bit_and(obj, rhs, astParser) def function_name_modifier(obj,", "return obj.__hdl_converter__.impl_function_argument(obj, func_arg, arg) def impl_get_attribute(obj, attName,parent = None): return obj.__hdl_converter__.impl_get_attribute(obj, attName, parent)", "ops, rhs, astParser) def impl_add(obj, args): return obj.__hdl_converter__.impl_add(obj, args) def impl_sub(obj, args): return", "obj.__hdl_converter__.impl_exit_rising_edge(obj) def get_assiment_op(obj): return obj.__hdl_converter__.get_assiment_op(obj) def get_Inout(obj,parent): return obj.__hdl_converter__.get_Inout(obj,parent) def InOut_t2str2(obj, inOut): return", "def impl_bit_and(obj, rhs, astParser): return obj.__hdl_converter__.impl_bit_and(obj, rhs, astParser) def function_name_modifier(obj, name, varSigSuffix): return", "get_entity_file_name(obj): return obj.__hdl_converter__.get_entity_file_name(obj) def get_type_simple(obj): return obj.__hdl_converter__.get_type_simple(obj) def get_type_simple_template(obj): return obj.__hdl_converter__.get_type_simple_template(obj) def impl_constructor(obj):", "def convert_all(obj, ouputFolder): return obj.__hdl_converter__.convert_all(obj, ouputFolder) def get_primary_object(obj): return obj.__hdl_converter__.get_primary_object(obj) def get_packet_file_name(obj): return", "obj.__hdl_converter__.def_record_Member(obj, name, parent, Inout) def def_record_Member_Default(obj, name, parent, Inout=None): return 
obj.__hdl_converter__.def_record_Member_Default(obj, name, parent,", "astParser=None, context_str=None): return obj.__hdl_converter__.impl_reasign_rshift_(obj, rhs, astParser, context_str) def get_call_member_function(obj, name, args): return obj.__hdl_converter__.get_call_member_function(obj,", "impl_compare(obj, ops, rhs, astParser=None): return obj.__hdl_converter__.impl_compare(obj, ops, rhs, astParser) def impl_add(obj, args): return", "impl_architecture_header(obj): prepare_for_conversion(obj) return obj.__hdl_converter__.impl_architecture_header(obj) def impl_architecture_body(obj): return obj.__hdl_converter__.impl_architecture_body(obj) def impl_add(obj,args): return obj.__hdl_converter__.impl_add(obj, args)", "obj.__hdl_converter__.impl_process_header(obj) def impl_process_sensitivity_list(obj): return obj.__hdl_converter__.impl_process_sensitivity_list(obj) def impl_process_pull(obj,clk): return obj.__hdl_converter__.impl_process_pull(obj,clk) def impl_process_push(obj,clk): return obj.__hdl_converter__.impl_process_push(obj,clk)", "name, parent, Inout=None): return obj.__hdl_converter__.def_record_Member(obj, name, parent, Inout) def def_record_Member_Default(obj, name, parent, Inout=None):", "def impl_process_pull(obj,clk): return obj.__hdl_converter__.impl_process_pull(obj,clk) def impl_process_push(obj,clk): return obj.__hdl_converter__.impl_process_push(obj,clk) def impl_enter_rising_edge(obj): return obj.__hdl_converter__.impl_enter_rising_edge(obj) def", "x, FilesDone) def convert_all_impl(obj, ouputFolder, FilesDone): return obj.__hdl_converter__.convert_all_impl(obj, ouputFolder, FilesDone) def convert_all(obj, ouputFolder):", "def get_Name_array(obj): return obj.__hdl_converter__.get_Name_array(obj) def length(obj): return obj.__hdl_converter__.length(obj) def to_arglist(obj, name, parent, withDefault=False,", "return obj.__hdl_converter__.impl_get_init_values(obj, parent, InOut_Filter, VaribleSignalFilter ,ForceExpand) def 
get_extractedTypes(obj): primary = get_primary_object(obj) prepare_for_conversion(primary) return", "impl_get_attribute(obj, attName,parent = None): return obj.__hdl_converter__.impl_get_attribute(obj, attName, parent) def impl_slice(obj, sl, astParser=None): return", "def convert_all_entities(obj, ouputFolder, x, FilesDone): return obj.__hdl_converter__.convert_all_entities(obj, ouputFolder, x, FilesDone) def convert_all_impl(obj, ouputFolder,", "return obj.__hdl_converter__.convert_all_entities(obj, ouputFolder, x, FilesDone) def convert_all_impl(obj, ouputFolder, FilesDone): return obj.__hdl_converter__.convert_all_impl(obj, ouputFolder, FilesDone)", "obj.__hdl_converter__.to_arglist(obj, name, parent, withDefault, astParser) def get_inout_type_recursive(obj): return obj.__hdl_converter__.get_inout_type_recursive(obj) def Has_pushpull_function(obj, pushpull): return", "name, parent) def def_packet_body(obj, name, parent): return obj.__hdl_converter__.def_packet_body(obj, name, parent) def impl_entity_port(obj, name):", "obj.__hdl_converter__.reset_TemplateMissing(obj) def isTemplateMissing(obj): return obj.__hdl_converter__.isTemplateMissing(obj) def IsSucessfullConverted(obj): return obj.__hdl_converter__.IsSucessfullConverted(obj) def convert_all_packages(obj, ouputFolder, x,", "def to_arglist(obj, name, parent, withDefault=False, astParser=None): return obj.__hdl_converter__.to_arglist(obj, name, parent, withDefault, astParser) def", "return obj.__hdl_converter__.impl_get_value(obj, ReturnToObj, astParser) def impl_reasign_type(obj): return obj.__hdl_converter__.impl_reasign_type(obj) def impl_reasign(obj, rhs, astParser=None, context_str=None):", "parent): return obj.__hdl_converter__.def_includes(obj, name, parent) def def_record_Member(obj, name, parent, Inout=None): return obj.__hdl_converter__.def_record_Member(obj, name,", "x, FilesDone) def convert_all_entities(obj, ouputFolder, x, FilesDone): return obj.__hdl_converter__.convert_all_entities(obj, 
ouputFolder, x, FilesDone) def", "def impl_slice(obj, sl, astParser=None): return obj.__hdl_converter__.impl_slice(obj, sl, astParser) def impl_compare(obj, ops, rhs, astParser=None):", "name, args, astParser=None): return obj.__hdl_converter__.impl_function_call(obj=obj, name=name, args=args, astParser=astParser) def impl_symbol_instantiation(obj, VarSymb=\"variable\"): return obj.__hdl_converter__.impl_symbol_instantiation(obj,", "args) def def_entity_port(obj): prepare_for_conversion(obj) return obj.__hdl_converter__.def_entity_port(obj) def impl_process_header(obj): return obj.__hdl_converter__.impl_process_header(obj) def impl_process_sensitivity_list(obj): return", "parse_file(obj): return obj.__hdl_converter__.parse_file(obj) def impl_includes(obj, name, parent): return obj.__hdl_converter__.impl_includes(obj, name, parent) def def_includes(obj,", "= None): return obj.__hdl_converter__.impl_get_attribute(obj, attName, parent) def impl_slice(obj, sl, astParser=None): return obj.__hdl_converter__.impl_slice(obj, sl,", "get_assiment_op(obj): return obj.__hdl_converter__.get_assiment_op(obj) def get_Inout(obj,parent): return obj.__hdl_converter__.get_Inout(obj,parent) def InOut_t2str2(obj, inOut): return obj.__hdl_converter__.InOut_t2str2(inOut) def", "return obj.__hdl_converter__.def_packet_header(obj, name, parent) def def_packet_body(obj, name, parent): return obj.__hdl_converter__.def_packet_body(obj, name, parent) def", "inOut): return obj.__hdl_converter__.InOut_t2str2(inOut) def InOut_t2str(obj): return obj.__hdl_converter__.InOut_t2str(obj) def get_default_value(obj): return obj.__hdl_converter__.get_default_value(obj) def extract_conversion_types(obj,", "return obj.__hdl_converter__.impl_get_attribute(obj, attName, parent) def impl_slice(obj, sl, astParser=None): return obj.__hdl_converter__.impl_slice(obj, sl, astParser) def", "return obj.__hdl_converter__.impl_process_header(obj) def impl_process_sensitivity_list(obj): return 
obj.__hdl_converter__.impl_process_sensitivity_list(obj) def impl_process_pull(obj,clk): return obj.__hdl_converter__.impl_process_pull(obj,clk) def impl_process_push(obj,clk): return", "args) def impl_multi(obj,args): return obj.__hdl_converter__.impl_multi(obj, args) def def_entity_port(obj): prepare_for_conversion(obj) return obj.__hdl_converter__.def_entity_port(obj) def impl_process_header(obj):", "impl_process_sensitivity_list(obj): return obj.__hdl_converter__.impl_process_sensitivity_list(obj) def impl_process_pull(obj,clk): return obj.__hdl_converter__.impl_process_pull(obj,clk) def impl_process_push(obj,clk): return obj.__hdl_converter__.impl_process_push(obj,clk) def impl_enter_rising_edge(obj):", "FilesDone) def convert_all_entities(obj, ouputFolder, x, FilesDone): return obj.__hdl_converter__.convert_all_entities(obj, ouputFolder, x, FilesDone) def convert_all_impl(obj,", "return obj.__hdl_converter__.get_dependency_objects(obj, dep_list) def ops2str(obj, ops): return obj.__hdl_converter__.ops2str(ops) def get_MemfunctionCalls(obj): return obj.__hdl_converter__.get_MemfunctionCalls(obj) def", "return obj.__hdl_converter__.get_entity_file_name(obj) def get_type_simple(obj): return obj.__hdl_converter__.get_type_simple(obj) def get_type_simple_template(obj): return obj.__hdl_converter__.get_type_simple_template(obj) def impl_constructor(obj): return", "return obj.__hdl_converter__.parse_file(obj) def impl_includes(obj, name, parent): return obj.__hdl_converter__.impl_includes(obj, name, parent) def def_includes(obj, name,", "InOut_t2str(obj): return obj.__hdl_converter__.InOut_t2str(obj) def get_default_value(obj): return obj.__hdl_converter__.get_default_value(obj) def extract_conversion_types(obj, exclude_class_type=None, filter_inout=None): return obj.__hdl_converter__.extract_conversion_types(obj,", "def def_entity_port(obj): prepare_for_conversion(obj) return obj.__hdl_converter__.def_entity_port(obj) def impl_process_header(obj): return 
obj.__hdl_converter__.impl_process_header(obj) def impl_process_sensitivity_list(obj): return obj.__hdl_converter__.impl_process_sensitivity_list(obj)", "get_packet_file_name(obj): return obj.__hdl_converter__.get_packet_file_name(obj) def get_packet_file_content(obj): return obj.__hdl_converter__.get_packet_file_content(obj) def get_enity_file_content(obj): return obj.__hdl_converter__.get_enity_file_content(obj) def get_entity_file_name(obj):", "return obj.__hdl_converter__.get_MemfunctionCalls(obj) def FlagFor_TemplateMissing(obj): obj.__hdl_converter__.FlagFor_TemplateMissing(obj) def reset_TemplateMissing(obj): obj.__hdl_converter__.reset_TemplateMissing(obj) def isTemplateMissing(obj): return obj.__hdl_converter__.isTemplateMissing(obj) def", "def get_default_value(obj): return obj.__hdl_converter__.get_default_value(obj) def extract_conversion_types(obj, exclude_class_type=None, filter_inout=None): return obj.__hdl_converter__.extract_conversion_types(obj, exclude_class_type, filter_inout) def", "impl_reasign_rshift_(obj, rhs, astParser=None, context_str=None): return obj.__hdl_converter__.impl_reasign_rshift_(obj, rhs, astParser, context_str) def get_call_member_function(obj, name, args):", "return obj.__hdl_converter__.get_type_simple(obj) def get_type_simple_template(obj): return obj.__hdl_converter__.get_type_simple_template(obj) def impl_constructor(obj): return obj.__hdl_converter__.impl_constructor(obj) def parse_file(obj): return", "astParser) def impl_compare(obj, ops, rhs, astParser=None): return obj.__hdl_converter__.impl_compare(obj, ops, rhs, astParser) def impl_add(obj,", "impl_process_pull(obj,clk): return obj.__hdl_converter__.impl_process_pull(obj,clk) def impl_process_push(obj,clk): return obj.__hdl_converter__.impl_process_push(obj,clk) def impl_enter_rising_edge(obj): return obj.__hdl_converter__.impl_enter_rising_edge(obj) def impl_exit_rising_edge(obj):", "ouputFolder, x, FilesDone) def convert_all_impl(obj, ouputFolder, FilesDone): 
return obj.__hdl_converter__.convert_all_impl(obj, ouputFolder, FilesDone) def convert_all(obj,", "parent, Inout=None): return obj.__hdl_converter__.def_record_Member_Default(obj, name, parent, Inout) def def_packet_header(obj, name, parent): return obj.__hdl_converter__.def_packet_header(obj,", "return obj.__hdl_converter__.def_includes(obj, name, parent) def def_record_Member(obj, name, parent, Inout=None): return obj.__hdl_converter__.def_record_Member(obj, name, parent,", "parent) def def_includes(obj, name, parent): return obj.__hdl_converter__.def_includes(obj, name, parent) def def_record_Member(obj, name, parent,", "obj.__hdl_converter__.impl_slice(obj, sl, astParser) def impl_compare(obj, ops, rhs, astParser=None): return obj.__hdl_converter__.impl_compare(obj, ops, rhs, astParser)", "name, parent) def def_record_Member(obj, name, parent, Inout=None): return obj.__hdl_converter__.def_record_Member(obj, name, parent, Inout) def", "arg): return obj.__hdl_converter__.impl_function_argument(obj, func_arg, arg) def impl_get_attribute(obj, attName,parent = None): return obj.__hdl_converter__.impl_get_attribute(obj, attName,", "def get_type_simple_template(obj): return obj.__hdl_converter__.get_type_simple_template(obj) def impl_constructor(obj): return obj.__hdl_converter__.impl_constructor(obj) def parse_file(obj): return obj.__hdl_converter__.parse_file(obj) def", "get_type_simple_template(obj): return obj.__hdl_converter__.get_type_simple_template(obj) def impl_constructor(obj): return obj.__hdl_converter__.impl_constructor(obj) def parse_file(obj): return obj.__hdl_converter__.parse_file(obj) def impl_includes(obj,", "to_arglist(obj, name, parent, withDefault=False, astParser=None): return obj.__hdl_converter__.to_arglist(obj, name, parent, withDefault, astParser) def get_inout_type_recursive(obj):", "def get_type_simple(obj): return obj.__hdl_converter__.get_type_simple(obj) def get_type_simple_template(obj): return 
obj.__hdl_converter__.get_type_simple_template(obj) def impl_constructor(obj): return obj.__hdl_converter__.impl_constructor(obj) def", "impl_exit_rising_edge(obj): return obj.__hdl_converter__.impl_exit_rising_edge(obj) def get_assiment_op(obj): return obj.__hdl_converter__.get_assiment_op(obj) def get_Inout(obj,parent): return obj.__hdl_converter__.get_Inout(obj,parent) def InOut_t2str2(obj,", "def extract_conversion_types(obj, exclude_class_type=None, filter_inout=None): return obj.__hdl_converter__.extract_conversion_types(obj, exclude_class_type, filter_inout) def get_Name_array(obj): return obj.__hdl_converter__.get_Name_array(obj) def", "return obj.__hdl_converter__.impl_reasign_type(obj) def impl_reasign(obj, rhs, astParser=None, context_str=None): return obj.__hdl_converter__.impl_reasign(obj, rhs, astParser, context_str) def", "def impl_add(obj, args): return obj.__hdl_converter__.impl_add(obj, args) def impl_sub(obj, args): return obj.__hdl_converter__.impl_sub(obj, args) def", "def impl_reasign_type(obj): return obj.__hdl_converter__.impl_reasign_type(obj) def impl_reasign(obj, rhs, astParser=None, context_str=None): return obj.__hdl_converter__.impl_reasign(obj, rhs, astParser,", "parent, Inout) def def_packet_header(obj, name, parent): return obj.__hdl_converter__.def_packet_header(obj, name, parent) def def_packet_body(obj, name,", "return obj.__hdl_converter__.impl_bit_and(obj, rhs, astParser) def function_name_modifier(obj, name, varSigSuffix): return obj.__hdl_converter__.function_name_modifier(obj, name, varSigSuffix) def", "astParser=None): return obj.__hdl_converter__.to_arglist(obj, name, parent, withDefault, astParser) def get_inout_type_recursive(obj): return obj.__hdl_converter__.get_inout_type_recursive(obj) def Has_pushpull_function(obj,", "convert_all_impl(obj, ouputFolder, FilesDone): return obj.__hdl_converter__.convert_all_impl(obj, ouputFolder, FilesDone) def convert_all(obj, ouputFolder): return 
obj.__hdl_converter__.convert_all(obj, ouputFolder)", "return obj.__hdl_converter__.impl_process_sensitivity_list(obj) def impl_process_pull(obj,clk): return obj.__hdl_converter__.impl_process_pull(obj,clk) def impl_process_push(obj,clk): return obj.__hdl_converter__.impl_process_push(obj,clk) def impl_enter_rising_edge(obj): return", "parent, Inout=None): return obj.__hdl_converter__.def_record_Member(obj, name, parent, Inout) def def_record_Member_Default(obj, name, parent, Inout=None): return", "obj.__hdl_converter__.get_enity_file_content(obj) def get_entity_file_name(obj): return obj.__hdl_converter__.get_entity_file_name(obj) def get_type_simple(obj): return obj.__hdl_converter__.get_type_simple(obj) def get_type_simple_template(obj): return obj.__hdl_converter__.get_type_simple_template(obj)", "def impl_includes(obj, name, parent): return obj.__hdl_converter__.impl_includes(obj, name, parent) def def_includes(obj, name, parent): return", "obj.__hdl_converter__.impl_get_attribute(obj, attName, parent) def impl_slice(obj, sl, astParser=None): return obj.__hdl_converter__.impl_slice(obj, sl, astParser) def impl_compare(obj,", "def get_call_member_function(obj, name, args): return obj.__hdl_converter__.get_call_member_function(obj, name, args) def impl_function_call(obj, name, args, astParser=None):", "def impl_get_value(obj, ReturnToObj=None, astParser=None): return obj.__hdl_converter__.impl_get_value(obj, ReturnToObj, astParser) def impl_reasign_type(obj): return obj.__hdl_converter__.impl_reasign_type(obj) def", "parent): return obj.__hdl_converter__.def_packet_header(obj, name, parent) def def_packet_body(obj, name, parent): return obj.__hdl_converter__.def_packet_body(obj, name, parent)", "def impl_entity_port(obj, name): return obj.__hdl_converter__.impl_entity_port(obj, name) def impl_function_argument(obj, func_arg, arg): return obj.__hdl_converter__.impl_function_argument(obj, func_arg,", "def def_packet_body(obj, name, parent): return 
obj.__hdl_converter__.def_packet_body(obj, name, parent) def impl_entity_port(obj, name): return obj.__hdl_converter__.impl_entity_port(obj,", "def get_primary_object(obj): return obj.__hdl_converter__.get_primary_object(obj) def get_packet_file_name(obj): return obj.__hdl_converter__.get_packet_file_name(obj) def get_packet_file_content(obj): return obj.__hdl_converter__.get_packet_file_content(obj) def", "parent,suffix): return obj.__hdl_converter__.get_HDL_name(obj,parent,suffix) def impl_get_init_values(obj,parent=None, InOut_Filter=None, VaribleSignalFilter = None,ForceExpand=False): return obj.__hdl_converter__.impl_get_init_values(obj, parent, InOut_Filter,", "obj.__hdl_converter__.impl_reasign_type(obj) def impl_reasign(obj, rhs, astParser=None, context_str=None): return obj.__hdl_converter__.impl_reasign(obj, rhs, astParser, context_str) def impl_reasign_rshift_(obj,", "def impl_architecture_body(obj): return obj.__hdl_converter__.impl_architecture_body(obj) def impl_add(obj,args): return obj.__hdl_converter__.impl_add(obj, args) def impl_sub(obj,args): return obj.__hdl_converter__.impl_sub(obj,", "astParser=None): return obj.__hdl_converter__.impl_get_value(obj, ReturnToObj, astParser) def impl_reasign_type(obj): return obj.__hdl_converter__.impl_reasign_type(obj) def impl_reasign(obj, rhs, astParser=None,", "return obj.__hdl_converter__.impl_process_pull(obj,clk) def impl_process_push(obj,clk): return obj.__hdl_converter__.impl_process_push(obj,clk) def impl_enter_rising_edge(obj): return obj.__hdl_converter__.impl_enter_rising_edge(obj) def impl_exit_rising_edge(obj): return", "FilesDone): return obj.__hdl_converter__.convert_all_entities(obj, ouputFolder, x, FilesDone) def convert_all_impl(obj, ouputFolder, FilesDone): return obj.__hdl_converter__.convert_all_impl(obj, ouputFolder,", "convert_all_packages(obj, ouputFolder, x, FilesDone): return obj.__hdl_converter__.convert_all_packages(obj, ouputFolder, x, FilesDone) def convert_all_entities(obj, 
ouputFolder, x,", "return obj.__hdl_converter__.impl_symbol_instantiation(obj, VarSymb) def impl_architecture_header(obj): prepare_for_conversion(obj) return obj.__hdl_converter__.impl_architecture_header(obj) def impl_architecture_body(obj): return obj.__hdl_converter__.impl_architecture_body(obj) def", "def impl_constructor(obj): return obj.__hdl_converter__.impl_constructor(obj) def parse_file(obj): return obj.__hdl_converter__.parse_file(obj) def impl_includes(obj, name, parent): return", "impl_get_value(obj, ReturnToObj=None, astParser=None): return obj.__hdl_converter__.impl_get_value(obj, ReturnToObj, astParser) def impl_reasign_type(obj): return obj.__hdl_converter__.impl_reasign_type(obj) def impl_reasign(obj,", "return obj.__hdl_converter__.length(obj) def to_arglist(obj, name, parent, withDefault=False, astParser=None): return obj.__hdl_converter__.to_arglist(obj, name, parent, withDefault,", "name, parent): return obj.__hdl_converter__.impl_includes(obj, name, parent) def def_includes(obj, name, parent): return obj.__hdl_converter__.def_includes(obj, name,", "Inout=None): return obj.__hdl_converter__.def_record_Member_Default(obj, name, parent, Inout) def def_packet_header(obj, name, parent): return obj.__hdl_converter__.def_packet_header(obj, name,", "impl_constructor(obj): return obj.__hdl_converter__.impl_constructor(obj) def parse_file(obj): return obj.__hdl_converter__.parse_file(obj) def impl_includes(obj, name, parent): return obj.__hdl_converter__.impl_includes(obj,", "filter_inout=None): return obj.__hdl_converter__.extract_conversion_types(obj, exclude_class_type, filter_inout) def get_Name_array(obj): return obj.__hdl_converter__.get_Name_array(obj) def length(obj): return obj.__hdl_converter__.length(obj)", "func_arg, arg): return obj.__hdl_converter__.impl_function_argument(obj, func_arg, arg) def impl_get_attribute(obj, attName,parent = None): return obj.__hdl_converter__.impl_get_attribute(obj,", "return 
obj.__hdl_converter__.InOut_t2str2(inOut) def InOut_t2str(obj): return obj.__hdl_converter__.InOut_t2str(obj) def get_default_value(obj): return obj.__hdl_converter__.get_default_value(obj) def extract_conversion_types(obj, exclude_class_type=None,", "obj.__hdl_converter__.def_packet_header(obj, name, parent) def def_packet_body(obj, name, parent): return obj.__hdl_converter__.def_packet_body(obj, name, parent) def impl_entity_port(obj,", "obj.__hdl_converter__.get_packet_file_name(obj) def get_packet_file_content(obj): return obj.__hdl_converter__.get_packet_file_content(obj) def get_enity_file_content(obj): return obj.__hdl_converter__.get_enity_file_content(obj) def get_entity_file_name(obj): return obj.__hdl_converter__.get_entity_file_name(obj)", "obj.__hdl_converter__.impl_process_sensitivity_list(obj) def impl_process_pull(obj,clk): return obj.__hdl_converter__.impl_process_pull(obj,clk) def impl_process_push(obj,clk): return obj.__hdl_converter__.impl_process_push(obj,clk) def impl_enter_rising_edge(obj): return obj.__hdl_converter__.impl_enter_rising_edge(obj)", "obj.__hdl_converter__.get_Name_array(obj) def length(obj): return obj.__hdl_converter__.length(obj) def to_arglist(obj, name, parent, withDefault=False, astParser=None): return obj.__hdl_converter__.to_arglist(obj,", "impl_to_bool(obj, astParser): return obj.__hdl_converter__.impl_to_bool(obj, astParser) def impl_bit_and(obj, rhs, astParser): return obj.__hdl_converter__.impl_bit_and(obj, rhs, astParser)", "get_enity_file_content(obj): return obj.__hdl_converter__.get_enity_file_content(obj) def get_entity_file_name(obj): return obj.__hdl_converter__.get_entity_file_name(obj) def get_type_simple(obj): return obj.__hdl_converter__.get_type_simple(obj) def get_type_simple_template(obj):", "parent_list=[]): return obj.__hdl_converter__.get_free_symbols(obj,name, parent_list) def get_component_suffix(obj, Inout_type, varsignal_type): return obj.__hdl_converter__.get_component_suffix(obj, Inout_type, 
varsignal_type) def", "args) def impl_sub(obj,args): return obj.__hdl_converter__.impl_sub(obj, args) def impl_multi(obj,args): return obj.__hdl_converter__.impl_multi(obj, args) def def_entity_port(obj):", "= None,ForceExpand=False): return obj.__hdl_converter__.impl_get_init_values(obj, parent, InOut_Filter, VaribleSignalFilter ,ForceExpand) def get_extractedTypes(obj): primary = get_primary_object(obj)", "obj.__hdl_converter__.impl_process_pull(obj,clk) def impl_process_push(obj,clk): return obj.__hdl_converter__.impl_process_push(obj,clk) def impl_enter_rising_edge(obj): return obj.__hdl_converter__.impl_enter_rising_edge(obj) def impl_exit_rising_edge(obj): return obj.__hdl_converter__.impl_exit_rising_edge(obj)", "prepare_for_conversion(obj) return obj.__hdl_converter__.impl_architecture_header(obj) def impl_architecture_body(obj): return obj.__hdl_converter__.impl_architecture_body(obj) def impl_add(obj,args): return obj.__hdl_converter__.impl_add(obj, args) def", "def_includes(obj, name, parent): return obj.__hdl_converter__.def_includes(obj, name, parent) def def_record_Member(obj, name, parent, Inout=None): return", "def reset_TemplateMissing(obj): obj.__hdl_converter__.reset_TemplateMissing(obj) def isTemplateMissing(obj): return obj.__hdl_converter__.isTemplateMissing(obj) def IsSucessfullConverted(obj): return obj.__hdl_converter__.IsSucessfullConverted(obj) def convert_all_packages(obj,", "def impl_process_push(obj,clk): return obj.__hdl_converter__.impl_process_push(obj,clk) def impl_enter_rising_edge(obj): return obj.__hdl_converter__.impl_enter_rising_edge(obj) def impl_exit_rising_edge(obj): return obj.__hdl_converter__.impl_exit_rising_edge(obj) def", "Inout_type, varsignal_type) def prepare_for_conversion(obj): return obj.__hdl_converter__.prepare_for_conversion(obj) def get_HDL_name(obj, parent,suffix): return obj.__hdl_converter__.get_HDL_name(obj,parent,suffix) def impl_get_init_values(obj,parent=None,", "context_str) def 
impl_reasign_rshift_(obj, rhs, astParser=None, context_str=None): return obj.__hdl_converter__.impl_reasign_rshift_(obj, rhs, astParser, context_str) def get_call_member_function(obj,", "get_primary_object(obj): return obj.__hdl_converter__.get_primary_object(obj) def get_packet_file_name(obj): return obj.__hdl_converter__.get_packet_file_name(obj) def get_packet_file_content(obj): return obj.__hdl_converter__.get_packet_file_content(obj) def get_enity_file_content(obj):", "x, FilesDone): return obj.__hdl_converter__.convert_all_entities(obj, ouputFolder, x, FilesDone) def convert_all_impl(obj, ouputFolder, FilesDone): return obj.__hdl_converter__.convert_all_impl(obj,", "obj.__hdl_converter__.impl_bit_and(obj, rhs, astParser) def function_name_modifier(obj, name, varSigSuffix): return obj.__hdl_converter__.function_name_modifier(obj, name, varSigSuffix) def impl_get_value(obj,", "def get_packet_file_name(obj): return obj.__hdl_converter__.get_packet_file_name(obj) def get_packet_file_content(obj): return obj.__hdl_converter__.get_packet_file_content(obj) def get_enity_file_content(obj): return obj.__hdl_converter__.get_enity_file_content(obj) def", "obj.__hdl_converter__.get_MemfunctionCalls(obj) def FlagFor_TemplateMissing(obj): obj.__hdl_converter__.FlagFor_TemplateMissing(obj) def reset_TemplateMissing(obj): obj.__hdl_converter__.reset_TemplateMissing(obj) def isTemplateMissing(obj): return obj.__hdl_converter__.isTemplateMissing(obj) def IsSucessfullConverted(obj):", "Inout=None): return obj.__hdl_converter__.def_record_Member(obj, name, parent, Inout) def def_record_Member_Default(obj, name, parent, Inout=None): return obj.__hdl_converter__.def_record_Member_Default(obj,", "obj.__hdl_converter__.convert_all_packages(obj, ouputFolder, x, FilesDone) def convert_all_entities(obj, ouputFolder, x, FilesDone): return obj.__hdl_converter__.convert_all_entities(obj, ouputFolder, x,", "Inout) def def_record_Member_Default(obj, name, parent, Inout=None): return 
obj.__hdl_converter__.def_record_Member_Default(obj, name, parent, Inout) def def_packet_header(obj,", "obj.__hdl_converter__.def_entity_port(obj) def impl_process_header(obj): return obj.__hdl_converter__.impl_process_header(obj) def impl_process_sensitivity_list(obj): return obj.__hdl_converter__.impl_process_sensitivity_list(obj) def impl_process_pull(obj,clk): return obj.__hdl_converter__.impl_process_pull(obj,clk)", "def FlagFor_TemplateMissing(obj): obj.__hdl_converter__.FlagFor_TemplateMissing(obj) def reset_TemplateMissing(obj): obj.__hdl_converter__.reset_TemplateMissing(obj) def isTemplateMissing(obj): return obj.__hdl_converter__.isTemplateMissing(obj) def IsSucessfullConverted(obj): return", "obj.__hdl_converter__.IsSucessfullConverted(obj) def convert_all_packages(obj, ouputFolder, x, FilesDone): return obj.__hdl_converter__.convert_all_packages(obj, ouputFolder, x, FilesDone) def convert_all_entities(obj,", "ouputFolder) def get_primary_object(obj): return obj.__hdl_converter__.get_primary_object(obj) def get_packet_file_name(obj): return obj.__hdl_converter__.get_packet_file_name(obj) def get_packet_file_content(obj): return obj.__hdl_converter__.get_packet_file_content(obj)", "name, parent) def def_includes(obj, name, parent): return obj.__hdl_converter__.def_includes(obj, name, parent) def def_record_Member(obj, name,", "def impl_reasign_rshift_(obj, rhs, astParser=None, context_str=None): return obj.__hdl_converter__.impl_reasign_rshift_(obj, rhs, astParser, context_str) def get_call_member_function(obj, name,", "def_packet_header(obj, name, parent): return obj.__hdl_converter__.def_packet_header(obj, name, parent) def def_packet_body(obj, name, parent): return obj.__hdl_converter__.def_packet_body(obj,", "varSigSuffix): return obj.__hdl_converter__.function_name_modifier(obj, name, varSigSuffix) def impl_get_value(obj, ReturnToObj=None, astParser=None): return obj.__hdl_converter__.impl_get_value(obj, ReturnToObj, astParser)", 
"obj.__hdl_converter__.impl_reasign_rshift_(obj, rhs, astParser, context_str) def get_call_member_function(obj, name, args): return obj.__hdl_converter__.get_call_member_function(obj, name, args) def", "rhs, astParser=None): return obj.__hdl_converter__.impl_compare(obj, ops, rhs, astParser) def impl_add(obj, args): return obj.__hdl_converter__.impl_add(obj, args)", "def prepare_for_conversion(obj): return obj.__hdl_converter__.prepare_for_conversion(obj) def get_HDL_name(obj, parent,suffix): return obj.__hdl_converter__.get_HDL_name(obj,parent,suffix) def impl_get_init_values(obj,parent=None, InOut_Filter=None, VaribleSignalFilter", "return obj.__hdl_converter__.def_record_Member_Default(obj, name, parent, Inout) def def_packet_header(obj, name, parent): return obj.__hdl_converter__.def_packet_header(obj, name, parent)", "impl_process_header(obj): return obj.__hdl_converter__.impl_process_header(obj) def impl_process_sensitivity_list(obj): return obj.__hdl_converter__.impl_process_sensitivity_list(obj) def impl_process_pull(obj,clk): return obj.__hdl_converter__.impl_process_pull(obj,clk) def impl_process_push(obj,clk):", "def impl_architecture_header(obj): prepare_for_conversion(obj) return obj.__hdl_converter__.impl_architecture_header(obj) def impl_architecture_body(obj): return obj.__hdl_converter__.impl_architecture_body(obj) def impl_add(obj,args): return obj.__hdl_converter__.impl_add(obj,", "withDefault, astParser) def get_inout_type_recursive(obj): return obj.__hdl_converter__.get_inout_type_recursive(obj) def Has_pushpull_function(obj, pushpull): return obj.__hdl_converter__.Has_pushpull_function(obj, pushpull) def", "rhs, astParser, context_str) def get_call_member_function(obj, name, args): return obj.__hdl_converter__.get_call_member_function(obj, name, args) def impl_function_call(obj,", "return obj.__hdl_converter__.get_packet_file_name(obj) def get_packet_file_content(obj): return obj.__hdl_converter__.get_packet_file_content(obj) def 
get_enity_file_content(obj): return obj.__hdl_converter__.get_enity_file_content(obj) def get_entity_file_name(obj): return", "astParser) def get_inout_type_recursive(obj): return obj.__hdl_converter__.get_inout_type_recursive(obj) def Has_pushpull_function(obj, pushpull): return obj.__hdl_converter__.Has_pushpull_function(obj, pushpull) def get_free_symbols(obj,", "return obj.__hdl_converter__.impl_process_push(obj,clk) def impl_enter_rising_edge(obj): return obj.__hdl_converter__.impl_enter_rising_edge(obj) def impl_exit_rising_edge(obj): return obj.__hdl_converter__.impl_exit_rising_edge(obj) def get_assiment_op(obj): return", "name=name, args=args, astParser=astParser) def impl_symbol_instantiation(obj, VarSymb=\"variable\"): return obj.__hdl_converter__.impl_symbol_instantiation(obj, VarSymb) def impl_architecture_header(obj): prepare_for_conversion(obj) return", "get_packet_file_content(obj): return obj.__hdl_converter__.get_packet_file_content(obj) def get_enity_file_content(obj): return obj.__hdl_converter__.get_enity_file_content(obj) def get_entity_file_name(obj): return obj.__hdl_converter__.get_entity_file_name(obj) def get_type_simple(obj):", "obj.__hdl_converter__.get_default_value(obj) def extract_conversion_types(obj, exclude_class_type=None, filter_inout=None): return obj.__hdl_converter__.extract_conversion_types(obj, exclude_class_type, filter_inout) def get_Name_array(obj): return obj.__hdl_converter__.get_Name_array(obj)" ]
[ "= [ ('posts', '0009_auto_20201023_1643'), ] operations = [ migrations.AlterField( model_name='post', name='title', field=models.CharField(help_text='Назовите пост',", "2.2.6 on 2020-11-03 16:39 from django.db import migrations, models class Migration(migrations.Migration): dependencies =", "Django 2.2.6 on 2020-11-03 16:39 from django.db import migrations, models class Migration(migrations.Migration): dependencies", "[ ('posts', '0009_auto_20201023_1643'), ] operations = [ migrations.AlterField( model_name='post', name='title', field=models.CharField(help_text='Назовите пост', max_length=200,", "operations = [ migrations.AlterField( model_name='post', name='title', field=models.CharField(help_text='Назовите пост', max_length=200, verbose_name='Название поста'), ), ]", "<reponame>abi83/YaPractice # Generated by Django 2.2.6 on 2020-11-03 16:39 from django.db import migrations,", "] operations = [ migrations.AlterField( model_name='post', name='title', field=models.CharField(help_text='Назовите пост', max_length=200, verbose_name='Название поста'), ),", "Generated by Django 2.2.6 on 2020-11-03 16:39 from django.db import migrations, models class", "('posts', '0009_auto_20201023_1643'), ] operations = [ migrations.AlterField( model_name='post', name='title', field=models.CharField(help_text='Назовите пост', max_length=200, verbose_name='Название", "models class Migration(migrations.Migration): dependencies = [ ('posts', '0009_auto_20201023_1643'), ] operations = [ migrations.AlterField(", "16:39 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('posts', '0009_auto_20201023_1643'),", "import migrations, models class Migration(migrations.Migration): dependencies = [ ('posts', '0009_auto_20201023_1643'), ] operations =", "on 2020-11-03 16:39 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [", "2020-11-03 16:39 from django.db import migrations, models class 
Migration(migrations.Migration): dependencies = [ ('posts',", "by Django 2.2.6 on 2020-11-03 16:39 from django.db import migrations, models class Migration(migrations.Migration):", "from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('posts', '0009_auto_20201023_1643'), ]", "django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('posts', '0009_auto_20201023_1643'), ] operations", "class Migration(migrations.Migration): dependencies = [ ('posts', '0009_auto_20201023_1643'), ] operations = [ migrations.AlterField( model_name='post',", "dependencies = [ ('posts', '0009_auto_20201023_1643'), ] operations = [ migrations.AlterField( model_name='post', name='title', field=models.CharField(help_text='Назовите", "# Generated by Django 2.2.6 on 2020-11-03 16:39 from django.db import migrations, models", "Migration(migrations.Migration): dependencies = [ ('posts', '0009_auto_20201023_1643'), ] operations = [ migrations.AlterField( model_name='post', name='title',", "'0009_auto_20201023_1643'), ] operations = [ migrations.AlterField( model_name='post', name='title', field=models.CharField(help_text='Назовите пост', max_length=200, verbose_name='Название поста'),", "migrations, models class Migration(migrations.Migration): dependencies = [ ('posts', '0009_auto_20201023_1643'), ] operations = [" ]
[ "may define additional fields or config shared across requests pass class RefreshTokenRequest(BaseRequest): refresh_token:", "# may define additional fields or config shared across requests pass class RefreshTokenRequest(BaseRequest):", "config shared across requests pass class RefreshTokenRequest(BaseRequest): refresh_token: str class UserUpdatePasswordRequest(BaseRequest): password: str", "EmailStr class BaseRequest(BaseModel): # may define additional fields or config shared across requests", "across requests pass class RefreshTokenRequest(BaseRequest): refresh_token: str class UserUpdatePasswordRequest(BaseRequest): password: str class UserCreateRequest(BaseRequest):", "additional fields or config shared across requests pass class RefreshTokenRequest(BaseRequest): refresh_token: str class", "<reponame>rafsaf/respo-fastapi-template<gh_stars>10-100 from pydantic import BaseModel, EmailStr class BaseRequest(BaseModel): # may define additional fields", "import BaseModel, EmailStr class BaseRequest(BaseModel): # may define additional fields or config shared", "BaseRequest(BaseModel): # may define additional fields or config shared across requests pass class", "pydantic import BaseModel, EmailStr class BaseRequest(BaseModel): # may define additional fields or config", "requests pass class RefreshTokenRequest(BaseRequest): refresh_token: str class UserUpdatePasswordRequest(BaseRequest): password: str class UserCreateRequest(BaseRequest): email:", "pass class RefreshTokenRequest(BaseRequest): refresh_token: str class UserUpdatePasswordRequest(BaseRequest): password: str class UserCreateRequest(BaseRequest): email: EmailStr", "BaseModel, EmailStr class BaseRequest(BaseModel): # may define additional fields or config shared across", "class RefreshTokenRequest(BaseRequest): refresh_token: str class UserUpdatePasswordRequest(BaseRequest): password: str class UserCreateRequest(BaseRequest): email: EmailStr password:", "shared across requests pass class 
RefreshTokenRequest(BaseRequest): refresh_token: str class UserUpdatePasswordRequest(BaseRequest): password: str class", "fields or config shared across requests pass class RefreshTokenRequest(BaseRequest): refresh_token: str class UserUpdatePasswordRequest(BaseRequest):", "class BaseRequest(BaseModel): # may define additional fields or config shared across requests pass", "or config shared across requests pass class RefreshTokenRequest(BaseRequest): refresh_token: str class UserUpdatePasswordRequest(BaseRequest): password:", "from pydantic import BaseModel, EmailStr class BaseRequest(BaseModel): # may define additional fields or", "define additional fields or config shared across requests pass class RefreshTokenRequest(BaseRequest): refresh_token: str", "RefreshTokenRequest(BaseRequest): refresh_token: str class UserUpdatePasswordRequest(BaseRequest): password: str class UserCreateRequest(BaseRequest): email: EmailStr password: str" ]
[ "asyncio import StreamReader, StreamWriter, AbstractEventLoop, \\ WriteTransport, Task, BaseTransport from io import BytesIO", "= self.loop.create_task(self.controller(self.reader, self.writer)) def connection_lost(self, exc): self.reader.feed_eof() def data_received(self, data): self.reader.feed_data(data) async def", "__init__(self, reader: StreamReader) -> None: self.reader = reader self.buffer = BitStream() self.total_bytes =", "= True class RTMPProtocol(asyncio.Protocol): def __init__(self, controller, loop: AbstractEventLoop) -> None: self.loop: AbstractEventLoop", "self.reader = reader self.buffer = BitStream() self.total_bytes = 0 super().__init__() async def read(self,", "def close(self) -> None: self._closing = True self._closed = True class RTMPProtocol(asyncio.Protocol): def", "- self.buffer.pos) self.total_bytes += length value = self.buffer.read(fmt) del self.buffer[:length] self.buffer.bitpos = 0", "StreamReader(loop=self.loop) self.writer = StreamWriter(transport, self, self.reader, self.loop) self.task = self.loop.create_task(self.controller(self.reader, self.writer)) def connection_lost(self,", "length is not None bit_needed = int(length) - (self.buffer.length - self.buffer.pos) while bit_needed", "StreamReader, StreamWriter, AbstractEventLoop, \\ WriteTransport, Task, BaseTransport from io import BytesIO from typing", "class StreamClosedException(Exception): pass class FIFOStream: def __init__(self, reader: StreamReader) -> None: self.reader =", "None bit_needed = int(length) - (self.buffer.length - self.buffer.pos) while bit_needed > 0: new_data", "0 return value class BufferedWriteTransport(WriteTransport): def __init__(self, buffer: BytesIO, extra: Optional[Mapping[Any, Any]] =", "return self._closing is True or self._closed is True def close(self) -> None: self._closing", "tokenparser, BitStream def random_byte_array(size: int) -> bytes: return os.urandom(size) class StreamClosedException(Exception): pass class", "import BytesIO from typing 
import Any, List, Optional, Mapping from bitstring import tokenparser,", "await self.reader.read(4096) if len(new_data) == 0: raise StreamClosedException() self.buffer.append(new_data) bit_needed = int(length) -", "self.task = self.loop.create_task(self.controller(self.reader, self.writer)) def connection_lost(self, exc): self.reader.feed_eof() def data_received(self, data): self.reader.feed_data(data) async", "StreamReader) -> None: self.reader = reader self.buffer = BitStream() self.total_bytes = 0 super().__init__()", "read(self, fmt): _, token = tokenparser(fmt) assert len(token) == 1 name, length, _", "def get_write_buffer_size(self) -> int: raise NotImplementedError def write(self, data: Any) -> None: self._buffer.write(data)", "self._closing = True self._closed = True class RTMPProtocol(asyncio.Protocol): def __init__(self, controller, loop: AbstractEventLoop)", "AbstractEventLoop) -> None: self.loop: AbstractEventLoop = loop self.transport: BaseTransport = None self.reader: StreamReader", "self.buffer = BitStream() self.total_bytes = 0 super().__init__() async def read(self, fmt): _, token", "data): self.reader.feed_data(data) async def _drain_helper(self): pass async def _get_close_waiter(self, stream: StreamWriter): return self.task", "StreamWriter(transport, self, self.reader, self.loop) self.task = self.loop.create_task(self.controller(self.reader, self.writer)) def connection_lost(self, exc): self.reader.feed_eof() def", "BytesIO from typing import Any, List, Optional, Mapping from bitstring import tokenparser, BitStream", "= reader self.buffer = BitStream() self.total_bytes = 0 super().__init__() async def read(self, fmt):", "os.urandom(size) class StreamClosedException(Exception): pass class FIFOStream: def __init__(self, reader: StreamReader) -> None: self.reader", "typing import Any, List, Optional, Mapping from bitstring import tokenparser, BitStream def random_byte_array(size:", "None: raise NotImplementedError def is_closing(self) -> bool: 
return self._closing is True or self._closed", "True class RTMPProtocol(asyncio.Protocol): def __init__(self, controller, loop: AbstractEventLoop) -> None: self.loop: AbstractEventLoop =", "from bitstring import tokenparser, BitStream def random_byte_array(size: int) -> bytes: return os.urandom(size) class", "def read(self, fmt): _, token = tokenparser(fmt) assert len(token) == 1 name, length,", "Optional[Mapping[Any, Any]] = ...) -> None: self._buffer = buffer self._closing = False self._closed", "= tokenparser(fmt) assert len(token) == 1 name, length, _ = token[0] assert length", "int(length) - (self.buffer.length - self.buffer.pos) self.total_bytes += length value = self.buffer.read(fmt) del self.buffer[:length]", "self.reader.read(4096) if len(new_data) == 0: raise StreamClosedException() self.buffer.append(new_data) bit_needed = int(length) - (self.buffer.length", "-> None: raise NotImplementedError def write_eof(self) -> None: raise NotImplementedError def can_write_eof(self) ->", "raise NotImplementedError def get_write_buffer_size(self) -> int: raise NotImplementedError def write(self, data: Any) ->", "self.writer: StreamWriter = None self.controller = controller self.task: Task = None super().__init__() def", "Any) -> None: self._buffer.write(data) def writelines(self, list_of_data: List[Any]) -> None: raise NotImplementedError def", "None: self.loop: AbstractEventLoop = loop self.transport: BaseTransport = None self.reader: StreamReader = None", "def connection_lost(self, exc): self.reader.feed_eof() def data_received(self, data): self.reader.feed_data(data) async def _drain_helper(self): pass async", "+= length value = self.buffer.read(fmt) del self.buffer[:length] self.buffer.bitpos = 0 return value class", "data: Any) -> None: self._buffer.write(data) def writelines(self, list_of_data: List[Any]) -> None: raise NotImplementedError", "StreamClosedException(Exception): pass class FIFOStream: def __init__(self, reader: StreamReader) -> None: self.reader = 
reader", "def write(self, data: Any) -> None: self._buffer.write(data) def writelines(self, list_of_data: List[Any]) -> None:", "_, token = tokenparser(fmt) assert len(token) == 1 name, length, _ = token[0]", "None: self.reader = reader self.buffer = BitStream() self.total_bytes = 0 super().__init__() async def", "Optional[int] = ...) -> None: raise NotImplementedError def get_write_buffer_size(self) -> int: raise NotImplementedError", "1 name, length, _ = token[0] assert length is not None bit_needed =", "random_byte_array(size: int) -> bytes: return os.urandom(size) class StreamClosedException(Exception): pass class FIFOStream: def __init__(self,", "False self._closed = False super().__init__(extra) def set_write_buffer_limits(self, high: Optional[int] = ..., low: Optional[int]", "token[0] assert length is not None bit_needed = int(length) - (self.buffer.length - self.buffer.pos)", "BytesIO, extra: Optional[Mapping[Any, Any]] = ...) -> None: self._buffer = buffer self._closing =", "NotImplementedError def can_write_eof(self) -> bool: return False def abort(self) -> None: raise NotImplementedError", "close(self) -> None: self._closing = True self._closed = True class RTMPProtocol(asyncio.Protocol): def __init__(self,", "self, self.reader, self.loop) self.task = self.loop.create_task(self.controller(self.reader, self.writer)) def connection_lost(self, exc): self.reader.feed_eof() def data_received(self,", "AbstractEventLoop, \\ WriteTransport, Task, BaseTransport from io import BytesIO from typing import Any,", "= controller self.task: Task = None super().__init__() def connection_made(self, transport): self.reader = StreamReader(loop=self.loop)", "buffer self._closing = False self._closed = False super().__init__(extra) def set_write_buffer_limits(self, high: Optional[int] =", "__init__(self, buffer: BytesIO, extra: Optional[Mapping[Any, Any]] = ...) 
-> None: self._buffer = buffer", "def __init__(self, controller, loop: AbstractEventLoop) -> None: self.loop: AbstractEventLoop = loop self.transport: BaseTransport", "io import BytesIO from typing import Any, List, Optional, Mapping from bitstring import", "writelines(self, list_of_data: List[Any]) -> None: raise NotImplementedError def write_eof(self) -> None: raise NotImplementedError", "None super().__init__() def connection_made(self, transport): self.reader = StreamReader(loop=self.loop) self.writer = StreamWriter(transport, self, self.reader,", "super().__init__() def connection_made(self, transport): self.reader = StreamReader(loop=self.loop) self.writer = StreamWriter(transport, self, self.reader, self.loop)", "(self.buffer.length - self.buffer.pos) while bit_needed > 0: new_data = await self.reader.read(4096) if len(new_data)", "is True or self._closed is True def close(self) -> None: self._closing = True", "pass class FIFOStream: def __init__(self, reader: StreamReader) -> None: self.reader = reader self.buffer", "= buffer self._closing = False self._closed = False super().__init__(extra) def set_write_buffer_limits(self, high: Optional[int]", "import Any, List, Optional, Mapping from bitstring import tokenparser, BitStream def random_byte_array(size: int)", "int(length) - (self.buffer.length - self.buffer.pos) while bit_needed > 0: new_data = await self.reader.read(4096)", "-> bool: return self._closing is True or self._closed is True def close(self) ->", "value class BufferedWriteTransport(WriteTransport): def __init__(self, buffer: BytesIO, extra: Optional[Mapping[Any, Any]] = ...) ->", "None: self._buffer = buffer self._closing = False self._closed = False super().__init__(extra) def set_write_buffer_limits(self,", "raise NotImplementedError def write_eof(self) -> None: raise NotImplementedError def can_write_eof(self) -> bool: return", "def __init__(self, buffer: BytesIO, extra: Optional[Mapping[Any, Any]] = ...) -> None: self._buffer =", "= ...) 
-> None: self._buffer = buffer self._closing = False self._closed = False", "class BufferedWriteTransport(WriteTransport): def __init__(self, buffer: BytesIO, extra: Optional[Mapping[Any, Any]] = ...) -> None:", "from io import BytesIO from typing import Any, List, Optional, Mapping from bitstring", "= ...) -> None: raise NotImplementedError def get_write_buffer_size(self) -> int: raise NotImplementedError def", "-> int: raise NotImplementedError def write(self, data: Any) -> None: self._buffer.write(data) def writelines(self,", "= 0 super().__init__() async def read(self, fmt): _, token = tokenparser(fmt) assert len(token)", "assert length is not None bit_needed = int(length) - (self.buffer.length - self.buffer.pos) while", "length, _ = token[0] assert length is not None bit_needed = int(length) -", "self.writer)) def connection_lost(self, exc): self.reader.feed_eof() def data_received(self, data): self.reader.feed_data(data) async def _drain_helper(self): pass", "NotImplementedError def write_eof(self) -> None: raise NotImplementedError def can_write_eof(self) -> bool: return False", "def abort(self) -> None: raise NotImplementedError def is_closing(self) -> bool: return self._closing is", "- (self.buffer.length - self.buffer.pos) self.total_bytes += length value = self.buffer.read(fmt) del self.buffer[:length] self.buffer.bitpos", "reader: StreamReader) -> None: self.reader = reader self.buffer = BitStream() self.total_bytes = 0", "True self._closed = True class RTMPProtocol(asyncio.Protocol): def __init__(self, controller, loop: AbstractEventLoop) -> None:", "connection_made(self, transport): self.reader = StreamReader(loop=self.loop) self.writer = StreamWriter(transport, self, self.reader, self.loop) self.task =", "== 1 name, length, _ = token[0] assert length is not None bit_needed", "-> None: raise NotImplementedError def can_write_eof(self) -> bool: return False def abort(self) ->", "None: raise NotImplementedError def get_write_buffer_size(self) -> int: 
raise NotImplementedError def write(self, data: Any)", "list_of_data: List[Any]) -> None: raise NotImplementedError def write_eof(self) -> None: raise NotImplementedError def", "None self.controller = controller self.task: Task = None super().__init__() def connection_made(self, transport): self.reader", "if len(new_data) == 0: raise StreamClosedException() self.buffer.append(new_data) bit_needed = int(length) - (self.buffer.length -", "= True self._closed = True class RTMPProtocol(asyncio.Protocol): def __init__(self, controller, loop: AbstractEventLoop) ->", "tokenparser(fmt) assert len(token) == 1 name, length, _ = token[0] assert length is", "Mapping from bitstring import tokenparser, BitStream def random_byte_array(size: int) -> bytes: return os.urandom(size)", "= loop self.transport: BaseTransport = None self.reader: StreamReader = None self.writer: StreamWriter =", "= StreamReader(loop=self.loop) self.writer = StreamWriter(transport, self, self.reader, self.loop) self.task = self.loop.create_task(self.controller(self.reader, self.writer)) def", "raise NotImplementedError def write(self, data: Any) -> None: self._buffer.write(data) def writelines(self, list_of_data: List[Any])", "return os.urandom(size) class StreamClosedException(Exception): pass class FIFOStream: def __init__(self, reader: StreamReader) -> None:", "self.buffer[:length] self.buffer.bitpos = 0 return value class BufferedWriteTransport(WriteTransport): def __init__(self, buffer: BytesIO, extra:", "None: self._buffer.write(data) def writelines(self, list_of_data: List[Any]) -> None: raise NotImplementedError def write_eof(self) ->", "def writelines(self, list_of_data: List[Any]) -> None: raise NotImplementedError def write_eof(self) -> None: raise", "= 0 return value class BufferedWriteTransport(WriteTransport): def __init__(self, buffer: BytesIO, extra: Optional[Mapping[Any, Any]]", "def write_eof(self) -> None: raise NotImplementedError def can_write_eof(self) -> bool: return False def", 
"BufferedWriteTransport(WriteTransport): def __init__(self, buffer: BytesIO, extra: Optional[Mapping[Any, Any]] = ...) -> None: self._buffer", "self.loop: AbstractEventLoop = loop self.transport: BaseTransport = None self.reader: StreamReader = None self.writer:", "0: new_data = await self.reader.read(4096) if len(new_data) == 0: raise StreamClosedException() self.buffer.append(new_data) bit_needed", "async def read(self, fmt): _, token = tokenparser(fmt) assert len(token) == 1 name,", "from asyncio import StreamReader, StreamWriter, AbstractEventLoop, \\ WriteTransport, Task, BaseTransport from io import", "class RTMPProtocol(asyncio.Protocol): def __init__(self, controller, loop: AbstractEventLoop) -> None: self.loop: AbstractEventLoop = loop", "None self.reader: StreamReader = None self.writer: StreamWriter = None self.controller = controller self.task:", "abort(self) -> None: raise NotImplementedError def is_closing(self) -> bool: return self._closing is True", "def is_closing(self) -> bool: return self._closing is True or self._closed is True def", "RTMPProtocol(asyncio.Protocol): def __init__(self, controller, loop: AbstractEventLoop) -> None: self.loop: AbstractEventLoop = loop self.transport:", "None: raise NotImplementedError def write_eof(self) -> None: raise NotImplementedError def can_write_eof(self) -> bool:", "= int(length) - (self.buffer.length - self.buffer.pos) self.total_bytes += length value = self.buffer.read(fmt) del", "List, Optional, Mapping from bitstring import tokenparser, BitStream def random_byte_array(size: int) -> bytes:", "is not None bit_needed = int(length) - (self.buffer.length - self.buffer.pos) while bit_needed >", "True def close(self) -> None: self._closing = True self._closed = True class RTMPProtocol(asyncio.Protocol):", "self.loop) self.task = self.loop.create_task(self.controller(self.reader, self.writer)) def connection_lost(self, exc): self.reader.feed_eof() def data_received(self, data): self.reader.feed_data(data)", "-> 
None: self._buffer.write(data) def writelines(self, list_of_data: List[Any]) -> None: raise NotImplementedError def write_eof(self)", "Task = None super().__init__() def connection_made(self, transport): self.reader = StreamReader(loop=self.loop) self.writer = StreamWriter(transport,", "data_received(self, data): self.reader.feed_data(data) async def _drain_helper(self): pass async def _get_close_waiter(self, stream: StreamWriter): return", "bool: return False def abort(self) -> None: raise NotImplementedError def is_closing(self) -> bool:", "while bit_needed > 0: new_data = await self.reader.read(4096) if len(new_data) == 0: raise", "StreamReader = None self.writer: StreamWriter = None self.controller = controller self.task: Task =", "self.task: Task = None super().__init__() def connection_made(self, transport): self.reader = StreamReader(loop=self.loop) self.writer =", "int: raise NotImplementedError def write(self, data: Any) -> None: self._buffer.write(data) def writelines(self, list_of_data:", "import StreamReader, StreamWriter, AbstractEventLoop, \\ WriteTransport, Task, BaseTransport from io import BytesIO from", "self._closed is True def close(self) -> None: self._closing = True self._closed = True", "= None super().__init__() def connection_made(self, transport): self.reader = StreamReader(loop=self.loop) self.writer = StreamWriter(transport, self,", "bit_needed > 0: new_data = await self.reader.read(4096) if len(new_data) == 0: raise StreamClosedException()", "import os from asyncio import StreamReader, StreamWriter, AbstractEventLoop, \\ WriteTransport, Task, BaseTransport from", "Optional, Mapping from bitstring import tokenparser, BitStream def random_byte_array(size: int) -> bytes: return", "Any]] = ...) 
-> None: self._buffer = buffer self._closing = False self._closed =", "BaseTransport = None self.reader: StreamReader = None self.writer: StreamWriter = None self.controller =", "exc): self.reader.feed_eof() def data_received(self, data): self.reader.feed_data(data) async def _drain_helper(self): pass async def _get_close_waiter(self,", "self.writer = StreamWriter(transport, self, self.reader, self.loop) self.task = self.loop.create_task(self.controller(self.reader, self.writer)) def connection_lost(self, exc):", "write_eof(self) -> None: raise NotImplementedError def can_write_eof(self) -> bool: return False def abort(self)", "self.reader, self.loop) self.task = self.loop.create_task(self.controller(self.reader, self.writer)) def connection_lost(self, exc): self.reader.feed_eof() def data_received(self, data):", "= StreamWriter(transport, self, self.reader, self.loop) self.task = self.loop.create_task(self.controller(self.reader, self.writer)) def connection_lost(self, exc): self.reader.feed_eof()", "NotImplementedError def write(self, data: Any) -> None: self._buffer.write(data) def writelines(self, list_of_data: List[Any]) ->", "= await self.reader.read(4096) if len(new_data) == 0: raise StreamClosedException() self.buffer.append(new_data) bit_needed = int(length)", "fmt): _, token = tokenparser(fmt) assert len(token) == 1 name, length, _ =", "def random_byte_array(size: int) -> bytes: return os.urandom(size) class StreamClosedException(Exception): pass class FIFOStream: def", "bitstring import tokenparser, BitStream def random_byte_array(size: int) -> bytes: return os.urandom(size) class StreamClosedException(Exception):", "NotImplementedError def get_write_buffer_size(self) -> int: raise NotImplementedError def write(self, data: Any) -> None:", "BaseTransport from io import BytesIO from typing import Any, List, Optional, Mapping from", "_ = token[0] assert length is not None bit_needed = int(length) - (self.buffer.length", "low: Optional[int] = ...) 
-> None: raise NotImplementedError def get_write_buffer_size(self) -> int: raise", "-> None: raise NotImplementedError def get_write_buffer_size(self) -> int: raise NotImplementedError def write(self, data:", "length value = self.buffer.read(fmt) del self.buffer[:length] self.buffer.bitpos = 0 return value class BufferedWriteTransport(WriteTransport):", "def data_received(self, data): self.reader.feed_data(data) async def _drain_helper(self): pass async def _get_close_waiter(self, stream: StreamWriter):", "= ..., low: Optional[int] = ...) -> None: raise NotImplementedError def get_write_buffer_size(self) ->", "NotImplementedError def is_closing(self) -> bool: return self._closing is True or self._closed is True", "token = tokenparser(fmt) assert len(token) == 1 name, length, _ = token[0] assert", "def can_write_eof(self) -> bool: return False def abort(self) -> None: raise NotImplementedError def", "or self._closed is True def close(self) -> None: self._closing = True self._closed =", "...) 
-> None: raise NotImplementedError def get_write_buffer_size(self) -> int: raise NotImplementedError def write(self,", "FIFOStream: def __init__(self, reader: StreamReader) -> None: self.reader = reader self.buffer = BitStream()", "BitStream def random_byte_array(size: int) -> bytes: return os.urandom(size) class StreamClosedException(Exception): pass class FIFOStream:", "self._closing = False self._closed = False super().__init__(extra) def set_write_buffer_limits(self, high: Optional[int] = ...,", "raise StreamClosedException() self.buffer.append(new_data) bit_needed = int(length) - (self.buffer.length - self.buffer.pos) self.total_bytes += length", "self.buffer.append(new_data) bit_needed = int(length) - (self.buffer.length - self.buffer.pos) self.total_bytes += length value =", "transport): self.reader = StreamReader(loop=self.loop) self.writer = StreamWriter(transport, self, self.reader, self.loop) self.task = self.loop.create_task(self.controller(self.reader,", "True or self._closed is True def close(self) -> None: self._closing = True self._closed", "write(self, data: Any) -> None: self._buffer.write(data) def writelines(self, list_of_data: List[Any]) -> None: raise", "False def abort(self) -> None: raise NotImplementedError def is_closing(self) -> bool: return self._closing", "self.reader: StreamReader = None self.writer: StreamWriter = None self.controller = controller self.task: Task", "def connection_made(self, transport): self.reader = StreamReader(loop=self.loop) self.writer = StreamWriter(transport, self, self.reader, self.loop) self.task", "False super().__init__(extra) def set_write_buffer_limits(self, high: Optional[int] = ..., low: Optional[int] = ...) 
->", "self.total_bytes += length value = self.buffer.read(fmt) del self.buffer[:length] self.buffer.bitpos = 0 return value", "- self.buffer.pos) while bit_needed > 0: new_data = await self.reader.read(4096) if len(new_data) ==", "reader self.buffer = BitStream() self.total_bytes = 0 super().__init__() async def read(self, fmt): _,", "new_data = await self.reader.read(4096) if len(new_data) == 0: raise StreamClosedException() self.buffer.append(new_data) bit_needed =", "value = self.buffer.read(fmt) del self.buffer[:length] self.buffer.bitpos = 0 return value class BufferedWriteTransport(WriteTransport): def", "self._buffer.write(data) def writelines(self, list_of_data: List[Any]) -> None: raise NotImplementedError def write_eof(self) -> None:", "self.total_bytes = 0 super().__init__() async def read(self, fmt): _, token = tokenparser(fmt) assert", "AbstractEventLoop = loop self.transport: BaseTransport = None self.reader: StreamReader = None self.writer: StreamWriter", "import tokenparser, BitStream def random_byte_array(size: int) -> bytes: return os.urandom(size) class StreamClosedException(Exception): pass", "...) -> None: self._buffer = buffer self._closing = False self._closed = False super().__init__(extra)", "raise NotImplementedError def is_closing(self) -> bool: return self._closing is True or self._closed is", "set_write_buffer_limits(self, high: Optional[int] = ..., low: Optional[int] = ...) 
-> None: raise NotImplementedError", "bool: return self._closing is True or self._closed is True def close(self) -> None:", "Task, BaseTransport from io import BytesIO from typing import Any, List, Optional, Mapping", "name, length, _ = token[0] assert length is not None bit_needed = int(length)", "self._closed = True class RTMPProtocol(asyncio.Protocol): def __init__(self, controller, loop: AbstractEventLoop) -> None: self.loop:", "self.reader.feed_eof() def data_received(self, data): self.reader.feed_data(data) async def _drain_helper(self): pass async def _get_close_waiter(self, stream:", "-> None: raise NotImplementedError def is_closing(self) -> bool: return self._closing is True or", "= BitStream() self.total_bytes = 0 super().__init__() async def read(self, fmt): _, token =", "connection_lost(self, exc): self.reader.feed_eof() def data_received(self, data): self.reader.feed_data(data) async def _drain_helper(self): pass async def", "-> None: self.reader = reader self.buffer = BitStream() self.total_bytes = 0 super().__init__() async", "self.buffer.bitpos = 0 return value class BufferedWriteTransport(WriteTransport): def __init__(self, buffer: BytesIO, extra: Optional[Mapping[Any,", "None self.writer: StreamWriter = None self.controller = controller self.task: Task = None super().__init__()", "List[Any]) -> None: raise NotImplementedError def write_eof(self) -> None: raise NotImplementedError def can_write_eof(self)", "> 0: new_data = await self.reader.read(4096) if len(new_data) == 0: raise StreamClosedException() self.buffer.append(new_data)", "= None self.writer: StreamWriter = None self.controller = controller self.task: Task = None", "= token[0] assert length is not None bit_needed = int(length) - (self.buffer.length -", "super().__init__(extra) def set_write_buffer_limits(self, high: Optional[int] = ..., low: Optional[int] = ...) 
-> None:", "= False self._closed = False super().__init__(extra) def set_write_buffer_limits(self, high: Optional[int] = ..., low:", "self._closed = False super().__init__(extra) def set_write_buffer_limits(self, high: Optional[int] = ..., low: Optional[int] =", "self.buffer.pos) while bit_needed > 0: new_data = await self.reader.read(4096) if len(new_data) == 0:", "BitStream() self.total_bytes = 0 super().__init__() async def read(self, fmt): _, token = tokenparser(fmt)", "class FIFOStream: def __init__(self, reader: StreamReader) -> None: self.reader = reader self.buffer =", "bit_needed = int(length) - (self.buffer.length - self.buffer.pos) self.total_bytes += length value = self.buffer.read(fmt)", "return False def abort(self) -> None: raise NotImplementedError def is_closing(self) -> bool: return", "StreamWriter = None self.controller = controller self.task: Task = None super().__init__() def connection_made(self,", "None: raise NotImplementedError def can_write_eof(self) -> bool: return False def abort(self) -> None:", "self.reader = StreamReader(loop=self.loop) self.writer = StreamWriter(transport, self, self.reader, self.loop) self.task = self.loop.create_task(self.controller(self.reader, self.writer))", "asyncio import os from asyncio import StreamReader, StreamWriter, AbstractEventLoop, \\ WriteTransport, Task, BaseTransport", "from typing import Any, List, Optional, Mapping from bitstring import tokenparser, BitStream def", "not None bit_needed = int(length) - (self.buffer.length - self.buffer.pos) while bit_needed > 0:", "assert len(token) == 1 name, length, _ = token[0] assert length is not", "del self.buffer[:length] self.buffer.bitpos = 0 return value class BufferedWriteTransport(WriteTransport): def __init__(self, buffer: BytesIO,", "controller, loop: AbstractEventLoop) -> None: self.loop: AbstractEventLoop = loop self.transport: BaseTransport = None", "os from asyncio import StreamReader, StreamWriter, AbstractEventLoop, \\ WriteTransport, Task, 
BaseTransport from io", "len(token) == 1 name, length, _ = token[0] assert length is not None", "self.buffer.read(fmt) del self.buffer[:length] self.buffer.bitpos = 0 return value class BufferedWriteTransport(WriteTransport): def __init__(self, buffer:", "def set_write_buffer_limits(self, high: Optional[int] = ..., low: Optional[int] = ...) -> None: raise", "-> bool: return False def abort(self) -> None: raise NotImplementedError def is_closing(self) ->", "__init__(self, controller, loop: AbstractEventLoop) -> None: self.loop: AbstractEventLoop = loop self.transport: BaseTransport =", "len(new_data) == 0: raise StreamClosedException() self.buffer.append(new_data) bit_needed = int(length) - (self.buffer.length - self.buffer.pos)", "high: Optional[int] = ..., low: Optional[int] = ...) -> None: raise NotImplementedError def", "loop: AbstractEventLoop) -> None: self.loop: AbstractEventLoop = loop self.transport: BaseTransport = None self.reader:", "= None self.reader: StreamReader = None self.writer: StreamWriter = None self.controller = controller", "self.controller = controller self.task: Task = None super().__init__() def connection_made(self, transport): self.reader =", "controller self.task: Task = None super().__init__() def connection_made(self, transport): self.reader = StreamReader(loop=self.loop) self.writer", "int) -> bytes: return os.urandom(size) class StreamClosedException(Exception): pass class FIFOStream: def __init__(self, reader:", "self.transport: BaseTransport = None self.reader: StreamReader = None self.writer: StreamWriter = None self.controller", "buffer: BytesIO, extra: Optional[Mapping[Any, Any]] = ...) 
-> None: self._buffer = buffer self._closing", "-> None: self._closing = True self._closed = True class RTMPProtocol(asyncio.Protocol): def __init__(self, controller,", "== 0: raise StreamClosedException() self.buffer.append(new_data) bit_needed = int(length) - (self.buffer.length - self.buffer.pos) self.total_bytes", "return value class BufferedWriteTransport(WriteTransport): def __init__(self, buffer: BytesIO, extra: Optional[Mapping[Any, Any]] = ...)", "= False super().__init__(extra) def set_write_buffer_limits(self, high: Optional[int] = ..., low: Optional[int] = ...)", "None: self._closing = True self._closed = True class RTMPProtocol(asyncio.Protocol): def __init__(self, controller, loop:", "bit_needed = int(length) - (self.buffer.length - self.buffer.pos) while bit_needed > 0: new_data =", "= int(length) - (self.buffer.length - self.buffer.pos) while bit_needed > 0: new_data = await", "self._buffer = buffer self._closing = False self._closed = False super().__init__(extra) def set_write_buffer_limits(self, high:", "bytes: return os.urandom(size) class StreamClosedException(Exception): pass class FIFOStream: def __init__(self, reader: StreamReader) ->", "..., low: Optional[int] = ...) -> None: raise NotImplementedError def get_write_buffer_size(self) -> int:", "get_write_buffer_size(self) -> int: raise NotImplementedError def write(self, data: Any) -> None: self._buffer.write(data) def", "extra: Optional[Mapping[Any, Any]] = ...) 
-> None: self._buffer = buffer self._closing = False", "-> None: self._buffer = buffer self._closing = False self._closed = False super().__init__(extra) def", "\\ WriteTransport, Task, BaseTransport from io import BytesIO from typing import Any, List,", "self._closing is True or self._closed is True def close(self) -> None: self._closing =", "super().__init__() async def read(self, fmt): _, token = tokenparser(fmt) assert len(token) == 1", "(self.buffer.length - self.buffer.pos) self.total_bytes += length value = self.buffer.read(fmt) del self.buffer[:length] self.buffer.bitpos =", "self.buffer.pos) self.total_bytes += length value = self.buffer.read(fmt) del self.buffer[:length] self.buffer.bitpos = 0 return", "0 super().__init__() async def read(self, fmt): _, token = tokenparser(fmt) assert len(token) ==", "Optional[int] = ..., low: Optional[int] = ...) -> None: raise NotImplementedError def get_write_buffer_size(self)", "Any, List, Optional, Mapping from bitstring import tokenparser, BitStream def random_byte_array(size: int) ->", "StreamWriter, AbstractEventLoop, \\ WriteTransport, Task, BaseTransport from io import BytesIO from typing import", "- (self.buffer.length - self.buffer.pos) while bit_needed > 0: new_data = await self.reader.read(4096) if", "0: raise StreamClosedException() self.buffer.append(new_data) bit_needed = int(length) - (self.buffer.length - self.buffer.pos) self.total_bytes +=", "= self.buffer.read(fmt) del self.buffer[:length] self.buffer.bitpos = 0 return value class BufferedWriteTransport(WriteTransport): def __init__(self,", "loop self.transport: BaseTransport = None self.reader: StreamReader = None self.writer: StreamWriter = None", "self.loop.create_task(self.controller(self.reader, self.writer)) def connection_lost(self, exc): self.reader.feed_eof() def data_received(self, data): self.reader.feed_data(data) async def _drain_helper(self):", "= None self.controller = controller self.task: Task = None super().__init__() def 
connection_made(self, transport):", "-> None: self.loop: AbstractEventLoop = loop self.transport: BaseTransport = None self.reader: StreamReader =", "raise NotImplementedError def can_write_eof(self) -> bool: return False def abort(self) -> None: raise", "def __init__(self, reader: StreamReader) -> None: self.reader = reader self.buffer = BitStream() self.total_bytes", "is_closing(self) -> bool: return self._closing is True or self._closed is True def close(self)", "import asyncio import os from asyncio import StreamReader, StreamWriter, AbstractEventLoop, \\ WriteTransport, Task,", "WriteTransport, Task, BaseTransport from io import BytesIO from typing import Any, List, Optional,", "is True def close(self) -> None: self._closing = True self._closed = True class", "-> bytes: return os.urandom(size) class StreamClosedException(Exception): pass class FIFOStream: def __init__(self, reader: StreamReader)", "can_write_eof(self) -> bool: return False def abort(self) -> None: raise NotImplementedError def is_closing(self)", "StreamClosedException() self.buffer.append(new_data) bit_needed = int(length) - (self.buffer.length - self.buffer.pos) self.total_bytes += length value" ]
[ "return ord(char.lower())-97 def character_sum(a,b): new_char_index = modulo_sum(alphabet_position(a),alphabet_position(b)) return chr(new_char_index+97) def one_time_pad(plaintext, cypher): cyphertext", "def alphabet_position(char): return ord(char.lower())-97 def character_sum(a,b): new_char_index = modulo_sum(alphabet_position(a),alphabet_position(b)) return chr(new_char_index+97) def one_time_pad(plaintext,", "alphabet_position(char): return ord(char.lower())-97 def character_sum(a,b): new_char_index = modulo_sum(alphabet_position(a),alphabet_position(b)) return chr(new_char_index+97) def one_time_pad(plaintext, cypher):", "return chr(new_char_index+97) def one_time_pad(plaintext, cypher): cyphertext = \"\".join([character_sum(plaintext[i],cypher[i%len(cypher)]) for i in range(len(plaintext))]) return", "\"\".join([character_sum(plaintext[i],cypher[i%len(cypher)]) for i in range(len(plaintext))]) return cyphertext if __name__ == \"__main__\": x=one_time_pad(\"hermes\",2) print(x)", "def character_sum(a,b): new_char_index = modulo_sum(alphabet_position(a),alphabet_position(b)) return chr(new_char_index+97) def one_time_pad(plaintext, cypher): cyphertext = \"\".join([character_sum(plaintext[i],cypher[i%len(cypher)])", "def modulo_sum(x,y): return (x+y)%26 def alphabet_position(char): return ord(char.lower())-97 def character_sum(a,b): new_char_index = modulo_sum(alphabet_position(a),alphabet_position(b))", "character_sum(a,b): new_char_index = modulo_sum(alphabet_position(a),alphabet_position(b)) return chr(new_char_index+97) def one_time_pad(plaintext, cypher): cyphertext = \"\".join([character_sum(plaintext[i],cypher[i%len(cypher)]) for", "def one_time_pad(plaintext, cypher): cyphertext = \"\".join([character_sum(plaintext[i],cypher[i%len(cypher)]) for i in range(len(plaintext))]) return cyphertext if", "return (x+y)%26 def alphabet_position(char): return ord(char.lower())-97 def character_sum(a,b): new_char_index = 
modulo_sum(alphabet_position(a),alphabet_position(b)) return chr(new_char_index+97)", "one_time_pad(plaintext, cypher): cyphertext = \"\".join([character_sum(plaintext[i],cypher[i%len(cypher)]) for i in range(len(plaintext))]) return cyphertext if __name__", "modulo_sum(alphabet_position(a),alphabet_position(b)) return chr(new_char_index+97) def one_time_pad(plaintext, cypher): cyphertext = \"\".join([character_sum(plaintext[i],cypher[i%len(cypher)]) for i in range(len(plaintext))])", "cypher): cyphertext = \"\".join([character_sum(plaintext[i],cypher[i%len(cypher)]) for i in range(len(plaintext))]) return cyphertext if __name__ ==", "new_char_index = modulo_sum(alphabet_position(a),alphabet_position(b)) return chr(new_char_index+97) def one_time_pad(plaintext, cypher): cyphertext = \"\".join([character_sum(plaintext[i],cypher[i%len(cypher)]) for i", "= modulo_sum(alphabet_position(a),alphabet_position(b)) return chr(new_char_index+97) def one_time_pad(plaintext, cypher): cyphertext = \"\".join([character_sum(plaintext[i],cypher[i%len(cypher)]) for i in", "chr(new_char_index+97) def one_time_pad(plaintext, cypher): cyphertext = \"\".join([character_sum(plaintext[i],cypher[i%len(cypher)]) for i in range(len(plaintext))]) return cyphertext", "(x+y)%26 def alphabet_position(char): return ord(char.lower())-97 def character_sum(a,b): new_char_index = modulo_sum(alphabet_position(a),alphabet_position(b)) return chr(new_char_index+97) def", "ord(char.lower())-97 def character_sum(a,b): new_char_index = modulo_sum(alphabet_position(a),alphabet_position(b)) return chr(new_char_index+97) def one_time_pad(plaintext, cypher): cyphertext =", "= \"\".join([character_sum(plaintext[i],cypher[i%len(cypher)]) for i in range(len(plaintext))]) return cyphertext if __name__ == \"__main__\": x=one_time_pad(\"hermes\",2)", "modulo_sum(x,y): return (x+y)%26 def alphabet_position(char): return ord(char.lower())-97 def character_sum(a,b): new_char_index = 
modulo_sum(alphabet_position(a),alphabet_position(b)) return", "cyphertext = \"\".join([character_sum(plaintext[i],cypher[i%len(cypher)]) for i in range(len(plaintext))]) return cyphertext if __name__ == \"__main__\":" ]
[ "= decdeg + (int(dec[1])/60.) + (float(dec[2])/3600.) ddec = flag*ddec return ddec if __name__", "any known pulsar, RRAT or FRB in the beam') parser.add_argument('-ra', dest = 'raq',", "required=True, help='DECJ in decimal degree') args = parser.parse_args() # Compare with catalog entries", "relevant information length = len(cat[\"entries\"]) outarr = [] for i in range(length): catra", "= [] for i in range(length): catra = cat[\"entries\"][i][\"RA\"] raobj = catra.split(\":\",3) catdec", "decobj = catdec.split(\":\",3) #check if there are blanks and fill them with zeros", "ra from hms to decimal degree # First convert to decimal hours dhrs", "== '__main__': # Parse command line arguments parser = argparse.ArgumentParser(description='Provide information about any", "degree') args = parser.parse_args() # Compare with catalog entries and print out relevant", "0.0 otherwise try: if cat[\"entries\"][i][\"sources\"][0][\"data\"][\"TYPE\"]: print('%.2f'%sep,cat[\"entries\"][i][\"Name\"],cat[\"entries\"][i][\"RA\"],cat[\"entries\"][i][\"DEC\"],\"0.0\",cat[\"entries\"][i][\"sources\"][0][\"Name\"]) except: pass if cat[\"entries\"][i][\"sources\"][0][\"Name\"] == \"ATNF\": print('%.2f'%sep,cat[\"entries\"][i][\"Name\"],cat[\"entries\"][i][\"RA\"],cat[\"entries\"][i][\"DEC\"],cat[\"entries\"][i][\"sources\"][0][\"data\"][\"DM\"][0],cat[\"entries\"][i][\"sources\"][0][\"Name\"])", "degree ddeg = dhrs*15. return ddeg def dmstodeg(dec): # Convert dec from dms", "decdeg = abs(int(dec[0])) ddec = decdeg + (int(dec[1])/60.) + (float(dec[2])/3600.) 
ddec = flag*ddec", "for i in range(length): catra = cat[\"entries\"][i][\"RA\"] raobj = catra.split(\":\",3) catdec = cat[\"entries\"][i][\"DEC\"]", "degree') parser.add_argument('-dec', dest = 'decq', action = 'store', metavar = 'DECJ', required=True, help='DECJ", "action = 'store', metavar = 'DECJ', required=True, help='DECJ in decimal degree') args =", "the rtcat function cat = gen_catalog() # Some useful functions def hmstodeg(ra): #", "Some useful functions def hmstodeg(ra): # Convert ra from hms to decimal degree", "Convert ra from hms to decimal degree # First convert to decimal hours", "-1 decdeg = abs(int(dec[0])) ddec = decdeg + (int(dec[1])/60.) + (float(dec[2])/3600.) ddec =", "parser.parse_args() # Compare with catalog entries and print out relevant information length =", "with zeros if (len(raobj) < 3): if (len(raobj) == 2): raobj.append(\"0.0\") else: raobj.append(\"0\")", "if (len(raobj) < 3): if (len(raobj) == 2): raobj.append(\"0.0\") else: raobj.append(\"0\") raobj.append(\"0.0\") if", "- deccat) sep = ((dlra**2 + dldec**2)**0.5)*60. 
if (sep < 10.): #Print DM", "# Some useful functions def hmstodeg(ra): # Convert ra from hms to decimal", "beam') parser.add_argument('-ra', dest = 'raq', action = 'store', metavar = 'RAJ', required=True, help='RAJ", "catdec.split(\":\",3) #check if there are blanks and fill them with zeros if (len(raobj)", "#if i==343: # print(cat[\"entries\"][i][\"sources\"][0][\"data\"]) # Now calculate separation in arcmin and put out", "help='RAJ in decimal degree') parser.add_argument('-dec', dest = 'decq', action = 'store', metavar =", "dest = 'decq', action = 'store', metavar = 'DECJ', required=True, help='DECJ in decimal", "if there are blanks and fill them with zeros if (len(raobj) < 3):", "< 10' racand = float(args.raq) deccand = float(args.decq) racat = hmstodeg(raobj) deccat =", "return ddeg def dmstodeg(dec): # Convert dec from dms to decimal degree #", "pulsar, RRAT or FRB in the beam') parser.add_argument('-ra', dest = 'raq', action =", "= 'RAJ', required=True, help='RAJ in decimal degree') parser.add_argument('-dec', dest = 'decq', action =", "dlra = abs(racand - racat) dldec = abs(deccand - deccat) sep = ((dlra**2", "flag = 1 if (int(dec[0]) < 0): flag = -1 decdeg = abs(int(dec[0]))", "zeros if (len(raobj) < 3): if (len(raobj) == 2): raobj.append(\"0.0\") else: raobj.append(\"0\") raobj.append(\"0.0\")", "= 'store', metavar = 'RAJ', required=True, help='RAJ in decimal degree') parser.add_argument('-dec', dest =", "if sep < 10' racand = float(args.raq) deccand = float(args.decq) racat = hmstodeg(raobj)", "== 2): decobj.append(\"0.0\") else: decobj.append(\"0\") decobj.append(\"0.0\") #print(\"Index is:\",i) #if i==343: # print(cat[\"entries\"][i][\"sources\"][0][\"data\"]) #", "= cat[\"entries\"][i][\"RA\"] raobj = catra.split(\":\",3) catdec = cat[\"entries\"][i][\"DEC\"] decobj = catdec.split(\":\",3) #check if", "hmstodeg(raobj) deccat = dmstodeg(decobj) dlra = abs(racand - racat) dldec = abs(deccand -", "= 'decq', action = 'store', metavar = 'DECJ', 
def hmstodeg(ra):
    """Convert a right ascension to decimal degrees.

    Args:
        ra: sequence of three numeric strings [hours, minutes, seconds],
            as produced by splitting an "HH:MM:SS.S" catalog string.

    Returns:
        float: the RA in decimal degrees.

    Fix: parse every field with float() — the original int() on the hour
    and minute fields raised ValueError on fractional values like "30.5".
    Results are unchanged for all integer-valued inputs.
    """
    # First convert to decimal hours.
    dhrs = float(ra[0]) + (float(ra[1]) / 60.) + (float(ra[2]) / 3600.)
    # One hour of right ascension corresponds to 15 degrees.
    return dhrs * 15.
def dmstodeg(dec):
    """Convert a declination to decimal degrees.

    Args:
        dec: sequence of three numeric strings [degrees, arcminutes,
             arcseconds], as produced by splitting a "+DD:MM:SS.S" string.

    Returns:
        float: the declination in decimal degrees, with the correct sign.

    Bug fix: the sign must come from the *string*, not from int(dec[0]):
    for declinations in the -1..0 degree band (e.g. "-00:30:00"),
    int("-0") == 0 is not < 0, so the original code silently dropped the
    minus sign. float() parsing also accepts fractional degree/minute
    fields; integer-valued inputs give identical results.
    """
    # Generate a sign flag for positive or negative declination from the
    # leading '-' of the degrees field.
    sign = -1. if str(dec[0]).strip().startswith('-') else 1.
    ddec = abs(float(dec[0])) + (float(dec[1]) / 60.) + (float(dec[2]) / 3600.)
    return sign * ddec
#!/usr/bin/env python3.5
__author__ = '<NAME>'
__email__ = '<EMAIL>'

import argparse
from catalog_utils import gen_catalog
from webcrawler import *

# Build the transient/pulsar catalog once at import time.
# NOTE(review): the original comment calls this "the rtcat function" while the
# code calls gen_catalog() — confirm naming against catalog_utils.
cat = gen_catalog()

if __name__ == '__main__':
    # Parse command line arguments: candidate position in decimal degrees.
    parser = argparse.ArgumentParser(description='Provide information about any known pulsar, RRAT or FRB in the beam')
    parser.add_argument('-ra', dest = 'raq', action = 'store', metavar = 'RAJ', required=True, help='RAJ in decimal degree')
    parser.add_argument('-dec', dest = 'decq', action = 'store', metavar = 'DECJ', required=True, help='DECJ in decimal degree')
    args = parser.parse_args()

    # Compare the candidate position with every catalog entry and print the
    # ones within 10 arcmin.
    length = len(cat["entries"])
    outarr = []
    for i in range(length):
        # Catalog positions are colon-separated sexagesimal strings
        # ("HH:MM:SS" / "DD:MM:SS"); split into at most 3+1 fields.
        catra = cat["entries"][i]["RA"]
        raobj = catra.split(":",3)
        catdec = cat["entries"][i]["DEC"]
        decobj = catdec.split(":",3)
        # Check if there are blanks (missing minute/second fields) and fill
        # them with zeros so hmstodeg/dmstodeg always see three fields.
        if (len(raobj) < 3):
            if (len(raobj) == 2):
                raobj.append("0.0")
            else:
                raobj.append("0")
                raobj.append("0.0")
        if (len(decobj) < 3):
            if (len(decobj) == 2):
                decobj.append("0.0")
            else:
                decobj.append("0")
                decobj.append("0.0")
        #print("Index is:",i)
        #if i==343:
        #    print(cat["entries"][i]["sources"][0]["data"])
        # Now calculate separation in arcmin and print values if sep < 10'.
        # NOTE(review): this is a flat-sky separation that ignores the
        # cos(dec) compression of the RA axis — confirm that is acceptable
        # for the beam sizes involved.
        racand = float(args.raq)
        deccand = float(args.decq)
        racat = hmstodeg(raobj)
        deccat = dmstodeg(decobj)
        dlra = abs(racand - racat)
        dldec = abs(deccand - deccat)
        sep = ((dlra**2 + dldec**2)**0.5)*60.
        if (sep < 10.):
            # Print DM if not NRAD and 0.0 otherwise.
            # NOTE(review): an entry whose source data carries a "TYPE" key is
            # printed here with DM "0.0" AND printed again by the if/else
            # below — confirm the double print is intended. The bare except
            # deliberately skips entries whose source data lacks "TYPE".
            try:
                if cat["entries"][i]["sources"][0]["data"]["TYPE"]:
                    print('%.2f'%sep,cat["entries"][i]["Name"],cat["entries"][i]["RA"],cat["entries"][i]["DEC"],"0.0",cat["entries"][i]["sources"][0]["Name"])
            except:
                pass
            # ATNF stores DM as a sequence (take element 0); other sources
            # store it as a scalar.
            if cat["entries"][i]["sources"][0]["Name"] == "ATNF":
                print('%.2f'%sep,cat["entries"][i]["Name"],cat["entries"][i]["RA"],cat["entries"][i]["DEC"],cat["entries"][i]["sources"][0]["data"]["DM"][0],cat["entries"][i]["sources"][0]["Name"])
            else:
                print('%.2f'%sep,cat["entries"][i]["Name"],cat["entries"][i]["RA"],cat["entries"][i]["DEC"],cat["entries"][i]["sources"][0]["data"]["DM"],cat["entries"][i]["sources"][0]["Name"])
    #outarr.append(cat["entries"][i]["sources"][0]["data"])
    #print(outarr)
[ "else: i['top'] = False save_poster(data) def delete_poster(_id): data = get_posters() if len(data) ==", "save_poster(data) def delete_poster(_id): data = get_posters() if len(data) == 1: return data for", "return data for i in range(len(data)): if data[i]['id'] == _id: del data[i] break", "in data: if i['id'] == _id: i['top'] = True else: i['top'] = False", "False save_poster(data) def delete_poster(_id): data = get_posters() if len(data) == 1: return data", "= get_posters() post['id'] = string_to_md5(post['link']) if post['top']: for i in data: i['top'] =", "data = get_posters() for i in data: if i['id'] == _id: i['top'] =", "def get_posters(): with open(os.path.join(DATA_PATH, 'poster.json'), 'r') as f: data = json.load(f) return data['data']", "= True else: i['top'] = False save_poster(data) def delete_poster(_id): data = get_posters() if", "text: type: top: \"\"\" def get_posters(): with open(os.path.join(DATA_PATH, 'poster.json'), 'r') as f: data", "f) def add_poster(post:dict): data = get_posters() post['id'] = string_to_md5(post['link']) if post['top']: for i", "with open(os.path.join(DATA_PATH, 'poster.json'), 'r') as f: data = json.load(f) return data['data'] def save_poster(data):", "save_poster(data) return \"添加成功\" def set_as_top(_id): data = get_posters() for i in data: if", "i['id'] == _id: i['top'] = True else: i['top'] = False save_poster(data) def delete_poster(_id):", "data}, f) def add_poster(post:dict): data = get_posters() post['id'] = string_to_md5(post['link']) if post['top']: for", "from app.utils.mass import string_to_md5 \"\"\" cover: link: text: type: top: \"\"\" def get_posters():", "with open(os.path.join(DATA_PATH, 'poster.json'), 'w') as f: json.dump({'data': data}, f) def add_poster(post:dict): data =", "data for i in range(len(data)): if data[i]['id'] == _id: del data[i] break save_poster(data)", "data = get_posters() if len(data) == 1: return data for i in range(len(data)):", "post['id'] = string_to_md5(post['link']) if 
post['top']: for i in data: i['top'] = False #", "i in data: i['top'] = False # 否在在原数据上面追加 data.append(post) save_poster(data) return \"添加成功\" def", "in data: i['top'] = False # 否在在原数据上面追加 data.append(post) save_poster(data) return \"添加成功\" def set_as_top(_id):", "i['top'] = False # 否在在原数据上面追加 data.append(post) save_poster(data) return \"添加成功\" def set_as_top(_id): data =", "= False save_poster(data) def delete_poster(_id): data = get_posters() if len(data) == 1: return", "= get_posters() for i in data: if i['id'] == _id: i['top'] = True", "data: if i['id'] == _id: i['top'] = True else: i['top'] = False save_poster(data)", "app.utils.mass import string_to_md5 \"\"\" cover: link: text: type: top: \"\"\" def get_posters(): with", "def set_as_top(_id): data = get_posters() for i in data: if i['id'] == _id:", "string_to_md5 \"\"\" cover: link: text: type: top: \"\"\" def get_posters(): with open(os.path.join(DATA_PATH, 'poster.json'),", "post['top']: for i in data: i['top'] = False # 否在在原数据上面追加 data.append(post) save_poster(data) return", "1: return data for i in range(len(data)): if data[i]['id'] == _id: del data[i]", "for i in data: if i['id'] == _id: i['top'] = True else: i['top']", "import DATA_PATH from app.utils.mass import string_to_md5 \"\"\" cover: link: text: type: top: \"\"\"", "os import json from app.config import DATA_PATH from app.utils.mass import string_to_md5 \"\"\" cover:", "import os import json from app.config import DATA_PATH from app.utils.mass import string_to_md5 \"\"\"", "get_posters() if len(data) == 1: return data for i in range(len(data)): if data[i]['id']", "open(os.path.join(DATA_PATH, 'poster.json'), 'w') as f: json.dump({'data': data}, f) def add_poster(post:dict): data = get_posters()", "return data['data'] def save_poster(data): with open(os.path.join(DATA_PATH, 'poster.json'), 'w') as f: json.dump({'data': data}, f)", "if post['top']: for i in data: i['top'] = False # 否在在原数据上面追加 data.append(post) save_poster(data)", "import json from 
app.config import DATA_PATH from app.utils.mass import string_to_md5 \"\"\" cover: link:", "f: data = json.load(f) return data['data'] def save_poster(data): with open(os.path.join(DATA_PATH, 'poster.json'), 'w') as", "'r') as f: data = json.load(f) return data['data'] def save_poster(data): with open(os.path.join(DATA_PATH, 'poster.json'),", "app.config import DATA_PATH from app.utils.mass import string_to_md5 \"\"\" cover: link: text: type: top:", "delete_poster(_id): data = get_posters() if len(data) == 1: return data for i in", "json from app.config import DATA_PATH from app.utils.mass import string_to_md5 \"\"\" cover: link: text:", "get_posters(): with open(os.path.join(DATA_PATH, 'poster.json'), 'r') as f: data = json.load(f) return data['data'] def", "if len(data) == 1: return data for i in range(len(data)): if data[i]['id'] ==", "== 1: return data for i in range(len(data)): if data[i]['id'] == _id: del", "i in range(len(data)): if data[i]['id'] == _id: del data[i] break save_poster(data) return data", "_id: i['top'] = True else: i['top'] = False save_poster(data) def delete_poster(_id): data =", "\"\"\" cover: link: text: type: top: \"\"\" def get_posters(): with open(os.path.join(DATA_PATH, 'poster.json'), 'r')", "for i in range(len(data)): if data[i]['id'] == _id: del data[i] break save_poster(data) return", "= get_posters() if len(data) == 1: return data for i in range(len(data)): if", "as f: json.dump({'data': data}, f) def add_poster(post:dict): data = get_posters() post['id'] = string_to_md5(post['link'])", "否在在原数据上面追加 data.append(post) save_poster(data) return \"添加成功\" def set_as_top(_id): data = get_posters() for i in", "# 否在在原数据上面追加 data.append(post) save_poster(data) return \"添加成功\" def set_as_top(_id): data = get_posters() for i", "data.append(post) save_poster(data) return \"添加成功\" def set_as_top(_id): data = get_posters() for i in data:", "json.dump({'data': data}, f) def add_poster(post:dict): data = get_posters() post['id'] = 
string_to_md5(post['link']) if post['top']:", "json.load(f) return data['data'] def save_poster(data): with open(os.path.join(DATA_PATH, 'poster.json'), 'w') as f: json.dump({'data': data},", "if i['id'] == _id: i['top'] = True else: i['top'] = False save_poster(data) def", "len(data) == 1: return data for i in range(len(data)): if data[i]['id'] == _id:", "def delete_poster(_id): data = get_posters() if len(data) == 1: return data for i", "data = get_posters() post['id'] = string_to_md5(post['link']) if post['top']: for i in data: i['top']", "top: \"\"\" def get_posters(): with open(os.path.join(DATA_PATH, 'poster.json'), 'r') as f: data = json.load(f)", "data = json.load(f) return data['data'] def save_poster(data): with open(os.path.join(DATA_PATH, 'poster.json'), 'w') as f:", "'poster.json'), 'w') as f: json.dump({'data': data}, f) def add_poster(post:dict): data = get_posters() post['id']", "cover: link: text: type: top: \"\"\" def get_posters(): with open(os.path.join(DATA_PATH, 'poster.json'), 'r') as", "= False # 否在在原数据上面追加 data.append(post) save_poster(data) return \"添加成功\" def set_as_top(_id): data = get_posters()", "False # 否在在原数据上面追加 data.append(post) save_poster(data) return \"添加成功\" def set_as_top(_id): data = get_posters() for", "= string_to_md5(post['link']) if post['top']: for i in data: i['top'] = False # 否在在原数据上面追加", "import string_to_md5 \"\"\" cover: link: text: type: top: \"\"\" def get_posters(): with open(os.path.join(DATA_PATH,", "def save_poster(data): with open(os.path.join(DATA_PATH, 'poster.json'), 'w') as f: json.dump({'data': data}, f) def add_poster(post:dict):", "= json.load(f) return data['data'] def save_poster(data): with open(os.path.join(DATA_PATH, 'poster.json'), 'w') as f: json.dump({'data':", "add_poster(post:dict): data = get_posters() post['id'] = string_to_md5(post['link']) if post['top']: for i in data:", "string_to_md5(post['link']) if post['top']: for i in data: i['top'] = False # 否在在原数据上面追加 data.append(post)", 
def add_poster(post: dict):
    """Append a new poster entry and persist the list.

    The poster id is derived from its link via ``string_to_md5``.  If the
    new poster is flagged as top, every existing poster is unpinned first
    so that at most one entry stays on top.  Returns the success message
    shown to the user.
    """
    posters = get_posters()
    # Stable id derived from the link URL.
    post['id'] = string_to_md5(post['link'])
    if post['top']:
        # Demote all existing posters before pinning the new one.
        for existing in posters:
            existing['top'] = False
    # Then append the new entry onto the original data.
    posters.append(post)
    save_poster(posters)
    return "添加成功"
def set_as_top(_id):
    """Pin the poster whose id equals ``_id`` and unpin all others.

    Every entry's ``top`` flag is rewritten so exactly the matching poster
    (if present) ends up pinned; the updated list is saved back to disk.
    """
    posters = get_posters()
    for poster in posters:
        # Boolean assignment replaces the original if/else — same True/False
        # values, one obvious writer for the flag.
        poster['top'] = (poster['id'] == _id)
    save_poster(posters)


def delete_poster(_id):
    """Delete the poster with id ``_id`` and return the remaining list.

    The last remaining poster is never deleted: with a single entry the
    list is returned unchanged and not re-saved.  If ``_id`` is not found,
    the list is saved unchanged and returned.
    """
    posters = get_posters()
    if len(posters) == 1:
        # Refuse to delete the final poster.
        return posters
    # enumerate replaces the range(len(...)) index loop; only the first
    # match is removed, as before.
    for index, poster in enumerate(posters):
        if poster['id'] == _id:
            del posters[index]
            break
    save_poster(posters)
    return posters
from __future__ import print_function, unicode_literals
import sys
import os
import traceback
import time
from requests.utils import quote
from requests.exceptions import ConnectionError
import click
from stable_world.py_helpers import platform_uname
from stable_world import __version__ as version
from stable_world import errors
# Capture the interpreter's default excepthook before it is replaced, so the
# standard traceback can still be produced after logging.
original_excepthook = sys.excepthook
from stable_world.py_helpers import PY3
if PY3:
    # Python 3 has no `unicode` builtin; alias it to `str` so the rest of
    # this module can call unicode(value) on both major versions.
    unicode = str
def write_error_log(cache_dirname, exctype, value, tb):
    '''
    Write an unhandled exception to the debug log file.

    The formatted traceback is written to ``<cache_dirname>/logs/debug.txt``
    together with a header recording the time, platform and stable.world
    version.  Any failure while writing (permissions, disk, ...) is reported
    briefly on stderr instead of raising, since this runs inside an
    excepthook.  Finally the interpreter's original excepthook is invoked so
    the user still sees the standard traceback.

    :param cache_dirname: base cache directory containing the ``logs`` dir
    :param exctype: exception class being handled
    :param value: exception instance
    :param tb: traceback object
    '''
    log_dir = os.path.join(cache_dirname, 'logs')
    logfile = os.path.join(log_dir, 'debug.txt')
    try:
        # Create the logs directory on first use; previously a missing
        # directory made open() fail and the log was silently skipped.
        # (py2-compatible check — no exist_ok keyword.)
        if not os.path.isdir(log_dir):
            os.makedirs(log_dir)
        with open(logfile, 'w') as fd:
            uname = platform_uname()
            header = '[Unhandled Exception at {}] system={}, stable.world version: {}'
            print(header.format(time.ctime(), uname.system, version), file=fd)
            tb = '\n'.join(traceback.format_exception(exctype, value, tb))
            print(tb, file=fd)
        click.echo('\n Wrote full traceback to "{}"\n'.format(logfile), err=True)
    except Exception:
        # Best-effort only: never let logging itself crash the excepthook.
        click.echo("Failed to write logfile", err=True)

    # Defer to the default hook so the standard traceback is still printed.
    original_excepthook(exctype, value, tb)
def brief_excepthook(cache_dirname):
    """
    Build a sys.excepthook replacement that shortens expected errors.

    Known "brief" errors (``errors.BRIEF_ERRORS``) and connection failures
    are reported as a short one-line message; anything else is treated as a
    critical unhandled exception, with issue-tracker links, and is also
    written to the debug log via ``write_error_log``.

    :param cache_dirname: cache directory forwarded to ``write_error_log``
    :return: the hook callable ``inner(exctype, value, tb)``
    """
    def inner(exctype, value, tb):
        if issubclass(exctype, errors.BRIEF_ERRORS):
            # Expected user-facing error: name + message only, no traceback.
            click.secho("\n\n {}: ".format(exctype.__name__), nl=False, fg='red', bold=True, err=True)
            click.echo(unicode(value), err=True)
            click.echo(err=True)
        elif issubclass(exctype, ConnectionError):
            # Network failure: show the URL that could not be reached.
            # NOTE(review): assumes value.request is set — requests'
            # ConnectionError can carry request=None; confirm upstream.
            click.secho("\n\n {}: ".format(exctype.__name__), nl=False, fg='red', bold=True, err=True)
            click.echo('Could not connect to url "{}"'.format(value.request.url), err=True)
            click.echo(err=True)
        else:
            # Genuinely unexpected: announce loudly, then point the user at
            # the issue tracker (pre-filled search plus a new-issue link).
            msg = "\n\n Critical! Unhandled Exception\n {}: ".format(exctype.__name__)
            click.secho(msg, nl=False, fg='red', bold=True, err=True)
            click.echo(unicode(value), err=True)
            click.echo(err=True)
            # NOTE(review): the next two echo calls go to stdout (no
            # err=True) unlike the rest of this hook — possibly an
            # oversight; confirm the intended stream before changing.
            click.echo('\n Check for updates on this exception on the issue tracker:')
            search_str = quote('is:issue {} "{}"'.format(exctype.__name__, value))
            click.echo(' ', nl=False)
            click.secho(
                'https://github.com/srossross/stable.world/issues?q={}\n'.format(search_str),
                fg='blue', underline=True, err=True
            )
            click.echo(' Or create a new issue:', err=True)
            click.echo(' ', nl=False, err=True)
            click.secho(
                'https://github.com/srossross/stable.world/issues/new',
                fg='blue', underline=True, err=True
            )
            # Full traceback goes to the log file (and the default hook).
            write_error_log(cache_dirname, exctype, value, tb)
            return
    return inner