repository_name
stringlengths
5
67
func_path_in_repository
stringlengths
4
234
func_name
stringlengths
0
314
whole_func_string
stringlengths
52
3.87M
language
stringclasses
6 values
func_code_string
stringlengths
52
3.87M
func_code_tokens
listlengths
15
672k
func_documentation_string
stringlengths
1
47.2k
func_documentation_tokens
listlengths
1
3.92k
split_name
stringclasses
1 value
func_code_url
stringlengths
85
339
intiocean/pyinter
pyinter/interval_set.py
IntervalSet.union
def union(self, other):
    """Return a new IntervalSet representing the union of this IntervalSet
    with another.

    Every interval from both sets is added to a fresh IntervalSet, whose
    ``add`` merges overlapping intervals.

    :param other: An IntervalSet to union with this one.
    :return: a new IntervalSet containing the union.
    """
    combined = IntervalSet()
    for source in (self, other):
        for interval in source:
            combined.add(interval)
    return combined
python
def union(self, other): """Returns a new IntervalSet which represents the union of each of the intervals in this IntervalSet with each of the intervals in the other IntervalSet :param other: An IntervalSet to union with this one. """ result = IntervalSet() for el in self: result.add(el) for el in other: result.add(el) return result
[ "def", "union", "(", "self", ",", "other", ")", ":", "result", "=", "IntervalSet", "(", ")", "for", "el", "in", "self", ":", "result", ".", "add", "(", "el", ")", "for", "el", "in", "other", ":", "result", ".", "add", "(", "el", ")", "return", ...
Returns a new IntervalSet which represents the union of each of the intervals in this IntervalSet with each of the intervals in the other IntervalSet :param other: An IntervalSet to union with this one.
[ "Returns", "a", "new", "IntervalSet", "which", "represents", "the", "union", "of", "each", "of", "the", "intervals", "in", "this", "IntervalSet", "with", "each", "of", "the", "intervals", "in", "the", "other", "IntervalSet", ":", "param", "other", ":", "An",...
train
https://github.com/intiocean/pyinter/blob/fb6e904307477fa43123cc9ab326680aa1a8cd62/pyinter/interval_set.py#L69-L79
intiocean/pyinter
pyinter/interval_set.py
IntervalSet.add
def add(self, other):
    """Add an Interval to this IntervalSet, merging it with any existing
    overlapping intervals.

    Adding an interval that is already represented has no effect; adding an
    empty interval is a no-op.

    :param other: an Interval to add to this IntervalSet.
    """
    if other.empty():
        return
    # Union `other` with every interval it overlaps; each such union is a
    # single interval.
    merged = {existing.union(other) for existing in self if existing.overlaps(other)}
    if not merged:
        # `other` overlaps nothing in self (self may even be empty).
        merged.add(other)
    if len(merged) > 1:
        # Constructing an IntervalSet unions any mutually overlapping intervals.
        for interval in IntervalSet(merged):
            self._add(interval)
    else:
        self._add(merged.pop())
python
def add(self, other): """ Add an Interval to the IntervalSet by taking the union of the given Interval object with the existing Interval objects in self. This has no effect if the Interval is already represented. :param other: an Interval to add to this IntervalSet. """ if other.empty(): return to_add = set() for inter in self: if inter.overlaps(other): # if it overlaps with this interval then the union will be a single interval to_add.add(inter.union(other)) if len(to_add) == 0: # other must not overlap with any interval in self (self could be empty!) to_add.add(other) # Now add the intervals found to self if len(to_add) > 1: set_to_add = IntervalSet(to_add) # creating an interval set unions any overlapping intervals for el in set_to_add: self._add(el) elif len(to_add) == 1: self._add(to_add.pop())
[ "def", "add", "(", "self", ",", "other", ")", ":", "if", "other", ".", "empty", "(", ")", ":", "return", "to_add", "=", "set", "(", ")", "for", "inter", "in", "self", ":", "if", "inter", ".", "overlaps", "(", "other", ")", ":", "# if it overlaps wi...
Add an Interval to the IntervalSet by taking the union of the given Interval object with the existing Interval objects in self. This has no effect if the Interval is already represented. :param other: an Interval to add to this IntervalSet.
[ "Add", "an", "Interval", "to", "the", "IntervalSet", "by", "taking", "the", "union", "of", "the", "given", "Interval", "object", "with", "the", "existing", "Interval", "objects", "in", "self", "." ]
train
https://github.com/intiocean/pyinter/blob/fb6e904307477fa43123cc9ab326680aa1a8cd62/pyinter/interval_set.py#L84-L107
intiocean/pyinter
pyinter/interval_set.py
IntervalSet.difference
def difference(self, other):
    """Subtract an Interval or IntervalSet from the intervals in this set.

    :param other: an Interval or IntervalSet to subtract.
    :return: a new IntervalSet with the remaining intervals.
    """
    if isinstance(other, IntervalSet):
        to_remove = other
    else:
        to_remove = IntervalSet((other,))
    remaining = IntervalSet()
    for piece in self:
        # Successively subtract every interval of `to_remove` from this piece.
        for subtrahend in to_remove:
            piece = piece - subtrahend
        # Subtraction may split an interval into an IntervalSet of fragments.
        if isinstance(piece, IntervalSet):
            for fragment in piece:
                remaining.add(fragment)
        else:
            remaining.add(piece)
    return remaining
python
def difference(self, other): """ Subtract an Interval or IntervalSet from the intervals in the set. """ intervals = other if isinstance(other, IntervalSet) else IntervalSet((other,)) result = IntervalSet() for left in self: for right in intervals: left = left - right if isinstance(left, IntervalSet): for interval in left: result.add(interval) else: result.add(left) return result
[ "def", "difference", "(", "self", ",", "other", ")", ":", "intervals", "=", "other", "if", "isinstance", "(", "other", ",", "IntervalSet", ")", "else", "IntervalSet", "(", "(", "other", ",", ")", ")", "result", "=", "IntervalSet", "(", ")", "for", "lef...
Subtract an Interval or IntervalSet from the intervals in the set.
[ "Subtract", "an", "Interval", "or", "IntervalSet", "from", "the", "intervals", "in", "the", "set", "." ]
train
https://github.com/intiocean/pyinter/blob/fb6e904307477fa43123cc9ab326680aa1a8cd62/pyinter/interval_set.py#L112-L126
pebble/libpebble2
libpebble2/services/notifications.py
Notifications.send_notification
def send_notification(self, subject="", message="", sender="", source=None, actions=None):
    """
    Sends a notification. Blocks as long as necessary.

    :param subject: The subject.
    :type subject: str
    :param message: The message.
    :type message: str
    :param sender: The sender.
    :type sender: str
    :param source: The source of the notification.
    :type source: .LegacyNotification.Source
    :param actions: Actions to be sent with a notification
                    (list of TimelineAction objects); only used on
                    firmware 3.x and later.
    :type actions: list
    """
    # Firmware 2.x only understands the legacy notification format,
    # which has no action support.
    is_legacy = self._pebble.firmware_version.major < 3
    if is_legacy:
        self._send_legacy_notification(subject, message, sender, source)
    else:
        self._send_modern_notification(subject, message, sender, source, actions)
python
def send_notification(self, subject="", message="", sender="", source=None, actions=None): """ Sends a notification. Blocks as long as necessary. :param subject: The subject. :type subject: str :param message: The message. :type message: str :param sender: The sender. :type sender: str :param source: The source of the notification :type source: .LegacyNotification.Source :param actions Actions to be sent with a notification (list of TimelineAction objects) :type actions list """ if self._pebble.firmware_version.major < 3: self._send_legacy_notification(subject, message, sender, source) else: self._send_modern_notification(subject, message, sender, source, actions)
[ "def", "send_notification", "(", "self", ",", "subject", "=", "\"\"", ",", "message", "=", "\"\"", ",", "sender", "=", "\"\"", ",", "source", "=", "None", ",", "actions", "=", "None", ")", ":", "if", "self", ".", "_pebble", ".", "firmware_version", "."...
Sends a notification. Blocks as long as necessary. :param subject: The subject. :type subject: str :param message: The message. :type message: str :param sender: The sender. :type sender: str :param source: The source of the notification :type source: .LegacyNotification.Source :param actions Actions to be sent with a notification (list of TimelineAction objects) :type actions list
[ "Sends", "a", "notification", ".", "Blocks", "as", "long", "as", "necessary", "." ]
train
https://github.com/pebble/libpebble2/blob/23e2eb92cfc084e6f9e8c718711ac994ef606d18/libpebble2/services/notifications.py#L33-L51
agusmakmun/djipsum
djipsum/fields.py
DjipsumFields.randomBinaryField
def randomBinaryField(self):
    """Return one random value from a fixed pool of bytes samples."""
    samples = (
        b"hello world",
        b"this is bytes",
        b"awesome django",
        b"djipsum is awesome",
        b"\x00\x01\x02\x03\x04\x05\x06\x07",
        b"\x0b\x0c\x0e\x0f",
    )
    return self.randomize(list(samples))
python
def randomBinaryField(self): """ Return random bytes format. """ lst = [ b"hello world", b"this is bytes", b"awesome django", b"djipsum is awesome", b"\x00\x01\x02\x03\x04\x05\x06\x07", b"\x0b\x0c\x0e\x0f" ] return self.randomize(lst)
[ "def", "randomBinaryField", "(", "self", ")", ":", "lst", "=", "[", "b\"hello world\"", ",", "b\"this is bytes\"", ",", "b\"awesome django\"", ",", "b\"djipsum is awesome\"", ",", "b\"\\x00\\x01\\x02\\x03\\x04\\x05\\x06\\x07\"", ",", "b\"\\x0b\\x0c\\x0e\\x0f\"", "]", "retur...
Return random bytes format.
[ "Return", "random", "bytes", "format", "." ]
train
https://github.com/agusmakmun/djipsum/blob/e7950556422b4039092db2083db7a83728230977/djipsum/fields.py#L27-L39
agusmakmun/djipsum
djipsum/fields.py
DjipsumFields.randomCharField
def randomCharField(self, model_class, field_name):
    """
    Checking if `field_name` has choices. Then, returning random value from it.

    Result of: `available_choices`
    [
        ('project', 'I wanna to talk about project'),
        ('feedback', 'I want to report a bugs or give feedback'),
        ('hello', 'I just want to say hello')
    ]

    Falls back to a random canned sentence when the field has no choices
    (i.e. ``get_choices`` raises AttributeError).
    """
    try:
        # [1:] drops the blank "---------" choice Django prepends.
        available_choices = model_class._meta.get_field(field_name).get_choices()[1:]
        return self.randomize([ci[0] for ci in available_choices])
    except AttributeError:
        lst = [
            "Enthusiastically whiteboard synergistic methods",
            "Authoritatively scale progressive meta-services through",
            "Objectively implement client-centered supply chains via stand-alone",
            "Phosfluorescently productize accurate products after cooperative results",
            "Appropriately drive cutting-edge systems before optimal scenarios",
            # FIX: a missing trailing comma here previously concatenated this
            # entry with the next one, silently shrinking the pool to 7 items.
            "Uniquely productize viral ROI for competitive e-markets",
            "Uniquely repurpose high-quality models vis-a-vis",
            "Django is Fucking Awesome? Yes"
        ]
        return self.randomize(lst)
python
def randomCharField(self, model_class, field_name): """ Checking if `field_name` has choices. Then, returning random value from it. Result of: `available_choices` [ ('project', 'I wanna to talk about project'), ('feedback', 'I want to report a bugs or give feedback'), ('hello', 'I just want to say hello') ] """ try: available_choices = model_class._meta.get_field(field_name).get_choices()[1:] return self.randomize([ci[0] for ci in available_choices]) except AttributeError: lst = [ "Enthusiastically whiteboard synergistic methods", "Authoritatively scale progressive meta-services through", "Objectively implement client-centered supply chains via stand-alone", "Phosfluorescently productize accurate products after cooperative results", "Appropriately drive cutting-edge systems before optimal scenarios", "Uniquely productize viral ROI for competitive e-markets" "Uniquely repurpose high-quality models vis-a-vis", "Django is Fucking Awesome? Yes" ] return self.randomize(lst)
[ "def", "randomCharField", "(", "self", ",", "model_class", ",", "field_name", ")", ":", "try", ":", "available_choices", "=", "model_class", ".", "_meta", ".", "get_field", "(", "field_name", ")", ".", "get_choices", "(", ")", "[", "1", ":", "]", "return",...
Checking if `field_name` has choices. Then, returning random value from it. Result of: `available_choices` [ ('project', 'I wanna to talk about project'), ('feedback', 'I want to report a bugs or give feedback'), ('hello', 'I just want to say hello') ]
[ "Checking", "if", "field_name", "has", "choices", ".", "Then", "returning", "random", "value", "from", "it", ".", "Result", "of", ":", "available_choices", "[", "(", "project", "I", "wanna", "to", "talk", "about", "project", ")", "(", "feedback", "I", "wan...
train
https://github.com/agusmakmun/djipsum/blob/e7950556422b4039092db2083db7a83728230977/djipsum/fields.py#L41-L67
agusmakmun/djipsum
djipsum/fields.py
DjipsumFields.randomCommaSeparatedIntegerField
def randomCommaSeparatedIntegerField(self):
    """
    Return unique integers in a comma-separated string, such as:

    '6,1,7' or '4,5,1,3,2' or '2,7,9,3,5,4,1' or '3,9,2,8,7,1,5,4,6'
    """
    # FIX: was a lambda assigned to a name (PEP 8 E731) whose parameter
    # shadowed the builtin `max`; a named helper is clearer and lint-clean.
    def sample_csv(count):
        # Draw `count` distinct digits from 1-9 and join them with commas.
        return ",".join(str(digit) for digit in random.sample(range(1, 10), count))

    lst = [
        sample_csv(3),
        sample_csv(5),
        sample_csv(7),
        sample_csv(9)
    ]
    return self.randomize(lst)
python
def randomCommaSeparatedIntegerField(self): """ Return the unique integers in the string such as below: '6,1,7' or '4,5,1,3,2' or '2,7,9,3,5,4,1' or '3,9,2,8,7,1,5,4,6' """ randint = lambda max: ",".join( [str(x) for x in random.sample(range(1, 10), max)] ) lst = [ randint(3), randint(5), randint(7), randint(9) ] return self.randomize(lst)
[ "def", "randomCommaSeparatedIntegerField", "(", "self", ")", ":", "randint", "=", "lambda", "max", ":", "\",\"", ".", "join", "(", "[", "str", "(", "x", ")", "for", "x", "in", "random", ".", "sample", "(", "range", "(", "1", ",", "10", ")", ",", "m...
Return the unique integers in the string such as below: '6,1,7' or '4,5,1,3,2' or '2,7,9,3,5,4,1' or '3,9,2,8,7,1,5,4,6'
[ "Return", "the", "unique", "integers", "in", "the", "string", "such", "as", "below", ":", "6", "1", "7", "or", "4", "5", "1", "3", "2", "or", "2", "7", "9", "3", "5", "4", "1", "or", "3", "9", "2", "8", "7", "1", "5", "4", "6" ]
train
https://github.com/agusmakmun/djipsum/blob/e7950556422b4039092db2083db7a83728230977/djipsum/fields.py#L69-L83
agusmakmun/djipsum
djipsum/fields.py
DjipsumFields.randomDecimalField
def randomDecimalField(self, model_class, field_name):
    """
    Build a random decimal-like float for the given model field, honouring
    its `max_digits` and `decimal_places` attributes when they are set.

    :param model_class: the model owning the field.
    :param field_name: name of the DecimalField to inspect.
    :return: a float assembled as ``<digits>.<places>``.
    """
    field = model_class._meta.get_field(field_name)
    # These are None when the field does not declare them.
    max_digits = field.max_digits
    decimal_places = field.decimal_places

    digits = random.choice(range(100))
    if max_digits is not None:
        # The original lower-bound guard only ever re-assigned 0,
        # so the sampling range always starts at 0.
        sampled = random.sample(range(0, max_digits), max_digits - 1)
        digits = int("".join(str(d) for d in sampled))

    places = random.choice(range(10, 99))
    if decimal_places is not None:
        places = str(random.choice(range(9999 * 99999)))[:decimal_places]

    return float(str(digits)[:decimal_places] + "." + str(places))
python
def randomDecimalField(self, model_class, field_name): """ Validate if the field has a `max_digits` and `decimal_places` And generating the unique decimal number. """ decimal_field = model_class._meta.get_field(field_name) max_digits = None decimal_places = None if decimal_field.max_digits is not None: max_digits = decimal_field.max_digits if decimal_field.decimal_places is not None: decimal_places = decimal_field.decimal_places digits = random.choice(range(100)) if max_digits is not None: start = 0 if max_digits < start: start = max_digits - max_digits digits = int( "".join([ str(x) for x in random.sample( range(start, max_digits), max_digits - 1 ) ]) ) places = random.choice(range(10, 99)) if decimal_places is not None: places = str( random.choice(range(9999 * 99999)) )[:decimal_places] return float( str(digits)[:decimal_places] + "." + str(places) )
[ "def", "randomDecimalField", "(", "self", ",", "model_class", ",", "field_name", ")", ":", "decimal_field", "=", "model_class", ".", "_meta", ".", "get_field", "(", "field_name", ")", "max_digits", "=", "None", "decimal_places", "=", "None", "if", "decimal_field...
Validate if the field has a `max_digits` and `decimal_places` And generating the unique decimal number.
[ "Validate", "if", "the", "field", "has", "a", "max_digits", "and", "decimal_places", "And", "generating", "the", "unique", "decimal", "number", "." ]
train
https://github.com/agusmakmun/djipsum/blob/e7950556422b4039092db2083db7a83728230977/djipsum/fields.py#L85-L120
agusmakmun/djipsum
djipsum/fields.py
DjipsumFields.randomSlugField
def randomSlugField(self):
    """
    Return a unique slug; a uuid4 hex suffix prevents duplicate slugs
    (relevant for fields with unique=True).
    """
    prefixes = ("sample-slug", "awesome-djipsum", "unique-slug")
    # A fresh uuid4 per candidate keeps every slug unique.
    candidates = ["{}-{}".format(prefix, uuid.uuid4().hex) for prefix in prefixes]
    return self.randomize(candidates)
python
def randomSlugField(self): """ Return the unique slug by generating the uuid4 to fix the duplicate slug (unique=True) """ lst = [ "sample-slug-{}".format(uuid.uuid4().hex), "awesome-djipsum-{}".format(uuid.uuid4().hex), "unique-slug-{}".format(uuid.uuid4().hex) ] return self.randomize(lst)
[ "def", "randomSlugField", "(", "self", ")", ":", "lst", "=", "[", "\"sample-slug-{}\"", ".", "format", "(", "uuid", ".", "uuid4", "(", ")", ".", "hex", ")", ",", "\"awesome-djipsum-{}\"", ".", "format", "(", "uuid", ".", "uuid4", "(", ")", ".", "hex", ...
Return the unique slug by generating the uuid4 to fix the duplicate slug (unique=True)
[ "Return", "the", "unique", "slug", "by", "generating", "the", "uuid4", "to", "fix", "the", "duplicate", "slug", "(", "unique", "=", "True", ")" ]
train
https://github.com/agusmakmun/djipsum/blob/e7950556422b4039092db2083db7a83728230977/djipsum/fields.py#L170-L180
agusmakmun/djipsum
djipsum/fields.py
DjipsumFields.randomUUIDField
def randomUUIDField(self):
    """
    Return a unique uuid hex drawn at random from uuid1, uuid3, uuid4, or uuid5.
    """
    name_pool = ['python', 'django', 'awesome']
    candidates = [
        uuid.uuid1().hex,
        uuid.uuid3(uuid.NAMESPACE_URL, self.randomize(name_pool)).hex,
        uuid.uuid4().hex,
        uuid.uuid5(uuid.NAMESPACE_DNS, self.randomize(name_pool)).hex,
    ]
    return self.randomize(candidates)
python
def randomUUIDField(self): """ Return the unique uuid from uuid1, uuid3, uuid4, or uuid5. """ uuid1 = uuid.uuid1().hex uuid3 = uuid.uuid3( uuid.NAMESPACE_URL, self.randomize(['python', 'django', 'awesome']) ).hex uuid4 = uuid.uuid4().hex uuid5 = uuid.uuid5( uuid.NAMESPACE_DNS, self.randomize(['python', 'django', 'awesome']) ).hex return self.randomize([uuid1, uuid3, uuid4, uuid5])
[ "def", "randomUUIDField", "(", "self", ")", ":", "uuid1", "=", "uuid", ".", "uuid1", "(", ")", ".", "hex", "uuid3", "=", "uuid", ".", "uuid3", "(", "uuid", ".", "NAMESPACE_URL", ",", "self", ".", "randomize", "(", "[", "'python'", ",", "'django'", ",...
Return the unique uuid from uuid1, uuid3, uuid4, or uuid5.
[ "Return", "the", "unique", "uuid", "from", "uuid1", "uuid3", "uuid4", "or", "uuid5", "." ]
train
https://github.com/agusmakmun/djipsum/blob/e7950556422b4039092db2083db7a83728230977/djipsum/fields.py#L226-L240
agusmakmun/djipsum
djipsum/fields.py
DjipsumFields.getOrCreateForeignKey
def getOrCreateForeignKey(self, model_class, field_name):
    """
    Return a random related object suitable as a ForeignKey value.

    Picks a random existing object of the related model; if none exist,
    gets or creates the object with pk=1.
    """
    # The FK field descriptor, e.g.
    # <django.db.models.fields.related.ForeignKey: test_ForeignKey>
    fk_field = getattr(model_class, field_name).field
    # NOTE(review): instantiating related_model() just to read __class__
    # runs the model's __init__ as a side effect — presumably harmless here,
    # but `fk_field.related_model` alone may suffice; confirm.
    target_model = fk_field.related_model().__class__
    queryset = target_model.objects.all()
    if queryset.exists():
        return self.randomize(queryset)
    # get_or_create returns a `(instance, created)` tuple; keep the instance.
    return target_model.objects.get_or_create(pk=1)[0]
python
def getOrCreateForeignKey(self, model_class, field_name): """ Return related random object to set as ForeignKey. """ # Getting related object type # Eg: <django.db.models.fields.related.ForeignKey: test_ForeignKey> instance = getattr(model_class, field_name).field # Getting the model name by instance to find/create first id/pk. # Eg: <class 'django.contrib.auth.models.User'> related_model = instance.related_model().__class__ # Trying to get random id from queryset. objects = related_model.objects.all() if objects.exists(): return self.randomize(objects) # Returning first object from tuple `(<User: user_name>, False)` return related_model.objects.get_or_create(pk=1)[0]
[ "def", "getOrCreateForeignKey", "(", "self", ",", "model_class", ",", "field_name", ")", ":", "# Getting related object type", "# Eg: <django.db.models.fields.related.ForeignKey: test_ForeignKey>", "instance", "=", "getattr", "(", "model_class", ",", "field_name", ")", ".", ...
Return related random object to set as ForeignKey.
[ "Return", "related", "random", "object", "to", "set", "as", "ForeignKey", "." ]
train
https://github.com/agusmakmun/djipsum/blob/e7950556422b4039092db2083db7a83728230977/djipsum/fields.py#L242-L260
agusmakmun/djipsum
djipsum/fields.py
DjipsumFields.create_validated_fields
def create_validated_fields(self):
    """
    Generate lorem-ipsum data for every validated field of ``self.model_class``
    and create ``self.maximum`` model instances.

    Each supported Django field type is assigned a random value of the
    appropriate shape; relational fields (``ForeignKey``, ``ManyToManyField``)
    are populated from existing related objects where possible.

    :return: the list of created model instances.
    """
    model_class = self.model_class
    fields = self.fields
    maximum = self.maximum
    objects = []
    for n in range(maximum):
        data_dict = {}
        for field in fields:
            # Closures are re-created per field so they capture the current
            # field['field_name'] when invoked immediately below.
            def default_assign(func):
                data_dict[field['field_name']] = func

            def string_assign(func):
                data_dict[field['field_name']] = str(func)

            if field['field_type'] == 'BigIntegerField':
                # values from -9223372036854775808 to 9223372036854775807
                default_assign(random.randint(-9223372036854775808, 9223372036854775807))
            elif field['field_type'] == 'BinaryField':
                # b'', self.randomBinaryField()
                default_assign(self.randomBinaryField())
            elif field['field_type'] == 'BooleanField':
                # True/False
                default_assign(self.randomize([True, False]))
            elif field['field_type'] == 'CharField':
                # self.randomCharField()
                string_assign(self.randomCharField(model_class, field['field_name']))
            elif field['field_type'] == 'CommaSeparatedIntegerField':
                # self.randomCommaSeparatedIntegerField()
                string_assign(self.randomCommaSeparatedIntegerField())
            elif field['field_type'] == 'DateField':
                # '2016-10-11'
                string_assign(str(datetime.datetime.now().date()))
            elif field['field_type'] == 'DateTimeField':
                # '2016-10-11 00:44:08.864285'
                string_assign(str(datetime.datetime.now()))
            elif field['field_type'] == 'DecimalField':
                # self.randomDecimalField()
                default_assign(self.randomDecimalField(model_class, field['field_name']))
            elif field['field_type'] == 'DurationField':
                # such as 1 day, 4 days or else.
                default_assign(datetime.timedelta(days=random.randint(1, 10)))
            elif field['field_type'] == 'EmailField':
                # self.randomEmailField()
                string_assign(self.randomEmailField())
            elif field['field_type'] == 'FileField':
                # self.randomFileField()
                string_assign(self.randomFileField())
            elif field['field_type'] == 'FloatField':
                # 1.92, 0.0, 5.0, or else.
                default_assign(float(("%.2f" % float(random.randint(0, 100) / 13))))
            elif field['field_type'] == 'ImageField':
                # self.randomImageField()
                string_assign(self.randomImageField())
            elif field['field_type'] == 'IntegerField':
                # values from -2147483648 to 2147483647
                default_assign(random.randint(-2147483648, 2147483647))
            elif field['field_type'] == 'GenericIPAddressField':
                # self.randomGenericIPAddressField()
                string_assign(self.randomGenericIPAddressField())
            elif field['field_type'] == 'NullBooleanField':
                # by Default is None/null
                default_assign(self.randomize([None, True, False]))
            elif field['field_type'] == 'PositiveIntegerField':
                # values from 0 to 2147483647
                default_assign(random.randint(0, 2147483647))
            elif field['field_type'] == 'PositiveSmallIntegerField':
                # values from 0 to 32767
                default_assign(random.randint(0, 32767))
            elif field['field_type'] == 'SlugField':
                # self.randomSlugField()
                string_assign(self.randomSlugField())
            elif field['field_type'] == 'SmallIntegerField':
                # values from -32768 to 32767
                default_assign(random.randint(-32768, 32767))
            elif field['field_type'] == 'TextField':
                # self.randomTextField()
                string_assign(self.randomTextField())
            elif field['field_type'] == 'TimeField':
                # accepts the same as DateField
                # NOTE(review): this stores a *date* string for a TimeField —
                # presumably Django coerces/validates it; confirm this is intended.
                string_assign(str(datetime.datetime.now().date()))
            elif field['field_type'] == 'URLField':
                # self.randomURLField()
                string_assign(self.randomURLField())
            elif field['field_type'] == 'UUIDField':
                # self.randomUUIDField()
                string_assign(self.randomUUIDField())
            elif field['field_type'] == 'ForeignKey':
                # self.getOrCreateForeignKey()
                default_assign(self.getOrCreateForeignKey(model_class, field['field_name']))
            # elif field['field_type'] == 'OneToOneField':  # pk/id -> not fixed yet.
            #     default_assign(self.randomize([1, ]))  # Unsolved: need specific pk/id
        obj = model_class.objects.create(**data_dict)
        # Relationship fields need the pk of the freshly created object,
        # so ManyToMany values are attached only after creation.
        for field in fields:
            if field['field_type'] == 'ManyToManyField':
                # Find the instance model field from `obj` already created before.
                instance_m2m = getattr(obj, field['field_name'])
                objects_m2m = instance_m2m.model.objects.all()
                # Djipsum only creates the `ManyToManyField` links when related objects exist.
                if objects_m2m.exists():
                    ids_m2m = [i.pk for i in objects_m2m]
                    # NOTE(review): sample size max(ids)-1 over range(min, max) can raise
                    # ValueError when pks are sparse or all equal — verify against callers.
                    random_decission = random.sample(
                        range(min(ids_m2m), max(ids_m2m)),
                        max(ids_m2m) - 1
                    )
                    # Fall back to a single random pk when too few candidates were drawn.
                    if len(random_decission) <= 2:
                        random_decission = [self.randomize(ids_m2m)]
                    related_objects = [
                        rel_obj for rel_obj in objects_m2m
                        if rel_obj.pk in random_decission
                    ]
                    # adding the `ManyToManyField`
                    instance_m2m.add(*related_objects)
        try:
            obj.save_m2m()
        except:  # NOTE(review): bare except silently falls back to save() — consider narrowing to AttributeError.
            obj.save()
        objects.append(obj)
    return objects
python
def create_validated_fields(self): """ To generate lorem ipsum by validated fields for the model. """ model_class = self.model_class fields = self.fields maximum = self.maximum objects = [] for n in range(maximum): data_dict = {} for field in fields: def default_assign(func): data_dict[field['field_name']] = func def string_assign(func): data_dict[field['field_name']] = str(func) if field['field_type'] == 'BigIntegerField': # values from -9223372036854775808 to 9223372036854775807 default_assign(random.randint(-9223372036854775808, 9223372036854775807)) elif field['field_type'] == 'BinaryField': # b'', self.randomBinaryField() default_assign(self.randomBinaryField()) elif field['field_type'] == 'BooleanField': # True/False default_assign(self.randomize([True, False])) elif field['field_type'] == 'CharField': # self.randomCharField() string_assign(self.randomCharField(model_class, field['field_name'])) elif field['field_type'] == 'CommaSeparatedIntegerField': # self.randomCommaSeparatedIntegerField() string_assign(self.randomCommaSeparatedIntegerField()) elif field['field_type'] == 'DateField': # '2016-10-11' string_assign(str(datetime.datetime.now().date())) elif field['field_type'] == 'DateTimeField': # '2016-10-11 00:44:08.864285' string_assign(str(datetime.datetime.now())) elif field['field_type'] == 'DecimalField': # self.randomDecimalField() default_assign(self.randomDecimalField(model_class, field['field_name'])) elif field['field_type'] == 'DurationField': # such as 1 day, 4 days or else. default_assign(datetime.timedelta(days=random.randint(1, 10))) elif field['field_type'] == 'EmailField': # self.randomEmailField() string_assign(self.randomEmailField()) elif field['field_type'] == 'FileField': # self.randomFileField() string_assign(self.randomFileField()) elif field['field_type'] == 'FloatField': # 1.92, 0.0, 5.0, or else. 
default_assign(float(("%.2f" % float(random.randint(0, 100) / 13)))) elif field['field_type'] == 'ImageField': # self.randomImageField() string_assign(self.randomImageField()) elif field['field_type'] == 'IntegerField': # values from -2147483648 to 2147483647 default_assign(random.randint(-2147483648, 2147483647)) elif field['field_type'] == 'GenericIPAddressField': # self.randomGenericIPAddressField() string_assign(self.randomGenericIPAddressField()) elif field['field_type'] == 'NullBooleanField': # by Default is None/null default_assign(self.randomize([None, True, False])) elif field['field_type'] == 'PositiveIntegerField': # values from 0 to 2147483647 default_assign(random.randint(0, 2147483647)) elif field['field_type'] == 'PositiveSmallIntegerField': # values from 0 to 32767 default_assign(random.randint(0, 32767)) elif field['field_type'] == 'SlugField': # self.randomSlugField() string_assign(self.randomSlugField()) elif field['field_type'] == 'SmallIntegerField': # values from -32768 to 32767 default_assign(random.randint(-32768, 32767)) elif field['field_type'] == 'TextField': # self.randomTextField() string_assign(self.randomTextField()) elif field['field_type'] == 'TimeField': # accepts the same as DateField string_assign(str(datetime.datetime.now().date())) elif field['field_type'] == 'URLField': # self.randomURLField() string_assign(self.randomURLField()) elif field['field_type'] == 'UUIDField': # self.randomUUIDField() string_assign(self.randomUUIDField()) elif field['field_type'] == 'ForeignKey': # self.getOrCreateForeignKey() default_assign(self.getOrCreateForeignKey(model_class, field['field_name'])) # elif field['field_type'] == 'OneToOneField': # pk/id -> not fixed yet. # default_assign(self.randomize([1, ])) # Unsolved: need specific pk/id obj = model_class.objects.create(**data_dict) # Because the Relationship Model need specific id from the object, # so, i handle it after created the object. 
for field in fields: if field['field_type'] == 'ManyToManyField': # Find the instance model field from `obj` already created before. instance_m2m = getattr(obj, field['field_name']) objects_m2m = instance_m2m.model.objects.all() # Djipsum only create the `ManyToManyField` if the related object is exists. if objects_m2m.exists(): ids_m2m = [i.pk for i in objects_m2m] random_decission = random.sample( range(min(ids_m2m), max(ids_m2m)), max(ids_m2m) - 1 ) # Let me know if the `random_decission` has minimum objects to be choice. if len(random_decission) <= 2: random_decission = [self.randomize(ids_m2m)] related_objects = [ rel_obj for rel_obj in objects_m2m if rel_obj.pk in random_decission ] # adding the `ManyToManyField` instance_m2m.add(*related_objects) try: obj.save_m2m() except: obj.save() objects.append(obj) return objects
[ "def", "create_validated_fields", "(", "self", ")", ":", "model_class", "=", "self", ".", "model_class", "fields", "=", "self", ".", "fields", "maximum", "=", "self", ".", "maximum", "objects", "=", "[", "]", "for", "n", "in", "range", "(", "maximum", ")...
To generate lorem ipsum by validated fields for the model.
[ "To", "generate", "lorem", "ipsum", "by", "validated", "fields", "for", "the", "model", "." ]
train
https://github.com/agusmakmun/djipsum/blob/e7950556422b4039092db2083db7a83728230977/djipsum/fields.py#L262-L365
pebble/libpebble2
libpebble2/services/getbytes.py
GetBytesService.get_flash_region
def get_flash_region(self, offset, length):
    """
    Retrieves the contents of a region of flash from the watch. This only
    works on watches running non-release firmware.

    Raises :exc:`.GetBytesError` on failure.

    :param offset: Start offset of the flash region to read.
    :param length: Number of bytes to read.
    :return: The retrieved data
    :rtype: bytes
    """
    request = GetBytesFlashRequest(offset=offset, length=length)
    return self._get(request)
python
def get_flash_region(self, offset, length): """ Retrieves the contents of a region of flash from the watch. This only works on watches running non-release firmware. Raises :exc:`.GetBytesError` on failure. :return: The retrieved data :rtype: bytes """ return self._get(GetBytesFlashRequest(offset=offset, length=length))
[ "def", "get_flash_region", "(", "self", ",", "offset", ",", "length", ")", ":", "return", "self", ".", "_get", "(", "GetBytesFlashRequest", "(", "offset", "=", "offset", ",", "length", "=", "length", ")", ")" ]
Retrieves the contents of a region of flash from the watch. This only works on watches running non-release firmware. Raises :exc:`.GetBytesError` on failure. :return: The retrieved data :rtype: bytes
[ "Retrieves", "the", "contents", "of", "a", "region", "of", "flash", "from", "the", "watch", ".", "This", "only", "works", "on", "watches", "running", "non", "-", "release", "firmware", ".", "Raises", ":", "exc", ":", ".", "GetBytesError", "on", "failure", ...
train
https://github.com/pebble/libpebble2/blob/23e2eb92cfc084e6f9e8c718711ac994ef606d18/libpebble2/services/getbytes.py#L47-L56
pebble/libpebble2
libpebble2/services/screenshot.py
Screenshot.grab_image
def grab_image(self): """ Takes a screenshot. Blocks until completion, or raises a :exc:`.ScreenshotError` on failure. While this method is executing, "progress" events will periodically be emitted with the following signature: :: (downloaded_so_far, total_size) :return: A list of bytearrays in RGB8 format, where each bytearray is one row of the image. """ # We have to open this queue before we make the request, to ensure we don't miss the response. queue = self._pebble.get_endpoint_queue(ScreenshotResponse) self._pebble.send_packet(ScreenshotRequest()) return self._read_screenshot(queue)
python
def grab_image(self): """ Takes a screenshot. Blocks until completion, or raises a :exc:`.ScreenshotError` on failure. While this method is executing, "progress" events will periodically be emitted with the following signature: :: (downloaded_so_far, total_size) :return: A list of bytearrays in RGB8 format, where each bytearray is one row of the image. """ # We have to open this queue before we make the request, to ensure we don't miss the response. queue = self._pebble.get_endpoint_queue(ScreenshotResponse) self._pebble.send_packet(ScreenshotRequest()) return self._read_screenshot(queue)
[ "def", "grab_image", "(", "self", ")", ":", "# We have to open this queue before we make the request, to ensure we don't miss the response.", "queue", "=", "self", ".", "_pebble", ".", "get_endpoint_queue", "(", "ScreenshotResponse", ")", "self", ".", "_pebble", ".", "send_...
Takes a screenshot. Blocks until completion, or raises a :exc:`.ScreenshotError` on failure. While this method is executing, "progress" events will periodically be emitted with the following signature: :: (downloaded_so_far, total_size) :return: A list of bytearrays in RGB8 format, where each bytearray is one row of the image.
[ "Takes", "a", "screenshot", ".", "Blocks", "until", "completion", "or", "raises", "a", ":", "exc", ":", ".", "ScreenshotError", "on", "failure", "." ]
train
https://github.com/pebble/libpebble2/blob/23e2eb92cfc084e6f9e8c718711ac994ef606d18/libpebble2/services/screenshot.py#L23-L36
alpha-xone/xone
xone/utils.py
trade_day
def trade_day(dt, cal='US'): """ Latest trading day w.r.t given dt Args: dt: date of reference cal: trading calendar Returns: pd.Timestamp: last trading day Examples: >>> trade_day('2018-12-25').strftime('%Y-%m-%d') '2018-12-24' """ from xone import calendar dt = pd.Timestamp(dt).date() return calendar.trading_dates(start=dt - pd.Timedelta('10D'), end=dt, calendar=cal)[-1]
python
def trade_day(dt, cal='US'): """ Latest trading day w.r.t given dt Args: dt: date of reference cal: trading calendar Returns: pd.Timestamp: last trading day Examples: >>> trade_day('2018-12-25').strftime('%Y-%m-%d') '2018-12-24' """ from xone import calendar dt = pd.Timestamp(dt).date() return calendar.trading_dates(start=dt - pd.Timedelta('10D'), end=dt, calendar=cal)[-1]
[ "def", "trade_day", "(", "dt", ",", "cal", "=", "'US'", ")", ":", "from", "xone", "import", "calendar", "dt", "=", "pd", ".", "Timestamp", "(", "dt", ")", ".", "date", "(", ")", "return", "calendar", ".", "trading_dates", "(", "start", "=", "dt", "...
Latest trading day w.r.t given dt Args: dt: date of reference cal: trading calendar Returns: pd.Timestamp: last trading day Examples: >>> trade_day('2018-12-25').strftime('%Y-%m-%d') '2018-12-24'
[ "Latest", "trading", "day", "w", ".", "r", ".", "t", "given", "dt" ]
train
https://github.com/alpha-xone/xone/blob/68534a30f7f1760b220ba58040be3927f7dfbcf4/xone/utils.py#L52-L70
alpha-xone/xone
xone/utils.py
cur_time
def cur_time(typ='date', tz=DEFAULT_TZ, trading=True, cal='US'): """ Current time Args: typ: one of ['date', 'time', 'time_path', 'raw', ''] tz: timezone trading: check if current date is trading day cal: trading calendar Returns: relevant current time or date Examples: >>> cur_dt = pd.Timestamp('now') >>> cur_time(typ='date', trading=False) == cur_dt.strftime('%Y-%m-%d') True >>> cur_time(typ='time', trading=False) == cur_dt.strftime('%Y-%m-%d %H:%M:%S') True >>> cur_time(typ='time_path', trading=False) == cur_dt.strftime('%Y-%m-%d/%H-%M-%S') True >>> isinstance(cur_time(typ='raw', tz='Europe/London'), pd.Timestamp) True >>> isinstance(cur_time(typ='raw', trading=True), pd.Timestamp) True >>> cur_time(typ='', trading=False) == cur_dt.date() True """ dt = pd.Timestamp('now', tz=tz) if typ == 'date': if trading: return trade_day(dt=dt, cal=cal).strftime('%Y-%m-%d') else: return dt.strftime('%Y-%m-%d') if typ == 'time': return dt.strftime('%Y-%m-%d %H:%M:%S') if typ == 'time_path': return dt.strftime('%Y-%m-%d/%H-%M-%S') if typ == 'raw': return dt return trade_day(dt).date() if trading else dt.date()
python
def cur_time(typ='date', tz=DEFAULT_TZ, trading=True, cal='US'): """ Current time Args: typ: one of ['date', 'time', 'time_path', 'raw', ''] tz: timezone trading: check if current date is trading day cal: trading calendar Returns: relevant current time or date Examples: >>> cur_dt = pd.Timestamp('now') >>> cur_time(typ='date', trading=False) == cur_dt.strftime('%Y-%m-%d') True >>> cur_time(typ='time', trading=False) == cur_dt.strftime('%Y-%m-%d %H:%M:%S') True >>> cur_time(typ='time_path', trading=False) == cur_dt.strftime('%Y-%m-%d/%H-%M-%S') True >>> isinstance(cur_time(typ='raw', tz='Europe/London'), pd.Timestamp) True >>> isinstance(cur_time(typ='raw', trading=True), pd.Timestamp) True >>> cur_time(typ='', trading=False) == cur_dt.date() True """ dt = pd.Timestamp('now', tz=tz) if typ == 'date': if trading: return trade_day(dt=dt, cal=cal).strftime('%Y-%m-%d') else: return dt.strftime('%Y-%m-%d') if typ == 'time': return dt.strftime('%Y-%m-%d %H:%M:%S') if typ == 'time_path': return dt.strftime('%Y-%m-%d/%H-%M-%S') if typ == 'raw': return dt return trade_day(dt).date() if trading else dt.date()
[ "def", "cur_time", "(", "typ", "=", "'date'", ",", "tz", "=", "DEFAULT_TZ", ",", "trading", "=", "True", ",", "cal", "=", "'US'", ")", ":", "dt", "=", "pd", ".", "Timestamp", "(", "'now'", ",", "tz", "=", "tz", ")", "if", "typ", "==", "'date'", ...
Current time Args: typ: one of ['date', 'time', 'time_path', 'raw', ''] tz: timezone trading: check if current date is trading day cal: trading calendar Returns: relevant current time or date Examples: >>> cur_dt = pd.Timestamp('now') >>> cur_time(typ='date', trading=False) == cur_dt.strftime('%Y-%m-%d') True >>> cur_time(typ='time', trading=False) == cur_dt.strftime('%Y-%m-%d %H:%M:%S') True >>> cur_time(typ='time_path', trading=False) == cur_dt.strftime('%Y-%m-%d/%H-%M-%S') True >>> isinstance(cur_time(typ='raw', tz='Europe/London'), pd.Timestamp) True >>> isinstance(cur_time(typ='raw', trading=True), pd.Timestamp) True >>> cur_time(typ='', trading=False) == cur_dt.date() True
[ "Current", "time" ]
train
https://github.com/alpha-xone/xone/blob/68534a30f7f1760b220ba58040be3927f7dfbcf4/xone/utils.py#L73-L111
alpha-xone/xone
xone/utils.py
align_data
def align_data(*args): """ Resample and aligh data for defined frequency Args: *args: DataFrame of data to be aligned Returns: pd.DataFrame: aligned data with renamed columns Examples: >>> start = '2018-09-10T10:10:00' >>> tz = 'Australia/Sydney' >>> idx = pd.date_range(start=start, periods=6, freq='min').tz_localize(tz) >>> close_1 = [31.08, 31.10, 31.11, 31.07, 31.04, 31.04] >>> vol_1 = [10166, 69981, 14343, 10096, 11506, 9718] >>> d1 = pd.DataFrame(dict(price=close_1, volume=vol_1), index=idx) >>> d1 price volume 2018-09-10 10:10:00+10:00 31.08 10166 2018-09-10 10:11:00+10:00 31.10 69981 2018-09-10 10:12:00+10:00 31.11 14343 2018-09-10 10:13:00+10:00 31.07 10096 2018-09-10 10:14:00+10:00 31.04 11506 2018-09-10 10:15:00+10:00 31.04 9718 >>> close_2 = [70.81, 70.78, 70.85, 70.79, 70.79, 70.79] >>> vol_2 = [4749, 6762, 4908, 2002, 9170, 9791] >>> d2 = pd.DataFrame(dict(price=close_2, volume=vol_2), index=idx) >>> d2 price volume 2018-09-10 10:10:00+10:00 70.81 4749 2018-09-10 10:11:00+10:00 70.78 6762 2018-09-10 10:12:00+10:00 70.85 4908 2018-09-10 10:13:00+10:00 70.79 2002 2018-09-10 10:14:00+10:00 70.79 9170 2018-09-10 10:15:00+10:00 70.79 9791 >>> align_data(d1, d2) price_1 volume_1 price_2 volume_2 2018-09-10 10:10:00+10:00 31.08 10166 70.81 4749 2018-09-10 10:11:00+10:00 31.10 69981 70.78 6762 2018-09-10 10:12:00+10:00 31.11 14343 70.85 4908 2018-09-10 10:13:00+10:00 31.07 10096 70.79 2002 2018-09-10 10:14:00+10:00 31.04 11506 70.79 9170 2018-09-10 10:15:00+10:00 31.04 9718 70.79 9791 """ res = pd.DataFrame(pd.concat([ d.loc[~d.index.duplicated(keep='first')].rename( columns=lambda vv: '%s_%d' % (vv, i + 1) ) for i, d in enumerate(args) ], axis=1)) data_cols = [col for col in res.columns if col[-2:] == '_1'] other_cols = [col for col in res.columns if col[-2:] != '_1'] res.loc[:, other_cols] = res.loc[:, other_cols].fillna(method='pad') return res.dropna(subset=data_cols)
python
def align_data(*args): """ Resample and aligh data for defined frequency Args: *args: DataFrame of data to be aligned Returns: pd.DataFrame: aligned data with renamed columns Examples: >>> start = '2018-09-10T10:10:00' >>> tz = 'Australia/Sydney' >>> idx = pd.date_range(start=start, periods=6, freq='min').tz_localize(tz) >>> close_1 = [31.08, 31.10, 31.11, 31.07, 31.04, 31.04] >>> vol_1 = [10166, 69981, 14343, 10096, 11506, 9718] >>> d1 = pd.DataFrame(dict(price=close_1, volume=vol_1), index=idx) >>> d1 price volume 2018-09-10 10:10:00+10:00 31.08 10166 2018-09-10 10:11:00+10:00 31.10 69981 2018-09-10 10:12:00+10:00 31.11 14343 2018-09-10 10:13:00+10:00 31.07 10096 2018-09-10 10:14:00+10:00 31.04 11506 2018-09-10 10:15:00+10:00 31.04 9718 >>> close_2 = [70.81, 70.78, 70.85, 70.79, 70.79, 70.79] >>> vol_2 = [4749, 6762, 4908, 2002, 9170, 9791] >>> d2 = pd.DataFrame(dict(price=close_2, volume=vol_2), index=idx) >>> d2 price volume 2018-09-10 10:10:00+10:00 70.81 4749 2018-09-10 10:11:00+10:00 70.78 6762 2018-09-10 10:12:00+10:00 70.85 4908 2018-09-10 10:13:00+10:00 70.79 2002 2018-09-10 10:14:00+10:00 70.79 9170 2018-09-10 10:15:00+10:00 70.79 9791 >>> align_data(d1, d2) price_1 volume_1 price_2 volume_2 2018-09-10 10:10:00+10:00 31.08 10166 70.81 4749 2018-09-10 10:11:00+10:00 31.10 69981 70.78 6762 2018-09-10 10:12:00+10:00 31.11 14343 70.85 4908 2018-09-10 10:13:00+10:00 31.07 10096 70.79 2002 2018-09-10 10:14:00+10:00 31.04 11506 70.79 9170 2018-09-10 10:15:00+10:00 31.04 9718 70.79 9791 """ res = pd.DataFrame(pd.concat([ d.loc[~d.index.duplicated(keep='first')].rename( columns=lambda vv: '%s_%d' % (vv, i + 1) ) for i, d in enumerate(args) ], axis=1)) data_cols = [col for col in res.columns if col[-2:] == '_1'] other_cols = [col for col in res.columns if col[-2:] != '_1'] res.loc[:, other_cols] = res.loc[:, other_cols].fillna(method='pad') return res.dropna(subset=data_cols)
[ "def", "align_data", "(", "*", "args", ")", ":", "res", "=", "pd", ".", "DataFrame", "(", "pd", ".", "concat", "(", "[", "d", ".", "loc", "[", "~", "d", ".", "index", ".", "duplicated", "(", "keep", "=", "'first'", ")", "]", ".", "rename", "(",...
Resample and aligh data for defined frequency Args: *args: DataFrame of data to be aligned Returns: pd.DataFrame: aligned data with renamed columns Examples: >>> start = '2018-09-10T10:10:00' >>> tz = 'Australia/Sydney' >>> idx = pd.date_range(start=start, periods=6, freq='min').tz_localize(tz) >>> close_1 = [31.08, 31.10, 31.11, 31.07, 31.04, 31.04] >>> vol_1 = [10166, 69981, 14343, 10096, 11506, 9718] >>> d1 = pd.DataFrame(dict(price=close_1, volume=vol_1), index=idx) >>> d1 price volume 2018-09-10 10:10:00+10:00 31.08 10166 2018-09-10 10:11:00+10:00 31.10 69981 2018-09-10 10:12:00+10:00 31.11 14343 2018-09-10 10:13:00+10:00 31.07 10096 2018-09-10 10:14:00+10:00 31.04 11506 2018-09-10 10:15:00+10:00 31.04 9718 >>> close_2 = [70.81, 70.78, 70.85, 70.79, 70.79, 70.79] >>> vol_2 = [4749, 6762, 4908, 2002, 9170, 9791] >>> d2 = pd.DataFrame(dict(price=close_2, volume=vol_2), index=idx) >>> d2 price volume 2018-09-10 10:10:00+10:00 70.81 4749 2018-09-10 10:11:00+10:00 70.78 6762 2018-09-10 10:12:00+10:00 70.85 4908 2018-09-10 10:13:00+10:00 70.79 2002 2018-09-10 10:14:00+10:00 70.79 9170 2018-09-10 10:15:00+10:00 70.79 9791 >>> align_data(d1, d2) price_1 volume_1 price_2 volume_2 2018-09-10 10:10:00+10:00 31.08 10166 70.81 4749 2018-09-10 10:11:00+10:00 31.10 69981 70.78 6762 2018-09-10 10:12:00+10:00 31.11 14343 70.85 4908 2018-09-10 10:13:00+10:00 31.07 10096 70.79 2002 2018-09-10 10:14:00+10:00 31.04 11506 70.79 9170 2018-09-10 10:15:00+10:00 31.04 9718 70.79 9791
[ "Resample", "and", "aligh", "data", "for", "defined", "frequency" ]
train
https://github.com/alpha-xone/xone/blob/68534a30f7f1760b220ba58040be3927f7dfbcf4/xone/utils.py#L114-L167
alpha-xone/xone
xone/utils.py
cat_data
def cat_data(data_kw): """ Concatenate data with ticker as sub column index Args: data_kw: key = ticker, value = pd.DataFrame Returns: pd.DataFrame Examples: >>> start = '2018-09-10T10:10:00' >>> tz = 'Australia/Sydney' >>> idx = pd.date_range(start=start, periods=6, freq='min').tz_localize(tz) >>> close_1 = [31.08, 31.10, 31.11, 31.07, 31.04, 31.04] >>> vol_1 = [10166, 69981, 14343, 10096, 11506, 9718] >>> d1 = pd.DataFrame(dict(price=close_1, volume=vol_1), index=idx) >>> close_2 = [70.81, 70.78, 70.85, 70.79, 70.79, 70.79] >>> vol_2 = [4749, 6762, 4908, 2002, 9170, 9791] >>> d2 = pd.DataFrame(dict(price=close_2, volume=vol_2), index=idx) >>> sample = cat_data({'BHP AU': d1, 'RIO AU': d2}) >>> sample.columns MultiIndex(levels=[['BHP AU', 'RIO AU'], ['price', 'volume']], codes=[[0, 0, 1, 1], [0, 1, 0, 1]], names=['ticker', None]) >>> r = sample.transpose().iloc[:, :2] >>> r.index.names = (None, None) >>> r 2018-09-10 10:10:00+10:00 2018-09-10 10:11:00+10:00 BHP AU price 31.08 31.10 volume 10,166.00 69,981.00 RIO AU price 70.81 70.78 volume 4,749.00 6,762.00 """ if len(data_kw) == 0: return pd.DataFrame() return pd.DataFrame(pd.concat([ data.assign(ticker=ticker).set_index('ticker', append=True) .unstack('ticker').swaplevel(0, 1, axis=1) for ticker, data in data_kw.items() ], axis=1))
python
def cat_data(data_kw): """ Concatenate data with ticker as sub column index Args: data_kw: key = ticker, value = pd.DataFrame Returns: pd.DataFrame Examples: >>> start = '2018-09-10T10:10:00' >>> tz = 'Australia/Sydney' >>> idx = pd.date_range(start=start, periods=6, freq='min').tz_localize(tz) >>> close_1 = [31.08, 31.10, 31.11, 31.07, 31.04, 31.04] >>> vol_1 = [10166, 69981, 14343, 10096, 11506, 9718] >>> d1 = pd.DataFrame(dict(price=close_1, volume=vol_1), index=idx) >>> close_2 = [70.81, 70.78, 70.85, 70.79, 70.79, 70.79] >>> vol_2 = [4749, 6762, 4908, 2002, 9170, 9791] >>> d2 = pd.DataFrame(dict(price=close_2, volume=vol_2), index=idx) >>> sample = cat_data({'BHP AU': d1, 'RIO AU': d2}) >>> sample.columns MultiIndex(levels=[['BHP AU', 'RIO AU'], ['price', 'volume']], codes=[[0, 0, 1, 1], [0, 1, 0, 1]], names=['ticker', None]) >>> r = sample.transpose().iloc[:, :2] >>> r.index.names = (None, None) >>> r 2018-09-10 10:10:00+10:00 2018-09-10 10:11:00+10:00 BHP AU price 31.08 31.10 volume 10,166.00 69,981.00 RIO AU price 70.81 70.78 volume 4,749.00 6,762.00 """ if len(data_kw) == 0: return pd.DataFrame() return pd.DataFrame(pd.concat([ data.assign(ticker=ticker).set_index('ticker', append=True) .unstack('ticker').swaplevel(0, 1, axis=1) for ticker, data in data_kw.items() ], axis=1))
[ "def", "cat_data", "(", "data_kw", ")", ":", "if", "len", "(", "data_kw", ")", "==", "0", ":", "return", "pd", ".", "DataFrame", "(", ")", "return", "pd", ".", "DataFrame", "(", "pd", ".", "concat", "(", "[", "data", ".", "assign", "(", "ticker", ...
Concatenate data with ticker as sub column index Args: data_kw: key = ticker, value = pd.DataFrame Returns: pd.DataFrame Examples: >>> start = '2018-09-10T10:10:00' >>> tz = 'Australia/Sydney' >>> idx = pd.date_range(start=start, periods=6, freq='min').tz_localize(tz) >>> close_1 = [31.08, 31.10, 31.11, 31.07, 31.04, 31.04] >>> vol_1 = [10166, 69981, 14343, 10096, 11506, 9718] >>> d1 = pd.DataFrame(dict(price=close_1, volume=vol_1), index=idx) >>> close_2 = [70.81, 70.78, 70.85, 70.79, 70.79, 70.79] >>> vol_2 = [4749, 6762, 4908, 2002, 9170, 9791] >>> d2 = pd.DataFrame(dict(price=close_2, volume=vol_2), index=idx) >>> sample = cat_data({'BHP AU': d1, 'RIO AU': d2}) >>> sample.columns MultiIndex(levels=[['BHP AU', 'RIO AU'], ['price', 'volume']], codes=[[0, 0, 1, 1], [0, 1, 0, 1]], names=['ticker', None]) >>> r = sample.transpose().iloc[:, :2] >>> r.index.names = (None, None) >>> r 2018-09-10 10:10:00+10:00 2018-09-10 10:11:00+10:00 BHP AU price 31.08 31.10 volume 10,166.00 69,981.00 RIO AU price 70.81 70.78 volume 4,749.00 6,762.00
[ "Concatenate", "data", "with", "ticker", "as", "sub", "column", "index" ]
train
https://github.com/alpha-xone/xone/blob/68534a30f7f1760b220ba58040be3927f7dfbcf4/xone/utils.py#L170-L209
alpha-xone/xone
xone/utils.py
to_frame
def to_frame(data_list, exc_cols=None, **kwargs): """ Dict in Python 3.6 keeps insertion order, but cannot be relied upon This method is to keep column names in order In Python 3.7 this method is redundant Args: data_list: list of dict exc_cols: exclude columns Returns: pd.DataFrame Example: >>> d_list = [ ... dict(sid=1, symbol='1 HK', price=89), ... dict(sid=700, symbol='700 HK', price=350) ... ] >>> to_frame(d_list) sid symbol price 0 1 1 HK 89 1 700 700 HK 350 >>> to_frame(d_list, exc_cols=['price']) sid symbol 0 1 1 HK 1 700 700 HK """ from collections import OrderedDict return pd.DataFrame( pd.Series(data_list).apply(OrderedDict).tolist(), **kwargs ).drop(columns=[] if exc_cols is None else exc_cols)
python
def to_frame(data_list, exc_cols=None, **kwargs): """ Dict in Python 3.6 keeps insertion order, but cannot be relied upon This method is to keep column names in order In Python 3.7 this method is redundant Args: data_list: list of dict exc_cols: exclude columns Returns: pd.DataFrame Example: >>> d_list = [ ... dict(sid=1, symbol='1 HK', price=89), ... dict(sid=700, symbol='700 HK', price=350) ... ] >>> to_frame(d_list) sid symbol price 0 1 1 HK 89 1 700 700 HK 350 >>> to_frame(d_list, exc_cols=['price']) sid symbol 0 1 1 HK 1 700 700 HK """ from collections import OrderedDict return pd.DataFrame( pd.Series(data_list).apply(OrderedDict).tolist(), **kwargs ).drop(columns=[] if exc_cols is None else exc_cols)
[ "def", "to_frame", "(", "data_list", ",", "exc_cols", "=", "None", ",", "*", "*", "kwargs", ")", ":", "from", "collections", "import", "OrderedDict", "return", "pd", ".", "DataFrame", "(", "pd", ".", "Series", "(", "data_list", ")", ".", "apply", "(", ...
Dict in Python 3.6 keeps insertion order, but cannot be relied upon This method is to keep column names in order In Python 3.7 this method is redundant Args: data_list: list of dict exc_cols: exclude columns Returns: pd.DataFrame Example: >>> d_list = [ ... dict(sid=1, symbol='1 HK', price=89), ... dict(sid=700, symbol='700 HK', price=350) ... ] >>> to_frame(d_list) sid symbol price 0 1 1 HK 89 1 700 700 HK 350 >>> to_frame(d_list, exc_cols=['price']) sid symbol 0 1 1 HK 1 700 700 HK
[ "Dict", "in", "Python", "3", ".", "6", "keeps", "insertion", "order", "but", "cannot", "be", "relied", "upon", "This", "method", "is", "to", "keep", "column", "names", "in", "order", "In", "Python", "3", ".", "7", "this", "method", "is", "redundant" ]
train
https://github.com/alpha-xone/xone/blob/68534a30f7f1760b220ba58040be3927f7dfbcf4/xone/utils.py#L262-L293
alpha-xone/xone
xone/utils.py
spline_curve
def spline_curve(x, y, step, val_min=0, val_max=None, kind='quadratic', **kwargs): """ Fit spline curve for given x, y values Args: x: x-values y: y-values step: step size for interpolation val_min: minimum value of result val_max: maximum value of result kind: for scipy.interpolate.interp1d Specifies the kind of interpolation as a string (‘linear’, ‘nearest’, ‘zero’, ‘slinear’, ‘quadratic’, ‘cubic’, ‘previous’, ‘next’, where ‘zero’, ‘slinear’, ‘quadratic’ and ‘cubic’ refer to a spline interpolation of zeroth, first, second or third order; ‘previous’ and ‘next’ simply return the previous or next value of the point) or as an integer specifying the order of the spline interpolator to use. Default is ‘linear’. **kwargs: additional parameters for interp1d Returns: pd.Series: fitted curve Examples: >>> x = pd.Series([1, 2, 3]) >>> y = pd.Series([np.exp(1), np.exp(2), np.exp(3)]) >>> r = spline_curve(x=x, y=y, step=.5, val_min=3, val_max=18, fill_value='extrapolate') >>> r.round(2).index.tolist() [1.0, 1.5, 2.0, 2.5, 3.0] >>> r.round(2).tolist() [3.0, 4.05, 7.39, 12.73, 18.0] >>> y_df = pd.DataFrame(dict(a=[np.exp(1), np.exp(2), np.exp(3)], b=[2, 3, 4])) >>> r_df = spline_curve(x=x, y=y_df, step=.5, val_min=3, fill_value='extrapolate') >>> r_df.round(2) a b 1.00 3.00 3.00 1.50 4.05 3.00 2.00 7.39 3.00 2.50 12.73 3.50 3.00 20.09 4.00 """ from scipy.interpolate import interp1d from collections import OrderedDict if isinstance(y, pd.DataFrame): return pd.DataFrame(OrderedDict([(col, spline_curve( x, y.loc[:, col], step=step, val_min=val_min, val_max=val_max, kind=kind )) for col in y.columns])) fitted_curve = interp1d(x, y, kind=kind, **kwargs) new_x = np.arange(x.min(), x.max() + step / 2., step=step) return pd.Series( new_x, index=new_x, name=y.name if hasattr(y, 'name') else None ).apply(fitted_curve).clip(val_min, val_max)
python
def spline_curve(x, y, step, val_min=0, val_max=None, kind='quadratic', **kwargs): """ Fit spline curve for given x, y values Args: x: x-values y: y-values step: step size for interpolation val_min: minimum value of result val_max: maximum value of result kind: for scipy.interpolate.interp1d Specifies the kind of interpolation as a string (‘linear’, ‘nearest’, ‘zero’, ‘slinear’, ‘quadratic’, ‘cubic’, ‘previous’, ‘next’, where ‘zero’, ‘slinear’, ‘quadratic’ and ‘cubic’ refer to a spline interpolation of zeroth, first, second or third order; ‘previous’ and ‘next’ simply return the previous or next value of the point) or as an integer specifying the order of the spline interpolator to use. Default is ‘linear’. **kwargs: additional parameters for interp1d Returns: pd.Series: fitted curve Examples: >>> x = pd.Series([1, 2, 3]) >>> y = pd.Series([np.exp(1), np.exp(2), np.exp(3)]) >>> r = spline_curve(x=x, y=y, step=.5, val_min=3, val_max=18, fill_value='extrapolate') >>> r.round(2).index.tolist() [1.0, 1.5, 2.0, 2.5, 3.0] >>> r.round(2).tolist() [3.0, 4.05, 7.39, 12.73, 18.0] >>> y_df = pd.DataFrame(dict(a=[np.exp(1), np.exp(2), np.exp(3)], b=[2, 3, 4])) >>> r_df = spline_curve(x=x, y=y_df, step=.5, val_min=3, fill_value='extrapolate') >>> r_df.round(2) a b 1.00 3.00 3.00 1.50 4.05 3.00 2.00 7.39 3.00 2.50 12.73 3.50 3.00 20.09 4.00 """ from scipy.interpolate import interp1d from collections import OrderedDict if isinstance(y, pd.DataFrame): return pd.DataFrame(OrderedDict([(col, spline_curve( x, y.loc[:, col], step=step, val_min=val_min, val_max=val_max, kind=kind )) for col in y.columns])) fitted_curve = interp1d(x, y, kind=kind, **kwargs) new_x = np.arange(x.min(), x.max() + step / 2., step=step) return pd.Series( new_x, index=new_x, name=y.name if hasattr(y, 'name') else None ).apply(fitted_curve).clip(val_min, val_max)
[ "def", "spline_curve", "(", "x", ",", "y", ",", "step", ",", "val_min", "=", "0", ",", "val_max", "=", "None", ",", "kind", "=", "'quadratic'", ",", "*", "*", "kwargs", ")", ":", "from", "scipy", ".", "interpolate", "import", "interp1d", "from", "col...
Fit spline curve for given x, y values Args: x: x-values y: y-values step: step size for interpolation val_min: minimum value of result val_max: maximum value of result kind: for scipy.interpolate.interp1d Specifies the kind of interpolation as a string (‘linear’, ‘nearest’, ‘zero’, ‘slinear’, ‘quadratic’, ‘cubic’, ‘previous’, ‘next’, where ‘zero’, ‘slinear’, ‘quadratic’ and ‘cubic’ refer to a spline interpolation of zeroth, first, second or third order; ‘previous’ and ‘next’ simply return the previous or next value of the point) or as an integer specifying the order of the spline interpolator to use. Default is ‘linear’. **kwargs: additional parameters for interp1d Returns: pd.Series: fitted curve Examples: >>> x = pd.Series([1, 2, 3]) >>> y = pd.Series([np.exp(1), np.exp(2), np.exp(3)]) >>> r = spline_curve(x=x, y=y, step=.5, val_min=3, val_max=18, fill_value='extrapolate') >>> r.round(2).index.tolist() [1.0, 1.5, 2.0, 2.5, 3.0] >>> r.round(2).tolist() [3.0, 4.05, 7.39, 12.73, 18.0] >>> y_df = pd.DataFrame(dict(a=[np.exp(1), np.exp(2), np.exp(3)], b=[2, 3, 4])) >>> r_df = spline_curve(x=x, y=y_df, step=.5, val_min=3, fill_value='extrapolate') >>> r_df.round(2) a b 1.00 3.00 3.00 1.50 4.05 3.00 2.00 7.39 3.00 2.50 12.73 3.50 3.00 20.09 4.00
[ "Fit", "spline", "curve", "for", "given", "x", "y", "values" ]
train
https://github.com/alpha-xone/xone/blob/68534a30f7f1760b220ba58040be3927f7dfbcf4/xone/utils.py#L296-L346
alpha-xone/xone
xone/utils.py
format_float
def format_float(digit=0, is_pct=False): """ Number display format for pandas Args: digit: number of digits to keep if negative, add one space in front of positive pct is_pct: % display Returns: lambda function to format floats Examples: >>> format_float(0)(1e5) '100,000' >>> format_float(1)(1e5) '100,000.0' >>> format_float(-1, True)(.2) ' 20.0%' >>> format_float(-1, True)(-.2) '-20.0%' >>> pd.options.display.float_format = format_float(2) """ if is_pct: space = ' ' if digit < 0 else '' fmt = f'{{:{space}.{abs(int(digit))}%}}' return lambda vv: 'NaN' if np.isnan(vv) else fmt.format(vv) else: return lambda vv: 'NaN' if np.isnan(vv) else ( f'{{:,.{digit}f}}'.format(vv) if vv else '-' + ' ' * abs(digit) )
python
def format_float(digit=0, is_pct=False): """ Number display format for pandas Args: digit: number of digits to keep if negative, add one space in front of positive pct is_pct: % display Returns: lambda function to format floats Examples: >>> format_float(0)(1e5) '100,000' >>> format_float(1)(1e5) '100,000.0' >>> format_float(-1, True)(.2) ' 20.0%' >>> format_float(-1, True)(-.2) '-20.0%' >>> pd.options.display.float_format = format_float(2) """ if is_pct: space = ' ' if digit < 0 else '' fmt = f'{{:{space}.{abs(int(digit))}%}}' return lambda vv: 'NaN' if np.isnan(vv) else fmt.format(vv) else: return lambda vv: 'NaN' if np.isnan(vv) else ( f'{{:,.{digit}f}}'.format(vv) if vv else '-' + ' ' * abs(digit) )
[ "def", "format_float", "(", "digit", "=", "0", ",", "is_pct", "=", "False", ")", ":", "if", "is_pct", ":", "space", "=", "' '", "if", "digit", "<", "0", "else", "''", "fmt", "=", "f'{{:{space}.{abs(int(digit))}%}}'", "return", "lambda", "vv", ":", "'NaN'...
Number display format for pandas Args: digit: number of digits to keep if negative, add one space in front of positive pct is_pct: % display Returns: lambda function to format floats Examples: >>> format_float(0)(1e5) '100,000' >>> format_float(1)(1e5) '100,000.0' >>> format_float(-1, True)(.2) ' 20.0%' >>> format_float(-1, True)(-.2) '-20.0%' >>> pd.options.display.float_format = format_float(2)
[ "Number", "display", "format", "for", "pandas" ]
train
https://github.com/alpha-xone/xone/blob/68534a30f7f1760b220ba58040be3927f7dfbcf4/xone/utils.py#L369-L400
alpha-xone/xone
xone/utils.py
inst_repr
def inst_repr(instance, fmt='str', public_only=True): """ Generate class instance signature from its __dict__ From python 3.6 dict is ordered and order of attributes will be preserved automatically Args: instance: class instance fmt: ['json', 'str'] public_only: if display public members only Returns: str: string or json representation of instance Examples: >>> inst_repr(1) '' >>> class SampleClass(object): ... def __init__(self): ... self.b = 3 ... self.a = 4 ... self._private_ = 'hidden' >>> >>> s = SampleClass() >>> inst_repr(s) '{b=3, a=4}' >>> inst_repr(s, public_only=False) '{b=3, a=4, _private_=hidden}' >>> json.loads(inst_repr(s, fmt='json')) {'b': 3, 'a': 4} >>> inst_repr(s, fmt='unknown') '' """ if not hasattr(instance, '__dict__'): return '' if public_only: inst_dict = {k: v for k, v in instance.__dict__.items() if k[0] != '_'} else: inst_dict = instance.__dict__ if fmt == 'json': return json.dumps(inst_dict, indent=2) elif fmt == 'str': return to_str(inst_dict, public_only=public_only) return ''
python
def inst_repr(instance, fmt='str', public_only=True): """ Generate class instance signature from its __dict__ From python 3.6 dict is ordered and order of attributes will be preserved automatically Args: instance: class instance fmt: ['json', 'str'] public_only: if display public members only Returns: str: string or json representation of instance Examples: >>> inst_repr(1) '' >>> class SampleClass(object): ... def __init__(self): ... self.b = 3 ... self.a = 4 ... self._private_ = 'hidden' >>> >>> s = SampleClass() >>> inst_repr(s) '{b=3, a=4}' >>> inst_repr(s, public_only=False) '{b=3, a=4, _private_=hidden}' >>> json.loads(inst_repr(s, fmt='json')) {'b': 3, 'a': 4} >>> inst_repr(s, fmt='unknown') '' """ if not hasattr(instance, '__dict__'): return '' if public_only: inst_dict = {k: v for k, v in instance.__dict__.items() if k[0] != '_'} else: inst_dict = instance.__dict__ if fmt == 'json': return json.dumps(inst_dict, indent=2) elif fmt == 'str': return to_str(inst_dict, public_only=public_only) return ''
[ "def", "inst_repr", "(", "instance", ",", "fmt", "=", "'str'", ",", "public_only", "=", "True", ")", ":", "if", "not", "hasattr", "(", "instance", ",", "'__dict__'", ")", ":", "return", "''", "if", "public_only", ":", "inst_dict", "=", "{", "k", ":", ...
Generate class instance signature from its __dict__ From python 3.6 dict is ordered and order of attributes will be preserved automatically Args: instance: class instance fmt: ['json', 'str'] public_only: if display public members only Returns: str: string or json representation of instance Examples: >>> inst_repr(1) '' >>> class SampleClass(object): ... def __init__(self): ... self.b = 3 ... self.a = 4 ... self._private_ = 'hidden' >>> >>> s = SampleClass() >>> inst_repr(s) '{b=3, a=4}' >>> inst_repr(s, public_only=False) '{b=3, a=4, _private_=hidden}' >>> json.loads(inst_repr(s, fmt='json')) {'b': 3, 'a': 4} >>> inst_repr(s, fmt='unknown') ''
[ "Generate", "class", "instance", "signature", "from", "its", "__dict__", "From", "python", "3", ".", "6", "dict", "is", "ordered", "and", "order", "of", "attributes", "will", "be", "preserved", "automatically" ]
train
https://github.com/alpha-xone/xone/blob/68534a30f7f1760b220ba58040be3927f7dfbcf4/xone/utils.py#L469-L509
tkf/rash
rash/utils/sqlconstructor.py
concat_expr
def concat_expr(operator, conditions): """ Concatenate `conditions` with `operator` and wrap it by (). It returns a string in a list or empty list, if `conditions` is empty. """ expr = " {0} ".format(operator).join(conditions) return ["({0})".format(expr)] if expr else []
python
def concat_expr(operator, conditions): """ Concatenate `conditions` with `operator` and wrap it by (). It returns a string in a list or empty list, if `conditions` is empty. """ expr = " {0} ".format(operator).join(conditions) return ["({0})".format(expr)] if expr else []
[ "def", "concat_expr", "(", "operator", ",", "conditions", ")", ":", "expr", "=", "\" {0} \"", ".", "format", "(", "operator", ")", ".", "join", "(", "conditions", ")", "return", "[", "\"({0})\"", ".", "format", "(", "expr", ")", "]", "if", "expr", "els...
Concatenate `conditions` with `operator` and wrap it by (). It returns a string in a list or empty list, if `conditions` is empty.
[ "Concatenate", "conditions", "with", "operator", "and", "wrap", "it", "by", "()", "." ]
train
https://github.com/tkf/rash/blob/585da418ec37dd138f1a4277718b6f507e9536a2/rash/utils/sqlconstructor.py#L21-L29
tkf/rash
rash/utils/sqlconstructor.py
SQLConstructor.join
def join(self, source, op='LEFT JOIN', on=''): """ Join `source`. >>> sc = SQLConstructor('main', ['c1', 'c2']) >>> sc.join('sub', 'JOIN', 'main.id = sub.id') >>> (sql, params, keys) = sc.compile() >>> sql 'SELECT c1, c2 FROM main JOIN sub ON main.id = sub.id' It is possible to pass another `SQLConstructor` as a source. >>> sc = SQLConstructor('main', ['c1', 'c2']) >>> sc.add_or_matches('{0} = {1}', 'c1', [111]) >>> subsc = SQLConstructor('sub', ['d1', 'd2']) >>> subsc.add_or_matches('{0} = {1}', 'd1', ['abc']) >>> sc.join(subsc, 'JOIN', 'main.id = sub.id') >>> sc.add_column('d1') >>> (sql, params, keys) = sc.compile() >>> print(sql) # doctest: +NORMALIZE_WHITESPACE SELECT c1, c2, d1 FROM main JOIN ( SELECT d1, d2 FROM sub WHERE (d1 = ?) ) ON main.id = sub.id WHERE (c1 = ?) `params` is set appropriately to include parameters for joined source: >>> params ['abc', 111] Note that `subsc.compile` is called when `sc.join(subsc, ...)` is called. Therefore, calling `subsc.add_<predicate>` does not effect `sc`. :type source: str or SQLConstructor :arg source: table :type op: str :arg op: operation (e.g., 'JOIN') :type on: str :arg on: on clause. `source` ("right" source) can be referred using `{r}` formatting field. """ if isinstance(source, SQLConstructor): (sql, params, _) = source.compile() self.join_params.extend(params) jsrc = '( {0} )'.format(sql) if source.table_alias: jsrc += ' AS ' + source.table_alias on = on.format(r=source.table_alias) else: jsrc = source on = on.format(r=source) constraint = 'ON {0}'.format(on) if on else '' self.join_source = ' '.join([self.join_source, op, jsrc, constraint])
python
def join(self, source, op='LEFT JOIN', on=''): """ Join `source`. >>> sc = SQLConstructor('main', ['c1', 'c2']) >>> sc.join('sub', 'JOIN', 'main.id = sub.id') >>> (sql, params, keys) = sc.compile() >>> sql 'SELECT c1, c2 FROM main JOIN sub ON main.id = sub.id' It is possible to pass another `SQLConstructor` as a source. >>> sc = SQLConstructor('main', ['c1', 'c2']) >>> sc.add_or_matches('{0} = {1}', 'c1', [111]) >>> subsc = SQLConstructor('sub', ['d1', 'd2']) >>> subsc.add_or_matches('{0} = {1}', 'd1', ['abc']) >>> sc.join(subsc, 'JOIN', 'main.id = sub.id') >>> sc.add_column('d1') >>> (sql, params, keys) = sc.compile() >>> print(sql) # doctest: +NORMALIZE_WHITESPACE SELECT c1, c2, d1 FROM main JOIN ( SELECT d1, d2 FROM sub WHERE (d1 = ?) ) ON main.id = sub.id WHERE (c1 = ?) `params` is set appropriately to include parameters for joined source: >>> params ['abc', 111] Note that `subsc.compile` is called when `sc.join(subsc, ...)` is called. Therefore, calling `subsc.add_<predicate>` does not effect `sc`. :type source: str or SQLConstructor :arg source: table :type op: str :arg op: operation (e.g., 'JOIN') :type on: str :arg on: on clause. `source` ("right" source) can be referred using `{r}` formatting field. """ if isinstance(source, SQLConstructor): (sql, params, _) = source.compile() self.join_params.extend(params) jsrc = '( {0} )'.format(sql) if source.table_alias: jsrc += ' AS ' + source.table_alias on = on.format(r=source.table_alias) else: jsrc = source on = on.format(r=source) constraint = 'ON {0}'.format(on) if on else '' self.join_source = ' '.join([self.join_source, op, jsrc, constraint])
[ "def", "join", "(", "self", ",", "source", ",", "op", "=", "'LEFT JOIN'", ",", "on", "=", "''", ")", ":", "if", "isinstance", "(", "source", ",", "SQLConstructor", ")", ":", "(", "sql", ",", "params", ",", "_", ")", "=", "source", ".", "compile", ...
Join `source`. >>> sc = SQLConstructor('main', ['c1', 'c2']) >>> sc.join('sub', 'JOIN', 'main.id = sub.id') >>> (sql, params, keys) = sc.compile() >>> sql 'SELECT c1, c2 FROM main JOIN sub ON main.id = sub.id' It is possible to pass another `SQLConstructor` as a source. >>> sc = SQLConstructor('main', ['c1', 'c2']) >>> sc.add_or_matches('{0} = {1}', 'c1', [111]) >>> subsc = SQLConstructor('sub', ['d1', 'd2']) >>> subsc.add_or_matches('{0} = {1}', 'd1', ['abc']) >>> sc.join(subsc, 'JOIN', 'main.id = sub.id') >>> sc.add_column('d1') >>> (sql, params, keys) = sc.compile() >>> print(sql) # doctest: +NORMALIZE_WHITESPACE SELECT c1, c2, d1 FROM main JOIN ( SELECT d1, d2 FROM sub WHERE (d1 = ?) ) ON main.id = sub.id WHERE (c1 = ?) `params` is set appropriately to include parameters for joined source: >>> params ['abc', 111] Note that `subsc.compile` is called when `sc.join(subsc, ...)` is called. Therefore, calling `subsc.add_<predicate>` does not effect `sc`. :type source: str or SQLConstructor :arg source: table :type op: str :arg op: operation (e.g., 'JOIN') :type on: str :arg on: on clause. `source` ("right" source) can be referred using `{r}` formatting field.
[ "Join", "source", "." ]
train
https://github.com/tkf/rash/blob/585da418ec37dd138f1a4277718b6f507e9536a2/rash/utils/sqlconstructor.py#L84-L139
tkf/rash
rash/utils/sqlconstructor.py
SQLConstructor.compile
def compile(self): """ Compile SQL and return 3-tuple ``(sql, params, keys)``. Example usage:: (sql, params, keys) = sc.compile() for row in cursor.execute(sql, params): record = dict(zip(keys, row)) """ params = self.column_params + self.join_params + self.params if self.limit and self.limit >= 0: self.sql_limit = 'LIMIT ?' params += [self.limit] return (self.sql, params, self.keys)
python
def compile(self): """ Compile SQL and return 3-tuple ``(sql, params, keys)``. Example usage:: (sql, params, keys) = sc.compile() for row in cursor.execute(sql, params): record = dict(zip(keys, row)) """ params = self.column_params + self.join_params + self.params if self.limit and self.limit >= 0: self.sql_limit = 'LIMIT ?' params += [self.limit] return (self.sql, params, self.keys)
[ "def", "compile", "(", "self", ")", ":", "params", "=", "self", ".", "column_params", "+", "self", ".", "join_params", "+", "self", ".", "params", "if", "self", ".", "limit", "and", "self", ".", "limit", ">=", "0", ":", "self", ".", "sql_limit", "=",...
Compile SQL and return 3-tuple ``(sql, params, keys)``. Example usage:: (sql, params, keys) = sc.compile() for row in cursor.execute(sql, params): record = dict(zip(keys, row))
[ "Compile", "SQL", "and", "return", "3", "-", "tuple", "(", "sql", "params", "keys", ")", "." ]
train
https://github.com/tkf/rash/blob/585da418ec37dd138f1a4277718b6f507e9536a2/rash/utils/sqlconstructor.py#L175-L190
tkf/rash
rash/utils/sqlconstructor.py
SQLConstructor.add_and_matches
def add_and_matches(self, matcher, lhs, params, numq=1, flatten=None): """ Add AND conditions to match to `params`. :type matcher: str or callable :arg matcher: if `str`, `matcher.format` is used. :type lhs: str :arg lhs: the first argument to `matcher`. :type params: list :arg params: each element should be able to feed into sqlite '?'. :type numq: int :arg numq: number of parameters for each condition. :type flatten: None or callable :arg flatten: when `numq > 1`, it should return a list of length `numq * len(params)`. """ params = self._adapt_params(params) qs = ['?'] * numq flatten = flatten or self._default_flatten(numq) expr = repeat(adapt_matcher(matcher)(lhs, *qs), len(params)) self.conditions.extend(expr) self.params.extend(flatten(params))
python
def add_and_matches(self, matcher, lhs, params, numq=1, flatten=None): """ Add AND conditions to match to `params`. :type matcher: str or callable :arg matcher: if `str`, `matcher.format` is used. :type lhs: str :arg lhs: the first argument to `matcher`. :type params: list :arg params: each element should be able to feed into sqlite '?'. :type numq: int :arg numq: number of parameters for each condition. :type flatten: None or callable :arg flatten: when `numq > 1`, it should return a list of length `numq * len(params)`. """ params = self._adapt_params(params) qs = ['?'] * numq flatten = flatten or self._default_flatten(numq) expr = repeat(adapt_matcher(matcher)(lhs, *qs), len(params)) self.conditions.extend(expr) self.params.extend(flatten(params))
[ "def", "add_and_matches", "(", "self", ",", "matcher", ",", "lhs", ",", "params", ",", "numq", "=", "1", ",", "flatten", "=", "None", ")", ":", "params", "=", "self", ".", "_adapt_params", "(", "params", ")", "qs", "=", "[", "'?'", "]", "*", "numq"...
Add AND conditions to match to `params`. :type matcher: str or callable :arg matcher: if `str`, `matcher.format` is used. :type lhs: str :arg lhs: the first argument to `matcher`. :type params: list :arg params: each element should be able to feed into sqlite '?'. :type numq: int :arg numq: number of parameters for each condition. :type flatten: None or callable :arg flatten: when `numq > 1`, it should return a list of length `numq * len(params)`.
[ "Add", "AND", "conditions", "to", "match", "to", "params", "." ]
train
https://github.com/tkf/rash/blob/585da418ec37dd138f1a4277718b6f507e9536a2/rash/utils/sqlconstructor.py#L208-L230
tkf/rash
rash/utils/sqlconstructor.py
SQLConstructor.add_matches
def add_matches(self, matcher, lhs, match_params=[], include_params=[], exclude_params=[], numq=1, flatten=None): """ Quick way to call `add_or_matches` and `add_and_matches`. """ matcher = adapt_matcher(matcher) notmatcher = negate(matcher) self.add_and_matches(matcher, lhs, match_params, numq, flatten) self.add_or_matches(matcher, lhs, include_params, numq, flatten) self.add_and_matches(notmatcher, lhs, exclude_params, numq, flatten)
python
def add_matches(self, matcher, lhs, match_params=[], include_params=[], exclude_params=[], numq=1, flatten=None): """ Quick way to call `add_or_matches` and `add_and_matches`. """ matcher = adapt_matcher(matcher) notmatcher = negate(matcher) self.add_and_matches(matcher, lhs, match_params, numq, flatten) self.add_or_matches(matcher, lhs, include_params, numq, flatten) self.add_and_matches(notmatcher, lhs, exclude_params, numq, flatten)
[ "def", "add_matches", "(", "self", ",", "matcher", ",", "lhs", ",", "match_params", "=", "[", "]", ",", "include_params", "=", "[", "]", ",", "exclude_params", "=", "[", "]", ",", "numq", "=", "1", ",", "flatten", "=", "None", ")", ":", "matcher", ...
Quick way to call `add_or_matches` and `add_and_matches`.
[ "Quick", "way", "to", "call", "add_or_matches", "and", "add_and_matches", "." ]
train
https://github.com/tkf/rash/blob/585da418ec37dd138f1a4277718b6f507e9536a2/rash/utils/sqlconstructor.py#L243-L253
tkf/rash
rash/utils/sqlconstructor.py
SQLConstructor.uniquify_by
def uniquify_by(self, column, chooser=None, aggregate='MAX'): """ Group by `column` and run `aggregate` function on `chooser` column. """ self.group_by.append(column) if chooser: i = self.columns.index(chooser) self.columns[i] = '{0}({1})'.format(aggregate, self.columns[i])
python
def uniquify_by(self, column, chooser=None, aggregate='MAX'): """ Group by `column` and run `aggregate` function on `chooser` column. """ self.group_by.append(column) if chooser: i = self.columns.index(chooser) self.columns[i] = '{0}({1})'.format(aggregate, self.columns[i])
[ "def", "uniquify_by", "(", "self", ",", "column", ",", "chooser", "=", "None", ",", "aggregate", "=", "'MAX'", ")", ":", "self", ".", "group_by", ".", "append", "(", "column", ")", "if", "chooser", ":", "i", "=", "self", ".", "columns", ".", "index",...
Group by `column` and run `aggregate` function on `chooser` column.
[ "Group", "by", "column", "and", "run", "aggregate", "function", "on", "chooser", "column", "." ]
train
https://github.com/tkf/rash/blob/585da418ec37dd138f1a4277718b6f507e9536a2/rash/utils/sqlconstructor.py#L255-L262
tkf/rash
rash/utils/sqlconstructor.py
SQLConstructor.move_where_clause_to_column
def move_where_clause_to_column(self, column='condition', key=None): """ Move whole WHERE clause to a column named `column`. """ if self.conditions: expr = " AND ".join(self.conditions) params = self.params self.params = [] self.conditions = [] else: expr = '1' params = [] self.add_column('({0}) AS {1}'.format(expr, column), key or column, params)
python
def move_where_clause_to_column(self, column='condition', key=None): """ Move whole WHERE clause to a column named `column`. """ if self.conditions: expr = " AND ".join(self.conditions) params = self.params self.params = [] self.conditions = [] else: expr = '1' params = [] self.add_column('({0}) AS {1}'.format(expr, column), key or column, params)
[ "def", "move_where_clause_to_column", "(", "self", ",", "column", "=", "'condition'", ",", "key", "=", "None", ")", ":", "if", "self", ".", "conditions", ":", "expr", "=", "\" AND \"", ".", "join", "(", "self", ".", "conditions", ")", "params", "=", "sel...
Move whole WHERE clause to a column named `column`.
[ "Move", "whole", "WHERE", "clause", "to", "a", "column", "named", "column", "." ]
train
https://github.com/tkf/rash/blob/585da418ec37dd138f1a4277718b6f507e9536a2/rash/utils/sqlconstructor.py#L275-L289
INM-6/hybridLFPy
examples/example_plotting.py
remove_axis_junk
def remove_axis_junk(ax, which=['right', 'top']): '''remove upper and right axis''' for loc, spine in ax.spines.items(): if loc in which: spine.set_color('none') ax.xaxis.set_ticks_position('bottom') ax.yaxis.set_ticks_position('left')
python
def remove_axis_junk(ax, which=['right', 'top']): '''remove upper and right axis''' for loc, spine in ax.spines.items(): if loc in which: spine.set_color('none') ax.xaxis.set_ticks_position('bottom') ax.yaxis.set_ticks_position('left')
[ "def", "remove_axis_junk", "(", "ax", ",", "which", "=", "[", "'right'", ",", "'top'", "]", ")", ":", "for", "loc", ",", "spine", "in", "ax", ".", "spines", ".", "items", "(", ")", ":", "if", "loc", "in", "which", ":", "spine", ".", "set_color", ...
remove upper and right axis
[ "remove", "upper", "and", "right", "axis" ]
train
https://github.com/INM-6/hybridLFPy/blob/c38bdf38982c4624c2f70caeb50c40f1d5980abd/examples/example_plotting.py#L13-L19
INM-6/hybridLFPy
examples/example_plotting.py
plot_signal_sum
def plot_signal_sum(ax, z, fname='LFPsum.h5', unit='mV', ylabels=True, scalebar=True, vlimround=None, T=[0, 1000], color='k', label=''): ''' on axes plot the signal contributions args: :: ax : matplotlib.axes.AxesSubplot object z : np.ndarray T : list, [tstart, tstop], which timeinterval ylims : list, set range of yaxis to scale with other plots fancy : bool, scaling_factor : float, scaling factor (e.g. to scale 10% data set up) ''' #open file and get data, samplingrate f = h5py.File(fname) data = f['data'].value dataT = data.T - data.mean(axis=1) data = dataT.T srate = f['srate'].value #close file object f.close() # normalize data for plot tvec = np.arange(data.shape[1]) * 1000. / srate slica = (tvec <= T[1]) & (tvec >= T[0]) zvec = np.r_[z] zvec = np.r_[zvec, zvec[-1] + np.diff(zvec)[-1]] vlim = abs(data[:, slica]).max() if vlimround is None: vlimround = 2.**np.round(np.log2(vlim)) yticklabels=[] yticks = [] colors = [color]*data.shape[0] for i, z in enumerate(z): if i == 0: ax.plot(tvec[slica], data[i, slica] * 100 / vlimround + z, color=colors[i], rasterized=False, label=label, clip_on=False) else: ax.plot(tvec[slica], data[i, slica] * 100 / vlimround + z, color=colors[i], rasterized=False, clip_on=False) yticklabels.append('ch. %i' % (i+1)) yticks.append(z) if scalebar: ax.plot([tvec[slica][-1], tvec[slica][-1]], [-0, -100], lw=2, color='k', clip_on=False) ax.text(tvec[slica][-1]+np.diff(T)*0.02, -50, r'%g %s' % (vlimround, unit), color='k', rotation='vertical') ax.axis(ax.axis('tight')) ax.yaxis.set_ticks(yticks) if ylabels: ax.yaxis.set_ticklabels(yticklabels) else: ax.yaxis.set_ticklabels([]) for loc, spine in ax.spines.items(): if loc in ['right', 'top']: spine.set_color('none') ax.xaxis.set_ticks_position('bottom') ax.yaxis.set_ticks_position('left') ax.set_xlabel(r'time (ms)', labelpad=0)
python
def plot_signal_sum(ax, z, fname='LFPsum.h5', unit='mV', ylabels=True, scalebar=True, vlimround=None, T=[0, 1000], color='k', label=''): ''' on axes plot the signal contributions args: :: ax : matplotlib.axes.AxesSubplot object z : np.ndarray T : list, [tstart, tstop], which timeinterval ylims : list, set range of yaxis to scale with other plots fancy : bool, scaling_factor : float, scaling factor (e.g. to scale 10% data set up) ''' #open file and get data, samplingrate f = h5py.File(fname) data = f['data'].value dataT = data.T - data.mean(axis=1) data = dataT.T srate = f['srate'].value #close file object f.close() # normalize data for plot tvec = np.arange(data.shape[1]) * 1000. / srate slica = (tvec <= T[1]) & (tvec >= T[0]) zvec = np.r_[z] zvec = np.r_[zvec, zvec[-1] + np.diff(zvec)[-1]] vlim = abs(data[:, slica]).max() if vlimround is None: vlimround = 2.**np.round(np.log2(vlim)) yticklabels=[] yticks = [] colors = [color]*data.shape[0] for i, z in enumerate(z): if i == 0: ax.plot(tvec[slica], data[i, slica] * 100 / vlimround + z, color=colors[i], rasterized=False, label=label, clip_on=False) else: ax.plot(tvec[slica], data[i, slica] * 100 / vlimround + z, color=colors[i], rasterized=False, clip_on=False) yticklabels.append('ch. %i' % (i+1)) yticks.append(z) if scalebar: ax.plot([tvec[slica][-1], tvec[slica][-1]], [-0, -100], lw=2, color='k', clip_on=False) ax.text(tvec[slica][-1]+np.diff(T)*0.02, -50, r'%g %s' % (vlimround, unit), color='k', rotation='vertical') ax.axis(ax.axis('tight')) ax.yaxis.set_ticks(yticks) if ylabels: ax.yaxis.set_ticklabels(yticklabels) else: ax.yaxis.set_ticklabels([]) for loc, spine in ax.spines.items(): if loc in ['right', 'top']: spine.set_color('none') ax.xaxis.set_ticks_position('bottom') ax.yaxis.set_ticks_position('left') ax.set_xlabel(r'time (ms)', labelpad=0)
[ "def", "plot_signal_sum", "(", "ax", ",", "z", ",", "fname", "=", "'LFPsum.h5'", ",", "unit", "=", "'mV'", ",", "ylabels", "=", "True", ",", "scalebar", "=", "True", ",", "vlimround", "=", "None", ",", "T", "=", "[", "0", ",", "1000", "]", ",", "...
on axes plot the signal contributions args: :: ax : matplotlib.axes.AxesSubplot object z : np.ndarray T : list, [tstart, tstop], which timeinterval ylims : list, set range of yaxis to scale with other plots fancy : bool, scaling_factor : float, scaling factor (e.g. to scale 10% data set up)
[ "on", "axes", "plot", "the", "signal", "contributions", "args", ":", "::", "ax", ":", "matplotlib", ".", "axes", ".", "AxesSubplot", "object", "z", ":", "np", ".", "ndarray", "T", ":", "list", "[", "tstart", "tstop", "]", "which", "timeinterval", "ylims"...
train
https://github.com/INM-6/hybridLFPy/blob/c38bdf38982c4624c2f70caeb50c40f1d5980abd/examples/example_plotting.py#L22-L91
INM-6/hybridLFPy
examples/example_plotting.py
plot_population
def plot_population(ax, populationParams, electrodeParams, layerBoundaries, aspect='equal', isometricangle=np.pi/12, X=['EX', 'IN'], markers=['^', 'o'], colors=['r', 'b'], layers = ['upper', 'lower'], title='positions'): ''' Plot the geometry of the column model, optionally with somatic locations and optionally with reconstructed neurons kwargs: :: ax : matplotlib.axes.AxesSubplot aspect : str matplotlib.axis argument isometricangle : float pseudo-3d view angle plot_somas : bool plot soma locations plot_morphos : bool plot full morphologies num_unitsE : int number of excitatory morphos plotted per population num_unitsI : int number of inhibitory morphos plotted per population clip_dendrites : bool draw dendrites outside of axis mainpops : bool if True, plot only main pops, e.g. b23 and nb23 as L23I return: :: axis : list the plt.axis() corresponding to input aspect ''' remove_axis_junk(ax, ['right', 'bottom', 'left', 'top']) # DRAW OUTLINE OF POPULATIONS ax.xaxis.set_ticks([]) ax.yaxis.set_ticks([]) #contact points ax.plot(electrodeParams['x'], electrodeParams['z'], '.', marker='o', markersize=5, color='k', zorder=0) #outline of electrode x_0 = np.array(electrodeParams['r_z'])[1, 1:-1] z_0 = np.array(electrodeParams['r_z'])[0, 1:-1] x = np.r_[x_0[-1], x_0[::-1], -x_0[1:], -x_0[-1]] z = np.r_[100, z_0[::-1], z_0[1:], 100] ax.fill(x, z, color=(0.5, 0.5, 0.5), lw=None, zorder=-0.1) #outline of populations: #fetch the population radius from some population r = populationParams[populationParams.keys()[0]]['radius'] theta0 = np.linspace(0, np.pi, 20) theta1 = np.linspace(np.pi, 2*np.pi, 20) zpos = np.r_[np.array(layerBoundaries)[:, 0], np.array(layerBoundaries)[-1, 1]] for i, z in enumerate(np.mean(layerBoundaries, axis=1)): ax.text(r, z, ' %s' % layers[i], va='center', ha='left', rotation='vertical') for i, zval in enumerate(zpos): if i == 0: ax.plot(r*np.cos(theta0), r*np.sin(theta0)*np.sin(isometricangle)+zval, color='k', zorder=-r, clip_on=False) 
ax.plot(r*np.cos(theta1), r*np.sin(theta1)*np.sin(isometricangle)+zval, color='k', zorder=r, clip_on=False) else: ax.plot(r*np.cos(theta0), r*np.sin(theta0)*np.sin(isometricangle)+zval, color='gray', zorder=-r, clip_on=False) ax.plot(r*np.cos(theta1), r*np.sin(theta1)*np.sin(isometricangle)+zval, color='k', zorder=r, clip_on=False) ax.plot([-r, -r], [zpos[0], zpos[-1]], 'k', zorder=0, clip_on=False) ax.plot([r, r], [zpos[0], zpos[-1]], 'k', zorder=0, clip_on=False) #plot a horizontal radius scalebar ax.plot([0, r], [z_0.min()]*2, 'k', lw=2, zorder=0, clip_on=False) ax.text(r / 2., z_0.min()-100, 'r = %i $\mu$m' % int(r), ha='center') #plot a vertical depth scalebar ax.plot([-r]*2, [z_0.min()+50, z_0.min()-50], 'k', lw=2, zorder=0, clip_on=False) ax.text(-r, z_0.min(), r'100 $\mu$m', va='center', ha='right') ax.set_yticks([]) ax.set_yticklabels([]) #fake ticks: for pos in zpos: ax.text(-r, pos, 'z=%i-' % int(pos), ha='right', va='center') ax.set_title(title) axis = ax.axis(ax.axis(aspect))
python
def plot_population(ax, populationParams, electrodeParams, layerBoundaries, aspect='equal', isometricangle=np.pi/12, X=['EX', 'IN'], markers=['^', 'o'], colors=['r', 'b'], layers = ['upper', 'lower'], title='positions'): ''' Plot the geometry of the column model, optionally with somatic locations and optionally with reconstructed neurons kwargs: :: ax : matplotlib.axes.AxesSubplot aspect : str matplotlib.axis argument isometricangle : float pseudo-3d view angle plot_somas : bool plot soma locations plot_morphos : bool plot full morphologies num_unitsE : int number of excitatory morphos plotted per population num_unitsI : int number of inhibitory morphos plotted per population clip_dendrites : bool draw dendrites outside of axis mainpops : bool if True, plot only main pops, e.g. b23 and nb23 as L23I return: :: axis : list the plt.axis() corresponding to input aspect ''' remove_axis_junk(ax, ['right', 'bottom', 'left', 'top']) # DRAW OUTLINE OF POPULATIONS ax.xaxis.set_ticks([]) ax.yaxis.set_ticks([]) #contact points ax.plot(electrodeParams['x'], electrodeParams['z'], '.', marker='o', markersize=5, color='k', zorder=0) #outline of electrode x_0 = np.array(electrodeParams['r_z'])[1, 1:-1] z_0 = np.array(electrodeParams['r_z'])[0, 1:-1] x = np.r_[x_0[-1], x_0[::-1], -x_0[1:], -x_0[-1]] z = np.r_[100, z_0[::-1], z_0[1:], 100] ax.fill(x, z, color=(0.5, 0.5, 0.5), lw=None, zorder=-0.1) #outline of populations: #fetch the population radius from some population r = populationParams[populationParams.keys()[0]]['radius'] theta0 = np.linspace(0, np.pi, 20) theta1 = np.linspace(np.pi, 2*np.pi, 20) zpos = np.r_[np.array(layerBoundaries)[:, 0], np.array(layerBoundaries)[-1, 1]] for i, z in enumerate(np.mean(layerBoundaries, axis=1)): ax.text(r, z, ' %s' % layers[i], va='center', ha='left', rotation='vertical') for i, zval in enumerate(zpos): if i == 0: ax.plot(r*np.cos(theta0), r*np.sin(theta0)*np.sin(isometricangle)+zval, color='k', zorder=-r, clip_on=False) 
ax.plot(r*np.cos(theta1), r*np.sin(theta1)*np.sin(isometricangle)+zval, color='k', zorder=r, clip_on=False) else: ax.plot(r*np.cos(theta0), r*np.sin(theta0)*np.sin(isometricangle)+zval, color='gray', zorder=-r, clip_on=False) ax.plot(r*np.cos(theta1), r*np.sin(theta1)*np.sin(isometricangle)+zval, color='k', zorder=r, clip_on=False) ax.plot([-r, -r], [zpos[0], zpos[-1]], 'k', zorder=0, clip_on=False) ax.plot([r, r], [zpos[0], zpos[-1]], 'k', zorder=0, clip_on=False) #plot a horizontal radius scalebar ax.plot([0, r], [z_0.min()]*2, 'k', lw=2, zorder=0, clip_on=False) ax.text(r / 2., z_0.min()-100, 'r = %i $\mu$m' % int(r), ha='center') #plot a vertical depth scalebar ax.plot([-r]*2, [z_0.min()+50, z_0.min()-50], 'k', lw=2, zorder=0, clip_on=False) ax.text(-r, z_0.min(), r'100 $\mu$m', va='center', ha='right') ax.set_yticks([]) ax.set_yticklabels([]) #fake ticks: for pos in zpos: ax.text(-r, pos, 'z=%i-' % int(pos), ha='right', va='center') ax.set_title(title) axis = ax.axis(ax.axis(aspect))
[ "def", "plot_population", "(", "ax", ",", "populationParams", ",", "electrodeParams", ",", "layerBoundaries", ",", "aspect", "=", "'equal'", ",", "isometricangle", "=", "np", ".", "pi", "/", "12", ",", "X", "=", "[", "'EX'", ",", "'IN'", "]", ",", "marke...
Plot the geometry of the column model, optionally with somatic locations and optionally with reconstructed neurons kwargs: :: ax : matplotlib.axes.AxesSubplot aspect : str matplotlib.axis argument isometricangle : float pseudo-3d view angle plot_somas : bool plot soma locations plot_morphos : bool plot full morphologies num_unitsE : int number of excitatory morphos plotted per population num_unitsI : int number of inhibitory morphos plotted per population clip_dendrites : bool draw dendrites outside of axis mainpops : bool if True, plot only main pops, e.g. b23 and nb23 as L23I return: :: axis : list the plt.axis() corresponding to input aspect
[ "Plot", "the", "geometry", "of", "the", "column", "model", "optionally", "with", "somatic", "locations", "and", "optionally", "with", "reconstructed", "neurons", "kwargs", ":", "::", "ax", ":", "matplotlib", ".", "axes", ".", "AxesSubplot", "aspect", ":", "str...
train
https://github.com/INM-6/hybridLFPy/blob/c38bdf38982c4624c2f70caeb50c40f1d5980abd/examples/example_plotting.py#L108-L219
INM-6/hybridLFPy
examples/example_plotting.py
normalize
def normalize(x): '''normalize x to have mean 0 and unity standard deviation''' x = x.astype(float) x -= x.mean() return x / float(x.std())
python
def normalize(x): '''normalize x to have mean 0 and unity standard deviation''' x = x.astype(float) x -= x.mean() return x / float(x.std())
[ "def", "normalize", "(", "x", ")", ":", "x", "=", "x", ".", "astype", "(", "float", ")", "x", "-=", "x", ".", "mean", "(", ")", "return", "x", "/", "float", "(", "x", ".", "std", "(", ")", ")" ]
normalize x to have mean 0 and unity standard deviation
[ "normalize", "x", "to", "have", "mean", "0", "and", "unity", "standard", "deviation" ]
train
https://github.com/INM-6/hybridLFPy/blob/c38bdf38982c4624c2f70caeb50c40f1d5980abd/examples/example_plotting.py#L324-L328
INM-6/hybridLFPy
examples/example_plotting.py
plot_correlation
def plot_correlation(z_vec, x0, x1, ax, lag=20., title='firing_rate vs LFP'): ''' mls on axes plot the correlation between x0 and x1 args: :: x0 : first dataset x1 : second dataset - e.g., the multichannel LFP ax : matplotlib.axes.AxesSubplot object title : text to be used as current axis object title ''' zvec = np.r_[z_vec] zvec = np.r_[zvec, zvec[-1] + np.diff(zvec)[-1]] xcorr_all=np.zeros((np.size(z_vec), x0.shape[0])) for i, z in enumerate(z_vec): x2 = x1[i, ] xcorr1 = np.correlate(normalize(x0), normalize(x2), 'same') / x0.size xcorr_all[i,:]=xcorr1 # Find limits for the plot vlim = abs(xcorr_all).max() vlimround = 2.**np.round(np.log2(vlim)) yticklabels=[] yticks = [] ylimfound=np.zeros((1,2)) for i, z in enumerate(z_vec): ind = np.arange(x0.size) - x0.size/2 ax.plot(ind, xcorr_all[i,::-1] * 100. / vlimround + z, 'k', clip_on=True, rasterized=False) yticklabels.append('ch. %i' %(i+1)) yticks.append(z) remove_axis_junk(ax) ax.set_title(title) ax.set_xlabel(r'lag $\tau$ (ms)') ax.set_xlim(-lag, lag) ax.set_ylim(z-100, 100) axis = ax.axis() ax.vlines(0, axis[2], axis[3], 'r', 'dotted') ax.yaxis.set_ticks(yticks) ax.yaxis.set_ticklabels(yticklabels) ax.xaxis.set_ticks_position('bottom') ax.yaxis.set_ticks_position('left') # Create a scaling bar ax.plot([lag, lag], [0, 100], lw=2, color='k', clip_on=False) ax.text(lag, 50, r'CC=%.2f' % vlimround, rotation='vertical', va='center')
python
def plot_correlation(z_vec, x0, x1, ax, lag=20., title='firing_rate vs LFP'): ''' mls on axes plot the correlation between x0 and x1 args: :: x0 : first dataset x1 : second dataset - e.g., the multichannel LFP ax : matplotlib.axes.AxesSubplot object title : text to be used as current axis object title ''' zvec = np.r_[z_vec] zvec = np.r_[zvec, zvec[-1] + np.diff(zvec)[-1]] xcorr_all=np.zeros((np.size(z_vec), x0.shape[0])) for i, z in enumerate(z_vec): x2 = x1[i, ] xcorr1 = np.correlate(normalize(x0), normalize(x2), 'same') / x0.size xcorr_all[i,:]=xcorr1 # Find limits for the plot vlim = abs(xcorr_all).max() vlimround = 2.**np.round(np.log2(vlim)) yticklabels=[] yticks = [] ylimfound=np.zeros((1,2)) for i, z in enumerate(z_vec): ind = np.arange(x0.size) - x0.size/2 ax.plot(ind, xcorr_all[i,::-1] * 100. / vlimround + z, 'k', clip_on=True, rasterized=False) yticklabels.append('ch. %i' %(i+1)) yticks.append(z) remove_axis_junk(ax) ax.set_title(title) ax.set_xlabel(r'lag $\tau$ (ms)') ax.set_xlim(-lag, lag) ax.set_ylim(z-100, 100) axis = ax.axis() ax.vlines(0, axis[2], axis[3], 'r', 'dotted') ax.yaxis.set_ticks(yticks) ax.yaxis.set_ticklabels(yticklabels) ax.xaxis.set_ticks_position('bottom') ax.yaxis.set_ticks_position('left') # Create a scaling bar ax.plot([lag, lag], [0, 100], lw=2, color='k', clip_on=False) ax.text(lag, 50, r'CC=%.2f' % vlimround, rotation='vertical', va='center')
[ "def", "plot_correlation", "(", "z_vec", ",", "x0", ",", "x1", ",", "ax", ",", "lag", "=", "20.", ",", "title", "=", "'firing_rate vs LFP'", ")", ":", "zvec", "=", "np", ".", "r_", "[", "z_vec", "]", "zvec", "=", "np", ".", "r_", "[", "zvec", ","...
mls on axes plot the correlation between x0 and x1 args: :: x0 : first dataset x1 : second dataset - e.g., the multichannel LFP ax : matplotlib.axes.AxesSubplot object title : text to be used as current axis object title
[ "mls", "on", "axes", "plot", "the", "correlation", "between", "x0", "and", "x1", "args", ":", "::", "x0", ":", "first", "dataset", "x1", ":", "second", "dataset", "-", "e", ".", "g", ".", "the", "multichannel", "LFP", "ax", ":", "matplotlib", ".", "a...
train
https://github.com/INM-6/hybridLFPy/blob/c38bdf38982c4624c2f70caeb50c40f1d5980abd/examples/example_plotting.py#L331-L386
INM-6/hybridLFPy
hybridLFPy/csd.py
_PrPz
def _PrPz(r0, z0, r1, z1, r2, z2, r3, z3): """ Intersection point for infinite lines. Parameters ---------- r0 : float z0 : float r1 : float z1 : float r2 : float z2 : float r3 : float z3 : float Returns ---------- Pr : float Pz : float hit : bool """ Pr = ((r0*z1 - z0*r1)*(r2 - r3) - (r0 - r1)*(r2*z3 - r3*z2)) / \ ((r0 - r1)*(z2 - z3) - (z0 - z1)*(r2-r3)) Pz = ((r0*z1 - z0*r1)*(z2 - z3) - (z0 - z1)*(r2*z3 - r3*z2)) / \ ((r0 - r1)*(z2 - z3) - (z0 - z1)*(r2-r3)) if Pr >= r0 and Pr <= r1 and Pz >= z0 and Pz <= z1: hit = True elif Pr <= r0 and Pr >= r1 and Pz >= z0 and Pz <= z1: hit = True elif Pr >= r0 and Pr <= r1 and Pz <= z0 and Pz >= z1: hit = True elif Pr <= r0 and Pr >= r1 and Pz <= z0 and Pz >= z1: hit = True else: hit = False return [Pr, Pz, hit]
python
def _PrPz(r0, z0, r1, z1, r2, z2, r3, z3): """ Intersection point for infinite lines. Parameters ---------- r0 : float z0 : float r1 : float z1 : float r2 : float z2 : float r3 : float z3 : float Returns ---------- Pr : float Pz : float hit : bool """ Pr = ((r0*z1 - z0*r1)*(r2 - r3) - (r0 - r1)*(r2*z3 - r3*z2)) / \ ((r0 - r1)*(z2 - z3) - (z0 - z1)*(r2-r3)) Pz = ((r0*z1 - z0*r1)*(z2 - z3) - (z0 - z1)*(r2*z3 - r3*z2)) / \ ((r0 - r1)*(z2 - z3) - (z0 - z1)*(r2-r3)) if Pr >= r0 and Pr <= r1 and Pz >= z0 and Pz <= z1: hit = True elif Pr <= r0 and Pr >= r1 and Pz >= z0 and Pz <= z1: hit = True elif Pr >= r0 and Pr <= r1 and Pz <= z0 and Pz >= z1: hit = True elif Pr <= r0 and Pr >= r1 and Pz <= z0 and Pz >= z1: hit = True else: hit = False return [Pr, Pz, hit]
[ "def", "_PrPz", "(", "r0", ",", "z0", ",", "r1", ",", "z1", ",", "r2", ",", "z2", ",", "r3", ",", "z3", ")", ":", "Pr", "=", "(", "(", "r0", "*", "z1", "-", "z0", "*", "r1", ")", "*", "(", "r2", "-", "r3", ")", "-", "(", "r0", "-", ...
Intersection point for infinite lines. Parameters ---------- r0 : float z0 : float r1 : float z1 : float r2 : float z2 : float r3 : float z3 : float Returns ---------- Pr : float Pz : float hit : bool
[ "Intersection", "point", "for", "infinite", "lines", ".", "Parameters", "----------", "r0", ":", "float", "z0", ":", "float", "r1", ":", "float", "z1", ":", "float", "r2", ":", "float", "z2", ":", "float", "r3", ":", "float", "z3", ":", "float" ]
train
https://github.com/INM-6/hybridLFPy/blob/c38bdf38982c4624c2f70caeb50c40f1d5980abd/hybridLFPy/csd.py#L10-L48
INM-6/hybridLFPy
hybridLFPy/csd.py
true_lam_csd
def true_lam_csd(cell, dr=100, z=None): """ Return CSD from membrane currents as function along the coordinates of the electrode along z-axis. Parameters ---------- cell : `LFPy.cell.Cell` or `LFPy.cell.TemplateCell` object. Cell. dr : float Radius of the cylindrical CSD volume. z : numpy.ndarray Z-coordinates of electrode. Returns ---------- CSD : numpy.ndarray Current-source density (in pA * mum^-3). """ if type(z) != type(np.ndarray(shape=0)): raise ValueError('type(z) should be a numpy.ndarray') dz = abs(z[1] - z[0]) CSD = np.zeros((z.size, cell.tvec.size,)) r_end = np.sqrt(cell.xend**2 + cell.yend**2) r_start = np.sqrt(cell.xstart**2 + cell.ystart**2) volume = dz * np.pi * dr**2 for i in range(len(z)): aa0 = cell.zstart < z[i] + dz/2 aa1 = cell.zend < z[i] + dz/2 bb0 = cell.zstart >= z[i] - dz/2 bb1 = cell.zend >= z[i] - dz/2 cc0 = r_start < dr cc1 = r_end < dr ii = aa0 & bb0 & cc0 # startpoint inside volume jj = aa1 & bb1 & cc1 # endpoint inside volume for j in range(cell.zstart.size): # Calc fraction of source being inside control volume from 0-1 # both start and endpoint in volume if ii[j] and jj[j]: CSD[i,] = CSD[i, ] + cell.imem[j, ] / volume # Startpoint in volume: elif ii[j] and not jj[j]: z0 = cell.zstart[j] r0 = r_start[j] z1 = cell.zend[j] r1 = r_end[j] if r0 == r1: # Segment is parallel with z-axis frac = -(z0 - z[i]-dz/2) / (z1 - z0) else: # Not parallel with z-axis L2 = (r1 - r0)**2 + (z1 - z0)**2 z2 = [z[i]-dz/2, z[i]+dz/2, z[i]-dz/2] r2 = [0, 0, dr] z3 = [z[i]-dz/2, z[i]+dz/2, z[i]+dz/2] r3 = [dr, dr, dr] P = [] for k in range(3): P.append(_PrPz(r0, z0, r1, z1, r2[k], z2[k], r3[k], z3[k])) if P[k][2]: vL2 = (P[k][0] - r0)**2 + (P[k][1] -z0)**2 frac = np.sqrt(vL2 / L2) CSD[i,] = CSD[i, ] + frac * cell.imem[j, ] / volume # Endpoint in volume: elif jj[j] and not ii[j]: z0 = cell.zstart[j] r0 = r_start[j] z1 = cell.zend[j] r1 = r_end[j] if r0 == r1: # Segment is parallel with z-axis frac = (z1 - z[i]+dz/2) / (z1 - z0) else: # Not parallel with 
z-axis L2 = (r1 - r0)**2 + (z1 - z0)**2 z2 = [z[i]-dz/2, z[i]+dz/2, z[i]-dz/2] r2 = [0, 0, dr] z3 = [z[i]-dz/2, z[i]+dz/2, z[i]+dz/2] r3 = [dr, dr, dr] P = [] for k in range(3): P.append(_PrPz(r0, z0, r1, z1, r2[k], z2[k], r3[k], z3[k])) if P[k][2]: vL2 = (r1 - P[k][0])**2 + (z1 - P[k][1])**2 frac = np.sqrt(vL2 / L2) CSD[i,] = CSD[i, ] + frac * cell.imem[j, ] / volume else: pass return CSD
python
def true_lam_csd(cell, dr=100, z=None): """ Return CSD from membrane currents as function along the coordinates of the electrode along z-axis. Parameters ---------- cell : `LFPy.cell.Cell` or `LFPy.cell.TemplateCell` object. Cell. dr : float Radius of the cylindrical CSD volume. z : numpy.ndarray Z-coordinates of electrode. Returns ---------- CSD : numpy.ndarray Current-source density (in pA * mum^-3). """ if type(z) != type(np.ndarray(shape=0)): raise ValueError('type(z) should be a numpy.ndarray') dz = abs(z[1] - z[0]) CSD = np.zeros((z.size, cell.tvec.size,)) r_end = np.sqrt(cell.xend**2 + cell.yend**2) r_start = np.sqrt(cell.xstart**2 + cell.ystart**2) volume = dz * np.pi * dr**2 for i in range(len(z)): aa0 = cell.zstart < z[i] + dz/2 aa1 = cell.zend < z[i] + dz/2 bb0 = cell.zstart >= z[i] - dz/2 bb1 = cell.zend >= z[i] - dz/2 cc0 = r_start < dr cc1 = r_end < dr ii = aa0 & bb0 & cc0 # startpoint inside volume jj = aa1 & bb1 & cc1 # endpoint inside volume for j in range(cell.zstart.size): # Calc fraction of source being inside control volume from 0-1 # both start and endpoint in volume if ii[j] and jj[j]: CSD[i,] = CSD[i, ] + cell.imem[j, ] / volume # Startpoint in volume: elif ii[j] and not jj[j]: z0 = cell.zstart[j] r0 = r_start[j] z1 = cell.zend[j] r1 = r_end[j] if r0 == r1: # Segment is parallel with z-axis frac = -(z0 - z[i]-dz/2) / (z1 - z0) else: # Not parallel with z-axis L2 = (r1 - r0)**2 + (z1 - z0)**2 z2 = [z[i]-dz/2, z[i]+dz/2, z[i]-dz/2] r2 = [0, 0, dr] z3 = [z[i]-dz/2, z[i]+dz/2, z[i]+dz/2] r3 = [dr, dr, dr] P = [] for k in range(3): P.append(_PrPz(r0, z0, r1, z1, r2[k], z2[k], r3[k], z3[k])) if P[k][2]: vL2 = (P[k][0] - r0)**2 + (P[k][1] -z0)**2 frac = np.sqrt(vL2 / L2) CSD[i,] = CSD[i, ] + frac * cell.imem[j, ] / volume # Endpoint in volume: elif jj[j] and not ii[j]: z0 = cell.zstart[j] r0 = r_start[j] z1 = cell.zend[j] r1 = r_end[j] if r0 == r1: # Segment is parallel with z-axis frac = (z1 - z[i]+dz/2) / (z1 - z0) else: # Not parallel with 
z-axis L2 = (r1 - r0)**2 + (z1 - z0)**2 z2 = [z[i]-dz/2, z[i]+dz/2, z[i]-dz/2] r2 = [0, 0, dr] z3 = [z[i]-dz/2, z[i]+dz/2, z[i]+dz/2] r3 = [dr, dr, dr] P = [] for k in range(3): P.append(_PrPz(r0, z0, r1, z1, r2[k], z2[k], r3[k], z3[k])) if P[k][2]: vL2 = (r1 - P[k][0])**2 + (z1 - P[k][1])**2 frac = np.sqrt(vL2 / L2) CSD[i,] = CSD[i, ] + frac * cell.imem[j, ] / volume else: pass return CSD
[ "def", "true_lam_csd", "(", "cell", ",", "dr", "=", "100", ",", "z", "=", "None", ")", ":", "if", "type", "(", "z", ")", "!=", "type", "(", "np", ".", "ndarray", "(", "shape", "=", "0", ")", ")", ":", "raise", "ValueError", "(", "'type(z) should ...
Return CSD from membrane currents as function along the coordinates of the electrode along z-axis. Parameters ---------- cell : `LFPy.cell.Cell` or `LFPy.cell.TemplateCell` object. Cell. dr : float Radius of the cylindrical CSD volume. z : numpy.ndarray Z-coordinates of electrode. Returns ---------- CSD : numpy.ndarray Current-source density (in pA * mum^-3).
[ "Return", "CSD", "from", "membrane", "currents", "as", "function", "along", "the", "coordinates", "of", "the", "electrode", "along", "z", "-", "axis", "." ]
train
https://github.com/INM-6/hybridLFPy/blob/c38bdf38982c4624c2f70caeb50c40f1d5980abd/hybridLFPy/csd.py#L51-L156
tkf/rash
rash/search.py
search_run
def search_run(output, **kwds): """ Search command history. """ from .config import ConfigStore from .database import DataBase from .query import expand_query, preprocess_kwds cfstore = ConfigStore() kwds = expand_query(cfstore.get_config(), kwds) format = get_formatter(**kwds) fmtkeys = formatter_keys(format) candidates = set([ 'command_count', 'success_count', 'success_ratio', 'program_count']) kwds['additional_columns'] = candidates & set(fmtkeys) db = DataBase(cfstore.db_path) for crec in db.search_command_record(**preprocess_kwds(kwds)): output.write(format.format(**crec.__dict__))
python
def search_run(output, **kwds): """ Search command history. """ from .config import ConfigStore from .database import DataBase from .query import expand_query, preprocess_kwds cfstore = ConfigStore() kwds = expand_query(cfstore.get_config(), kwds) format = get_formatter(**kwds) fmtkeys = formatter_keys(format) candidates = set([ 'command_count', 'success_count', 'success_ratio', 'program_count']) kwds['additional_columns'] = candidates & set(fmtkeys) db = DataBase(cfstore.db_path) for crec in db.search_command_record(**preprocess_kwds(kwds)): output.write(format.format(**crec.__dict__))
[ "def", "search_run", "(", "output", ",", "*", "*", "kwds", ")", ":", "from", ".", "config", "import", "ConfigStore", "from", ".", "database", "import", "DataBase", "from", ".", "query", "import", "expand_query", ",", "preprocess_kwds", "cfstore", "=", "Confi...
Search command history.
[ "Search", "command", "history", "." ]
train
https://github.com/tkf/rash/blob/585da418ec37dd138f1a4277718b6f507e9536a2/rash/search.py#L29-L48
INM-6/hybridLFPy
examples/Hagen_et_al_2016_cercor/figure_01.py
fig_intro
def fig_intro(params, ana_params, T=[800, 1000], fraction=0.05, rasterized=False): '''set up plot for introduction''' ana_params.set_PLOS_2column_fig_style(ratio=0.5) #load spike as database networkSim = CachedNetwork(**params.networkSimParams) if analysis_params.bw: networkSim.colors = phlp.get_colors(len(networkSim.X)) #set up figure and subplots fig = plt.figure() gs = gridspec.GridSpec(3, 4) fig.subplots_adjust(left=0.05, right=0.95, wspace=0.5, hspace=0.) #network diagram ax0_1 = fig.add_subplot(gs[:, 0], frameon=False) ax0_1.set_title('point-neuron network', va='bottom') network_sketch(ax0_1, yscaling=1.3) ax0_1.xaxis.set_ticks([]) ax0_1.yaxis.set_ticks([]) phlp.annotate_subplot(ax0_1, ncols=4, nrows=1, letter='A', linear_offset=0.065) #network raster ax1 = fig.add_subplot(gs[:, 1], frameon=True) phlp.remove_axis_junk(ax1) phlp.annotate_subplot(ax1, ncols=4, nrows=1, letter='B', linear_offset=0.065) x, y = networkSim.get_xy(T, fraction=fraction) # networkSim.plot_raster(ax1, T, x, y, markersize=0.1, alpha=1.,legend=False, pop_names=True) networkSim.plot_raster(ax1, T, x, y, markersize=0.2, marker='_', alpha=1.,legend=False, pop_names=True, rasterized=rasterized) ax1.set_ylabel('') ax1.xaxis.set_major_locator(plt.MaxNLocator(4)) ax1.set_title('spiking activity', va='bottom') a = ax1.axis() ax1.vlines(x['TC'][0], a[2], a[3], 'k', lw=0.25) #population ax2 = fig.add_subplot(gs[:, 2], frameon=False) ax2.xaxis.set_ticks([]) ax2.yaxis.set_ticks([]) plot_population(ax2, params, isometricangle=np.pi/24, plot_somas=False, plot_morphos=True, num_unitsE=1, num_unitsI=1, clip_dendrites=True, main_pops=True, title='', rasterized=rasterized) ax2.set_title('multicompartment\nneurons', va='bottom', fontweight='normal') phlp.annotate_subplot(ax2, ncols=4, nrows=1, letter='C', linear_offset=0.065) #LFP traces in all channels ax3 = fig.add_subplot(gs[:, 3], frameon=True) phlp.remove_axis_junk(ax3) plot_signal_sum(ax3, params, fname=os.path.join(params.savefolder, 'LFPsum.h5'), 
unit='mV', vlimround=0.8, T=T, ylim=[ax2.axis()[2], ax2.axis()[3]], rasterized=False) ax3.set_title('LFP', va='bottom') ax3.xaxis.set_major_locator(plt.MaxNLocator(4)) phlp.annotate_subplot(ax3, ncols=4, nrows=1, letter='D', linear_offset=0.065) a = ax3.axis() ax3.vlines(x['TC'][0], a[2], a[3], 'k', lw=0.25) #draw some arrows: ax = plt.gca() ax.annotate("", xy=(0.27, 0.5), xytext=(.24, 0.5), xycoords="figure fraction", arrowprops=dict(facecolor='black', arrowstyle='simple'), ) ax.annotate("", xy=(0.52, 0.5), xytext=(.49, 0.5), xycoords="figure fraction", arrowprops=dict(facecolor='black', arrowstyle='simple'), ) ax.annotate("", xy=(0.78, 0.5), xytext=(.75, 0.5), xycoords="figure fraction", arrowprops=dict(facecolor='black', arrowstyle='simple'), ) return fig
python
def fig_intro(params, ana_params, T=[800, 1000], fraction=0.05, rasterized=False): '''set up plot for introduction''' ana_params.set_PLOS_2column_fig_style(ratio=0.5) #load spike as database networkSim = CachedNetwork(**params.networkSimParams) if analysis_params.bw: networkSim.colors = phlp.get_colors(len(networkSim.X)) #set up figure and subplots fig = plt.figure() gs = gridspec.GridSpec(3, 4) fig.subplots_adjust(left=0.05, right=0.95, wspace=0.5, hspace=0.) #network diagram ax0_1 = fig.add_subplot(gs[:, 0], frameon=False) ax0_1.set_title('point-neuron network', va='bottom') network_sketch(ax0_1, yscaling=1.3) ax0_1.xaxis.set_ticks([]) ax0_1.yaxis.set_ticks([]) phlp.annotate_subplot(ax0_1, ncols=4, nrows=1, letter='A', linear_offset=0.065) #network raster ax1 = fig.add_subplot(gs[:, 1], frameon=True) phlp.remove_axis_junk(ax1) phlp.annotate_subplot(ax1, ncols=4, nrows=1, letter='B', linear_offset=0.065) x, y = networkSim.get_xy(T, fraction=fraction) # networkSim.plot_raster(ax1, T, x, y, markersize=0.1, alpha=1.,legend=False, pop_names=True) networkSim.plot_raster(ax1, T, x, y, markersize=0.2, marker='_', alpha=1.,legend=False, pop_names=True, rasterized=rasterized) ax1.set_ylabel('') ax1.xaxis.set_major_locator(plt.MaxNLocator(4)) ax1.set_title('spiking activity', va='bottom') a = ax1.axis() ax1.vlines(x['TC'][0], a[2], a[3], 'k', lw=0.25) #population ax2 = fig.add_subplot(gs[:, 2], frameon=False) ax2.xaxis.set_ticks([]) ax2.yaxis.set_ticks([]) plot_population(ax2, params, isometricangle=np.pi/24, plot_somas=False, plot_morphos=True, num_unitsE=1, num_unitsI=1, clip_dendrites=True, main_pops=True, title='', rasterized=rasterized) ax2.set_title('multicompartment\nneurons', va='bottom', fontweight='normal') phlp.annotate_subplot(ax2, ncols=4, nrows=1, letter='C', linear_offset=0.065) #LFP traces in all channels ax3 = fig.add_subplot(gs[:, 3], frameon=True) phlp.remove_axis_junk(ax3) plot_signal_sum(ax3, params, fname=os.path.join(params.savefolder, 'LFPsum.h5'), 
unit='mV', vlimround=0.8, T=T, ylim=[ax2.axis()[2], ax2.axis()[3]], rasterized=False) ax3.set_title('LFP', va='bottom') ax3.xaxis.set_major_locator(plt.MaxNLocator(4)) phlp.annotate_subplot(ax3, ncols=4, nrows=1, letter='D', linear_offset=0.065) a = ax3.axis() ax3.vlines(x['TC'][0], a[2], a[3], 'k', lw=0.25) #draw some arrows: ax = plt.gca() ax.annotate("", xy=(0.27, 0.5), xytext=(.24, 0.5), xycoords="figure fraction", arrowprops=dict(facecolor='black', arrowstyle='simple'), ) ax.annotate("", xy=(0.52, 0.5), xytext=(.49, 0.5), xycoords="figure fraction", arrowprops=dict(facecolor='black', arrowstyle='simple'), ) ax.annotate("", xy=(0.78, 0.5), xytext=(.75, 0.5), xycoords="figure fraction", arrowprops=dict(facecolor='black', arrowstyle='simple'), ) return fig
[ "def", "fig_intro", "(", "params", ",", "ana_params", ",", "T", "=", "[", "800", ",", "1000", "]", ",", "fraction", "=", "0.05", ",", "rasterized", "=", "False", ")", ":", "ana_params", ".", "set_PLOS_2column_fig_style", "(", "ratio", "=", "0.5", ")", ...
set up plot for introduction
[ "set", "up", "plot", "for", "introduction" ]
train
https://github.com/INM-6/hybridLFPy/blob/c38bdf38982c4624c2f70caeb50c40f1d5980abd/examples/Hagen_et_al_2016_cercor/figure_01.py#L30-L114
tkf/rash
rash/indexer.py
Indexer.index_record
def index_record(self, json_path): """ Import `json_path` and remove it if :attr:`keep_json` is false. """ self.logger.debug('Indexing record: %s', json_path) json_path = os.path.abspath(json_path) self.check_path(json_path, '`json_path`') with open(json_path) as fp: try: dct = json.load(fp) except ValueError: warnings.warn( 'Ignoring invalid JSON file at: {0}'.format(json_path)) return record_type = self.get_record_type(json_path) kwds = {} if record_type == 'command': importer = self.db.import_dict kwds.update(check_duplicate=self.check_duplicate) elif record_type == 'init': importer = self.db.import_init_dict elif record_type == 'exit': importer = self.db.import_exit_dict else: raise ValueError("Unknown record type: {0}".format(record_type)) importer(dct, **kwds) if not self.keep_json: self.logger.info('Removing JSON record: %s', json_path) os.remove(json_path)
python
def index_record(self, json_path): """ Import `json_path` and remove it if :attr:`keep_json` is false. """ self.logger.debug('Indexing record: %s', json_path) json_path = os.path.abspath(json_path) self.check_path(json_path, '`json_path`') with open(json_path) as fp: try: dct = json.load(fp) except ValueError: warnings.warn( 'Ignoring invalid JSON file at: {0}'.format(json_path)) return record_type = self.get_record_type(json_path) kwds = {} if record_type == 'command': importer = self.db.import_dict kwds.update(check_duplicate=self.check_duplicate) elif record_type == 'init': importer = self.db.import_init_dict elif record_type == 'exit': importer = self.db.import_exit_dict else: raise ValueError("Unknown record type: {0}".format(record_type)) importer(dct, **kwds) if not self.keep_json: self.logger.info('Removing JSON record: %s', json_path) os.remove(json_path)
[ "def", "index_record", "(", "self", ",", "json_path", ")", ":", "self", ".", "logger", ".", "debug", "(", "'Indexing record: %s'", ",", "json_path", ")", "json_path", "=", "os", ".", "path", ".", "abspath", "(", "json_path", ")", "self", ".", "check_path",...
Import `json_path` and remove it if :attr:`keep_json` is false.
[ "Import", "json_path", "and", "remove", "it", "if", ":", "attr", ":", "keep_json", "is", "false", "." ]
train
https://github.com/tkf/rash/blob/585da418ec37dd138f1a4277718b6f507e9536a2/rash/indexer.py#L76-L107
tkf/rash
rash/indexer.py
Indexer.find_record_files
def find_record_files(self): """ Yield paths to record files. """ for (root, _, files) in os.walk(self.record_path): for f in (f for f in files if f.endswith('.json')): yield os.path.join(root, f)
python
def find_record_files(self): """ Yield paths to record files. """ for (root, _, files) in os.walk(self.record_path): for f in (f for f in files if f.endswith('.json')): yield os.path.join(root, f)
[ "def", "find_record_files", "(", "self", ")", ":", "for", "(", "root", ",", "_", ",", "files", ")", "in", "os", ".", "walk", "(", "self", ".", "record_path", ")", ":", "for", "f", "in", "(", "f", "for", "f", "in", "files", "if", "f", ".", "ends...
Yield paths to record files.
[ "Yield", "paths", "to", "record", "files", "." ]
train
https://github.com/tkf/rash/blob/585da418ec37dd138f1a4277718b6f507e9536a2/rash/indexer.py#L109-L115
tkf/rash
rash/indexer.py
Indexer.index_all
def index_all(self): """ Index all records under :attr:`record_path`. """ self.logger.debug('Start indexing all records under: %s', self.record_path) with self.db.connection(): for json_path in sorted(self.find_record_files()): self.index_record(json_path)
python
def index_all(self): """ Index all records under :attr:`record_path`. """ self.logger.debug('Start indexing all records under: %s', self.record_path) with self.db.connection(): for json_path in sorted(self.find_record_files()): self.index_record(json_path)
[ "def", "index_all", "(", "self", ")", ":", "self", ".", "logger", ".", "debug", "(", "'Start indexing all records under: %s'", ",", "self", ".", "record_path", ")", "with", "self", ".", "db", ".", "connection", "(", ")", ":", "for", "json_path", "in", "sor...
Index all records under :attr:`record_path`.
[ "Index", "all", "records", "under", ":", "attr", ":", "record_path", "." ]
train
https://github.com/tkf/rash/blob/585da418ec37dd138f1a4277718b6f507e9536a2/rash/indexer.py#L117-L125
INM-6/hybridLFPy
examples/Hagen_et_al_2016_cercor/figure_11.py
fig_lfp_corr
def fig_lfp_corr(params, savefolders, transient=200, channels=[0,3,7,11,13], Df=None, mlab=True, NFFT=256, noverlap=128, window=plt.mlab.window_hanning, letterslist=['AB', 'CD'], data_type = 'LFP'): '''This figure compares power spectra for correlated and uncorrelated signals ''' ana_params.set_PLOS_2column_fig_style(ratio=0.5) fig = plt.figure() fig.subplots_adjust(left=0.07, right=0.95, bottom=0.1, wspace=0.3, hspace=0.1) gs = gridspec.GridSpec(5, 4) for i, (savefolder, letters) in enumerate(zip(savefolders, letterslist)): # path to simulation files params.savefolder = os.path.join(os.path.split(params.savefolder)[0], savefolder) params.figures_path = os.path.join(params.savefolder, 'figures') params.spike_output_path = os.path.join(params.savefolder, 'processed_nest_output') params.networkSimParams['spike_output_path'] = params.spike_output_path ## Including correlations f = h5py.File(os.path.join(params.savefolder, ana_params.analysis_folder, data_type + ana_params.fname_psd),'r') freqs = f['freqs'].value LFP_PSD_corr = f['psd'].value f.close() ## Excluding correlations f = h5py.File(os.path.join(params.savefolder, ana_params.analysis_folder, data_type + ana_params.fname_psd_uncorr),'r') freqs = f['freqs'].value LFP_PSD_uncorr = f['psd'].value f.close() ################################## ### Single channel LFP PSDs ### ################################## ax = fig.add_subplot(gs[0, (i % 2)*2]) phlp.remove_axis_junk(ax) ax.loglog(freqs,LFP_PSD_corr[channels[0]], color='k', label='$P$') ax.loglog(freqs,LFP_PSD_uncorr[channels[0]], color='gray' if analysis_params.bw else analysis_params.colorP, lw=1, label='$\tilde{P}$') ax.xaxis.set_ticks_position('bottom') ax.yaxis.set_ticks_position('left') ax.text(0.80,0.82,'ch. %i' %(channels[0]+1),horizontalalignment='left', verticalalignment='center', fontsize=6, transform=ax.transAxes) ax.yaxis.set_minor_locator(plt.NullLocator()) ax.set_ylabel('(mV$^2$/Hz)', labelpad=0.) 
ax.set_xticks([]) ax.set_xticklabels([]) ax.tick_params(axis='y',which='minor',bottom='off') ax.set_xlim(4E0,4E2) ax.set_ylim(1E-8,1.5E-4) ax.set_yticks([1E-8,1E-6,1E-4]) ax.set_title('power spectra') phlp.annotate_subplot(ax, ncols=4, nrows=5, letter=letters[0], linear_offset=0.065) ax = fig.add_subplot(gs[1, (i % 2)*2]) phlp.remove_axis_junk(ax) ax.loglog(freqs,LFP_PSD_corr[channels[1]], color='k', label='corr') ax.loglog(freqs,LFP_PSD_uncorr[channels[1]], color='gray' if analysis_params.bw else analysis_params.colorP, lw=1, label='uncorr') ax.xaxis.set_ticks_position('bottom') ax.yaxis.set_ticks_position('left') ax.text(0.80,0.82,'ch. %i' %(channels[1]+1),horizontalalignment='left', verticalalignment='center', fontsize=6, transform=ax.transAxes) ax.yaxis.set_minor_locator(plt.NullLocator()) ax.set_xticks([]) ax.set_xticklabels([]) ax.tick_params(axis='y',which='minor',bottom='off') ax.set_xlim(4E0,4E2) ax.set_ylim(1E-8,1.5E-4) ax.set_yticks([1E-8,1E-6,1E-4]) ax.set_yticklabels([]) ax = fig.add_subplot(gs[2, (i % 2)*2]) phlp.remove_axis_junk(ax) ax.loglog(freqs,LFP_PSD_corr[channels[2]], color='k', label='corr') ax.loglog(freqs,LFP_PSD_uncorr[channels[2]], color='gray' if analysis_params.bw else analysis_params.colorP, lw=1, label='uncorr') ax.xaxis.set_ticks_position('bottom') ax.yaxis.set_ticks_position('left') ax.text(0.80,0.82,'ch. 
%i' %(channels[2]+1),horizontalalignment='left', verticalalignment='center', fontsize=6, transform=ax.transAxes) ax.yaxis.set_minor_locator(plt.NullLocator()) ax.set_xticks([]) ax.set_xticklabels([]) ax.tick_params(axis='y',which='minor',bottom='off') ax.set_xlim(4E0,4E2) ax.set_ylim(1E-8,1.5E-4) ax.set_yticks([1E-8,1E-6,1E-4]) ax.set_yticklabels([]) ax = fig.add_subplot(gs[3, (i % 2)*2]) phlp.remove_axis_junk(ax) ax.loglog(freqs,LFP_PSD_corr[channels[3]], color='k', label='corr') ax.loglog(freqs,LFP_PSD_uncorr[channels[3]], color='gray' if analysis_params.bw else analysis_params.colorP, lw=1, label='uncorr') ax.xaxis.set_ticks_position('bottom') ax.yaxis.set_ticks_position('left') ax.text(0.80,0.82,'ch. %i' %(channels[3]+1),horizontalalignment='left', verticalalignment='center', fontsize=6, transform=ax.transAxes) ax.yaxis.set_minor_locator(plt.NullLocator()) ax.set_xticks([]) ax.set_xticklabels([]) ax.tick_params(axis='y',which='minor',bottom='off') ax.set_xlim(4E0,4E2) ax.set_ylim(1E-8,1.5E-4) ax.set_yticks([1E-8,1E-6,1E-4]) ax.set_yticklabels([]) ax = fig.add_subplot(gs[4, (i % 2)*2]) phlp.remove_axis_junk(ax) ax.loglog(freqs,LFP_PSD_corr[channels[4]], color='k', label='corr') ax.loglog(freqs,LFP_PSD_uncorr[channels[4]], color='gray' if analysis_params.bw else analysis_params.colorP, lw=1, label='uncorr') ax.xaxis.set_ticks_position('bottom') ax.yaxis.set_ticks_position('left') ax.set_xlabel(r'$f$ (Hz)', labelpad=0.2) ax.text(0.80,0.82,'ch. 
%i' %(channels[4]+1),horizontalalignment='left', verticalalignment='center', fontsize=6, transform=ax.transAxes) ax.yaxis.set_minor_locator(plt.NullLocator()) ax.tick_params(axis='y',which='minor',bottom='off') ax.set_xlim(4E0,4E2) ax.set_ylim(1E-8,1.5E-4) ax.set_yticks([1E-8,1E-6,1E-4]) ax.set_yticklabels([]) ################################## ### LFP PSD ratios ### ################################## ax = fig.add_subplot(gs[:, (i % 2)*2 + 1]) phlp.annotate_subplot(ax, ncols=4, nrows=1, letter=letters[1], linear_offset=0.065) phlp.remove_axis_junk(ax) ax.set_title('power ratio') PSD_ratio = LFP_PSD_corr/LFP_PSD_uncorr zvec = np.r_[params.electrodeParams['z']] zvec = np.r_[zvec, zvec[-1] + np.diff(zvec)[-1]] inds = freqs >= 1 # frequencies greater than 4 Hz im = ax.pcolormesh(freqs[inds], zvec+40, PSD_ratio[:, inds], rasterized=False, cmap=plt.get_cmap('gray_r', 12) if analysis_params.bw else plt.get_cmap('Reds', 12), vmin=10**-0.25,vmax=10**2.75,norm=LogNorm()) ax.set_xscale('log') ax.set_yticks(zvec) yticklabels = ['ch. %i' %i for i in np.arange(len(zvec))+1] ax.set_yticklabels(yticklabels) ax.set_xlabel(r'$f$ (Hz)',labelpad=0.2) plt.axis('tight') ax.set_xlim([4E0, 4E2]) cb = phlp.colorbar(fig, ax, im, width=0.05, height=0.5, hoffset=-0.05, voffset=0.0) cb.set_label('(-)', labelpad=0.1) return fig
python
def fig_lfp_corr(params, savefolders, transient=200, channels=[0,3,7,11,13], Df=None, mlab=True, NFFT=256, noverlap=128, window=plt.mlab.window_hanning, letterslist=['AB', 'CD'], data_type = 'LFP'): '''This figure compares power spectra for correlated and uncorrelated signals ''' ana_params.set_PLOS_2column_fig_style(ratio=0.5) fig = plt.figure() fig.subplots_adjust(left=0.07, right=0.95, bottom=0.1, wspace=0.3, hspace=0.1) gs = gridspec.GridSpec(5, 4) for i, (savefolder, letters) in enumerate(zip(savefolders, letterslist)): # path to simulation files params.savefolder = os.path.join(os.path.split(params.savefolder)[0], savefolder) params.figures_path = os.path.join(params.savefolder, 'figures') params.spike_output_path = os.path.join(params.savefolder, 'processed_nest_output') params.networkSimParams['spike_output_path'] = params.spike_output_path ## Including correlations f = h5py.File(os.path.join(params.savefolder, ana_params.analysis_folder, data_type + ana_params.fname_psd),'r') freqs = f['freqs'].value LFP_PSD_corr = f['psd'].value f.close() ## Excluding correlations f = h5py.File(os.path.join(params.savefolder, ana_params.analysis_folder, data_type + ana_params.fname_psd_uncorr),'r') freqs = f['freqs'].value LFP_PSD_uncorr = f['psd'].value f.close() ################################## ### Single channel LFP PSDs ### ################################## ax = fig.add_subplot(gs[0, (i % 2)*2]) phlp.remove_axis_junk(ax) ax.loglog(freqs,LFP_PSD_corr[channels[0]], color='k', label='$P$') ax.loglog(freqs,LFP_PSD_uncorr[channels[0]], color='gray' if analysis_params.bw else analysis_params.colorP, lw=1, label='$\tilde{P}$') ax.xaxis.set_ticks_position('bottom') ax.yaxis.set_ticks_position('left') ax.text(0.80,0.82,'ch. %i' %(channels[0]+1),horizontalalignment='left', verticalalignment='center', fontsize=6, transform=ax.transAxes) ax.yaxis.set_minor_locator(plt.NullLocator()) ax.set_ylabel('(mV$^2$/Hz)', labelpad=0.) 
ax.set_xticks([]) ax.set_xticklabels([]) ax.tick_params(axis='y',which='minor',bottom='off') ax.set_xlim(4E0,4E2) ax.set_ylim(1E-8,1.5E-4) ax.set_yticks([1E-8,1E-6,1E-4]) ax.set_title('power spectra') phlp.annotate_subplot(ax, ncols=4, nrows=5, letter=letters[0], linear_offset=0.065) ax = fig.add_subplot(gs[1, (i % 2)*2]) phlp.remove_axis_junk(ax) ax.loglog(freqs,LFP_PSD_corr[channels[1]], color='k', label='corr') ax.loglog(freqs,LFP_PSD_uncorr[channels[1]], color='gray' if analysis_params.bw else analysis_params.colorP, lw=1, label='uncorr') ax.xaxis.set_ticks_position('bottom') ax.yaxis.set_ticks_position('left') ax.text(0.80,0.82,'ch. %i' %(channels[1]+1),horizontalalignment='left', verticalalignment='center', fontsize=6, transform=ax.transAxes) ax.yaxis.set_minor_locator(plt.NullLocator()) ax.set_xticks([]) ax.set_xticklabels([]) ax.tick_params(axis='y',which='minor',bottom='off') ax.set_xlim(4E0,4E2) ax.set_ylim(1E-8,1.5E-4) ax.set_yticks([1E-8,1E-6,1E-4]) ax.set_yticklabels([]) ax = fig.add_subplot(gs[2, (i % 2)*2]) phlp.remove_axis_junk(ax) ax.loglog(freqs,LFP_PSD_corr[channels[2]], color='k', label='corr') ax.loglog(freqs,LFP_PSD_uncorr[channels[2]], color='gray' if analysis_params.bw else analysis_params.colorP, lw=1, label='uncorr') ax.xaxis.set_ticks_position('bottom') ax.yaxis.set_ticks_position('left') ax.text(0.80,0.82,'ch. 
%i' %(channels[2]+1),horizontalalignment='left', verticalalignment='center', fontsize=6, transform=ax.transAxes) ax.yaxis.set_minor_locator(plt.NullLocator()) ax.set_xticks([]) ax.set_xticklabels([]) ax.tick_params(axis='y',which='minor',bottom='off') ax.set_xlim(4E0,4E2) ax.set_ylim(1E-8,1.5E-4) ax.set_yticks([1E-8,1E-6,1E-4]) ax.set_yticklabels([]) ax = fig.add_subplot(gs[3, (i % 2)*2]) phlp.remove_axis_junk(ax) ax.loglog(freqs,LFP_PSD_corr[channels[3]], color='k', label='corr') ax.loglog(freqs,LFP_PSD_uncorr[channels[3]], color='gray' if analysis_params.bw else analysis_params.colorP, lw=1, label='uncorr') ax.xaxis.set_ticks_position('bottom') ax.yaxis.set_ticks_position('left') ax.text(0.80,0.82,'ch. %i' %(channels[3]+1),horizontalalignment='left', verticalalignment='center', fontsize=6, transform=ax.transAxes) ax.yaxis.set_minor_locator(plt.NullLocator()) ax.set_xticks([]) ax.set_xticklabels([]) ax.tick_params(axis='y',which='minor',bottom='off') ax.set_xlim(4E0,4E2) ax.set_ylim(1E-8,1.5E-4) ax.set_yticks([1E-8,1E-6,1E-4]) ax.set_yticklabels([]) ax = fig.add_subplot(gs[4, (i % 2)*2]) phlp.remove_axis_junk(ax) ax.loglog(freqs,LFP_PSD_corr[channels[4]], color='k', label='corr') ax.loglog(freqs,LFP_PSD_uncorr[channels[4]], color='gray' if analysis_params.bw else analysis_params.colorP, lw=1, label='uncorr') ax.xaxis.set_ticks_position('bottom') ax.yaxis.set_ticks_position('left') ax.set_xlabel(r'$f$ (Hz)', labelpad=0.2) ax.text(0.80,0.82,'ch. 
%i' %(channels[4]+1),horizontalalignment='left', verticalalignment='center', fontsize=6, transform=ax.transAxes) ax.yaxis.set_minor_locator(plt.NullLocator()) ax.tick_params(axis='y',which='minor',bottom='off') ax.set_xlim(4E0,4E2) ax.set_ylim(1E-8,1.5E-4) ax.set_yticks([1E-8,1E-6,1E-4]) ax.set_yticklabels([]) ################################## ### LFP PSD ratios ### ################################## ax = fig.add_subplot(gs[:, (i % 2)*2 + 1]) phlp.annotate_subplot(ax, ncols=4, nrows=1, letter=letters[1], linear_offset=0.065) phlp.remove_axis_junk(ax) ax.set_title('power ratio') PSD_ratio = LFP_PSD_corr/LFP_PSD_uncorr zvec = np.r_[params.electrodeParams['z']] zvec = np.r_[zvec, zvec[-1] + np.diff(zvec)[-1]] inds = freqs >= 1 # frequencies greater than 4 Hz im = ax.pcolormesh(freqs[inds], zvec+40, PSD_ratio[:, inds], rasterized=False, cmap=plt.get_cmap('gray_r', 12) if analysis_params.bw else plt.get_cmap('Reds', 12), vmin=10**-0.25,vmax=10**2.75,norm=LogNorm()) ax.set_xscale('log') ax.set_yticks(zvec) yticklabels = ['ch. %i' %i for i in np.arange(len(zvec))+1] ax.set_yticklabels(yticklabels) ax.set_xlabel(r'$f$ (Hz)',labelpad=0.2) plt.axis('tight') ax.set_xlim([4E0, 4E2]) cb = phlp.colorbar(fig, ax, im, width=0.05, height=0.5, hoffset=-0.05, voffset=0.0) cb.set_label('(-)', labelpad=0.1) return fig
[ "def", "fig_lfp_corr", "(", "params", ",", "savefolders", ",", "transient", "=", "200", ",", "channels", "=", "[", "0", ",", "3", ",", "7", ",", "11", ",", "13", "]", ",", "Df", "=", "None", ",", "mlab", "=", "True", ",", "NFFT", "=", "256", ",...
This figure compares power spectra for correlated and uncorrelated signals
[ "This", "figure", "compares", "power", "spectra", "for", "correlated", "and", "uncorrelated", "signals" ]
train
https://github.com/INM-6/hybridLFPy/blob/c38bdf38982c4624c2f70caeb50c40f1d5980abd/examples/Hagen_et_al_2016_cercor/figure_11.py#L31-L223
INM-6/hybridLFPy
examples/Hagen_et_al_2016_cercor/nest_simulation.py
sli_run
def sli_run(parameters=object(), fname='microcircuit.sli', verbosity='M_ERROR'): ''' Takes parameter-class and name of main sli-script as input, initiating the simulation. kwargs: :: parameters : object, parameter class instance fname : str, path to sli codes to be executed verbosity : 'str', nest verbosity flag ''' # Load parameters from params file, and pass them to nest # Python -> SLI send_nest_params_to_sli(vars(parameters)) #set SLI verbosity nest.sli_run("%s setverbosity" % verbosity) # Run NEST/SLI simulation nest.sli_run('(%s) run' % fname)
python
def sli_run(parameters=object(), fname='microcircuit.sli', verbosity='M_ERROR'): ''' Takes parameter-class and name of main sli-script as input, initiating the simulation. kwargs: :: parameters : object, parameter class instance fname : str, path to sli codes to be executed verbosity : 'str', nest verbosity flag ''' # Load parameters from params file, and pass them to nest # Python -> SLI send_nest_params_to_sli(vars(parameters)) #set SLI verbosity nest.sli_run("%s setverbosity" % verbosity) # Run NEST/SLI simulation nest.sli_run('(%s) run' % fname)
[ "def", "sli_run", "(", "parameters", "=", "object", "(", ")", ",", "fname", "=", "'microcircuit.sli'", ",", "verbosity", "=", "'M_ERROR'", ")", ":", "# Load parameters from params file, and pass them to nest", "# Python -> SLI", "send_nest_params_to_sli", "(", "vars", "...
Takes parameter-class and name of main sli-script as input, initiating the simulation. kwargs: :: parameters : object, parameter class instance fname : str, path to sli codes to be executed verbosity : 'str', nest verbosity flag
[ "Takes", "parameter", "-", "class", "and", "name", "of", "main", "sli", "-", "script", "as", "input", "initiating", "the", "simulation", ".", "kwargs", ":", "::", "parameters", ":", "object", "parameter", "class", "instance", "fname", ":", "str", "path", "...
train
https://github.com/INM-6/hybridLFPy/blob/c38bdf38982c4624c2f70caeb50c40f1d5980abd/examples/Hagen_et_al_2016_cercor/nest_simulation.py#L55-L76
pebble/libpebble2
libpebble2/protocol/base/types.py
Field.buffer_to_value
def buffer_to_value(self, obj, buffer, offset, default_endianness=DEFAULT_ENDIANNESS): """ Converts the bytes in ``buffer`` at ``offset`` to a native Python value. Returns that value and the number of bytes consumed to create it. :param obj: The parent :class:`.PebblePacket` of this field :type obj: .PebblePacket :param buffer: The buffer from which to extract a value. :type buffer: bytes :param offset: The offset in the buffer to start at. :type offset: int :param default_endianness: The default endianness of the value. Used if ``endianness`` was not passed to the :class:`Field` constructor. :type default_endianness: str :return: (value, length) :rtype: (:class:`object`, :any:`int`) """ try: value, length = struct.unpack_from(str(self.endianness or default_endianness) + self.struct_format, buffer, offset)[0], struct.calcsize(self.struct_format) if self._enum is not None: try: return self._enum(value), length except ValueError as e: raise PacketDecodeError("{}: {}".format(self.type, e)) else: return value, length except struct.error as e: raise PacketDecodeError("{}: {}".format(self.type, e))
python
def buffer_to_value(self, obj, buffer, offset, default_endianness=DEFAULT_ENDIANNESS): """ Converts the bytes in ``buffer`` at ``offset`` to a native Python value. Returns that value and the number of bytes consumed to create it. :param obj: The parent :class:`.PebblePacket` of this field :type obj: .PebblePacket :param buffer: The buffer from which to extract a value. :type buffer: bytes :param offset: The offset in the buffer to start at. :type offset: int :param default_endianness: The default endianness of the value. Used if ``endianness`` was not passed to the :class:`Field` constructor. :type default_endianness: str :return: (value, length) :rtype: (:class:`object`, :any:`int`) """ try: value, length = struct.unpack_from(str(self.endianness or default_endianness) + self.struct_format, buffer, offset)[0], struct.calcsize(self.struct_format) if self._enum is not None: try: return self._enum(value), length except ValueError as e: raise PacketDecodeError("{}: {}".format(self.type, e)) else: return value, length except struct.error as e: raise PacketDecodeError("{}: {}".format(self.type, e))
[ "def", "buffer_to_value", "(", "self", ",", "obj", ",", "buffer", ",", "offset", ",", "default_endianness", "=", "DEFAULT_ENDIANNESS", ")", ":", "try", ":", "value", ",", "length", "=", "struct", ".", "unpack_from", "(", "str", "(", "self", ".", "endiannes...
Converts the bytes in ``buffer`` at ``offset`` to a native Python value. Returns that value and the number of bytes consumed to create it. :param obj: The parent :class:`.PebblePacket` of this field :type obj: .PebblePacket :param buffer: The buffer from which to extract a value. :type buffer: bytes :param offset: The offset in the buffer to start at. :type offset: int :param default_endianness: The default endianness of the value. Used if ``endianness`` was not passed to the :class:`Field` constructor. :type default_endianness: str :return: (value, length) :rtype: (:class:`object`, :any:`int`)
[ "Converts", "the", "bytes", "in", "buffer", "at", "offset", "to", "a", "native", "Python", "value", ".", "Returns", "that", "value", "and", "the", "number", "of", "bytes", "consumed", "to", "create", "it", "." ]
train
https://github.com/pebble/libpebble2/blob/23e2eb92cfc084e6f9e8c718711ac994ef606d18/libpebble2/protocol/base/types.py#L43-L71
pebble/libpebble2
libpebble2/protocol/base/types.py
Field.value_to_bytes
def value_to_bytes(self, obj, value, default_endianness=DEFAULT_ENDIANNESS): """ Converts the given value to an appropriately encoded string of bytes that represents it. :param obj: The parent :class:`.PebblePacket` of this field :type obj: .PebblePacket :param value: The python value to serialise. :param default_endianness: The default endianness of the value. Used if ``endianness`` was not passed to the :class:`Field` constructor. :type default_endianness: str :return: The serialised value :rtype: bytes """ return struct.pack(str(self.endianness or default_endianness) + self.struct_format, value)
python
def value_to_bytes(self, obj, value, default_endianness=DEFAULT_ENDIANNESS): """ Converts the given value to an appropriately encoded string of bytes that represents it. :param obj: The parent :class:`.PebblePacket` of this field :type obj: .PebblePacket :param value: The python value to serialise. :param default_endianness: The default endianness of the value. Used if ``endianness`` was not passed to the :class:`Field` constructor. :type default_endianness: str :return: The serialised value :rtype: bytes """ return struct.pack(str(self.endianness or default_endianness) + self.struct_format, value)
[ "def", "value_to_bytes", "(", "self", ",", "obj", ",", "value", ",", "default_endianness", "=", "DEFAULT_ENDIANNESS", ")", ":", "return", "struct", ".", "pack", "(", "str", "(", "self", ".", "endianness", "or", "default_endianness", ")", "+", "self", ".", ...
Converts the given value to an appropriately encoded string of bytes that represents it. :param obj: The parent :class:`.PebblePacket` of this field :type obj: .PebblePacket :param value: The python value to serialise. :param default_endianness: The default endianness of the value. Used if ``endianness`` was not passed to the :class:`Field` constructor. :type default_endianness: str :return: The serialised value :rtype: bytes
[ "Converts", "the", "given", "value", "to", "an", "appropriately", "encoded", "string", "of", "bytes", "that", "represents", "it", "." ]
train
https://github.com/pebble/libpebble2/blob/23e2eb92cfc084e6f9e8c718711ac994ef606d18/libpebble2/protocol/base/types.py#L73-L86
alpha-xone/xone
xone/profile.py
profile
def profile(func): """ Decorator to profile functions with cProfile Args: func: python function Returns: profile report References: https://osf.io/upav8/ """ def inner(*args, **kwargs): pr = cProfile.Profile() pr.enable() res = func(*args, **kwargs) pr.disable() s = io.StringIO() ps = pstats.Stats(pr, stream=s).sort_stats('cumulative') ps.print_stats() print(s.getvalue()) return res return inner
python
def profile(func): """ Decorator to profile functions with cProfile Args: func: python function Returns: profile report References: https://osf.io/upav8/ """ def inner(*args, **kwargs): pr = cProfile.Profile() pr.enable() res = func(*args, **kwargs) pr.disable() s = io.StringIO() ps = pstats.Stats(pr, stream=s).sort_stats('cumulative') ps.print_stats() print(s.getvalue()) return res return inner
[ "def", "profile", "(", "func", ")", ":", "def", "inner", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "pr", "=", "cProfile", ".", "Profile", "(", ")", "pr", ".", "enable", "(", ")", "res", "=", "func", "(", "*", "args", ",", "*", "*",...
Decorator to profile functions with cProfile Args: func: python function Returns: profile report References: https://osf.io/upav8/
[ "Decorator", "to", "profile", "functions", "with", "cProfile" ]
train
https://github.com/alpha-xone/xone/blob/68534a30f7f1760b220ba58040be3927f7dfbcf4/xone/profile.py#L6-L31
mathiasertl/xmpp-backends
xmpp_backends/ejabberd_xmlrpc.py
EjabberdXMLRPCBackend.rpc
def rpc(self, cmd, **kwargs): """Generic helper function to call an RPC method.""" func = getattr(self.client, cmd) try: if self.credentials is None: return func(kwargs) else: return func(self.credentials, kwargs) except socket.error as e: raise BackendConnectionError(e) except (xmlrpclib.ProtocolError, BadStatusLine) as e: log.error(e) raise BackendError("Error reaching backend.")
python
def rpc(self, cmd, **kwargs): """Generic helper function to call an RPC method.""" func = getattr(self.client, cmd) try: if self.credentials is None: return func(kwargs) else: return func(self.credentials, kwargs) except socket.error as e: raise BackendConnectionError(e) except (xmlrpclib.ProtocolError, BadStatusLine) as e: log.error(e) raise BackendError("Error reaching backend.")
[ "def", "rpc", "(", "self", ",", "cmd", ",", "*", "*", "kwargs", ")", ":", "func", "=", "getattr", "(", "self", ".", "client", ",", "cmd", ")", "try", ":", "if", "self", ".", "credentials", "is", "None", ":", "return", "func", "(", "kwargs", ")", ...
Generic helper function to call an RPC method.
[ "Generic", "helper", "function", "to", "call", "an", "RPC", "method", "." ]
train
https://github.com/mathiasertl/xmpp-backends/blob/214ef0664dbf90fa300c2483b9b3416559e5d171/xmpp_backends/ejabberd_xmlrpc.py#L107-L120
mathiasertl/xmpp-backends
xmpp_backends/ejabberd_xmlrpc.py
EjabberdXMLRPCBackend.message_user
def message_user(self, username, domain, subject, message): """Currently use send_message_chat and discard subject, because headline messages are not stored by mod_offline.""" kwargs = { 'body': message, 'from': domain, 'to': '%s@%s' % (username, domain), } if self.api_version <= (14, 7): # TODO: it's unclear when send_message was introduced command = 'send_message_chat' else: command = 'send_message' kwargs['subject'] = subject kwargs['type'] = 'normal' result = self.rpc(command, **kwargs) if result['res'] == 0: return else: raise BackendError(result.get('text', 'Unknown Error'))
python
def message_user(self, username, domain, subject, message): """Currently use send_message_chat and discard subject, because headline messages are not stored by mod_offline.""" kwargs = { 'body': message, 'from': domain, 'to': '%s@%s' % (username, domain), } if self.api_version <= (14, 7): # TODO: it's unclear when send_message was introduced command = 'send_message_chat' else: command = 'send_message' kwargs['subject'] = subject kwargs['type'] = 'normal' result = self.rpc(command, **kwargs) if result['res'] == 0: return else: raise BackendError(result.get('text', 'Unknown Error'))
[ "def", "message_user", "(", "self", ",", "username", ",", "domain", ",", "subject", ",", "message", ")", ":", "kwargs", "=", "{", "'body'", ":", "message", ",", "'from'", ":", "domain", ",", "'to'", ":", "'%s@%s'", "%", "(", "username", ",", "domain", ...
Currently use send_message_chat and discard subject, because headline messages are not stored by mod_offline.
[ "Currently", "use", "send_message_chat", "and", "discard", "subject", "because", "headline", "messages", "are", "not", "stored", "by", "mod_offline", "." ]
train
https://github.com/mathiasertl/xmpp-backends/blob/214ef0664dbf90fa300c2483b9b3416559e5d171/xmpp_backends/ejabberd_xmlrpc.py#L263-L285
INM-6/hybridLFPy
examples/Hagen_et_al_2016_cercor/figure_08.py
fig_network_input_structure
def fig_network_input_structure(fig, params, bottom=0.1, top=0.9, transient=200, T=[800, 1000], Df= 0., mlab= True, NFFT=256, srate=1000, window=plt.mlab.window_hanning, noverlap=256*3/4, letters='abcde', flim=(4, 400), show_titles=True, show_xlabels=True, show_CSD=False): ''' This figure is the top part for plotting a comparison between the PD-model and the modified-PD model ''' #load spike as database networkSim = CachedNetwork(**params.networkSimParams) if analysis_params.bw: networkSim.colors = phlp.get_colors(len(networkSim.X)) # ana_params.set_PLOS_2column_fig_style(ratio=ratio) # fig = plt.figure() # fig.subplots_adjust(left=0.06, right=0.94, bottom=0.09, top=0.92, wspace=0.5, hspace=0.2) #use gridspec to get nicely aligned subplots througout panel gs1 = gridspec.GridSpec(5, 5, bottom=bottom, top=top) ############################################################################ # A part, full dot display ############################################################################ ax0 = fig.add_subplot(gs1[:, 0]) phlp.remove_axis_junk(ax0) phlp.annotate_subplot(ax0, ncols=5, nrows=1, letter=letters[0], linear_offset=0.065) x, y = networkSim.get_xy(T, fraction=1) networkSim.plot_raster(ax0, T, x, y, markersize=0.2, marker='_', alpha=1., legend=False, pop_names=True, rasterized=False) ax0.set_ylabel('population', labelpad=0.) ax0.set_xticks([800,900,1000]) if show_titles: ax0.set_title('spiking activity',va='center') if show_xlabels: ax0.set_xlabel(r'$t$ (ms)', labelpad=0.) else: ax0.set_xlabel('') ############################################################################ # B part, firing rate spectra ############################################################################ # Get the firing rate from Potjan Diesmann et al network activity #collect the spikes x is the times, y is the id of the cell. 
T_all=[transient, networkSim.simtime] bins = np.arange(transient, networkSim.simtime+1) x, y = networkSim.get_xy(T_all, fraction=1) # create invisible axes to position labels correctly ax_ = fig.add_subplot(gs1[:, 1]) phlp.annotate_subplot(ax_, ncols=5, nrows=1, letter=letters[1], linear_offset=0.065) if show_titles: ax_.set_title('firing rate PSD', va='center') ax_.axis('off') colors = phlp.get_colors(len(params.Y))+['k'] COUNTER = 0 label_set = False tits = ['L23E/I', 'L4E/I', 'L5E/I', 'L6E/I', 'TC'] if x['TC'].size > 0: TC = True else: TC = False BAxes = [] for i, X in enumerate(networkSim.X): if i % 2 == 0: ax1 = fig.add_subplot(gs1[COUNTER, 1]) phlp.remove_axis_junk(ax1) if x[X].size > 0: ax1.text(0.05, 0.85, tits[COUNTER], horizontalalignment='left', verticalalignment='bottom', transform=ax1.transAxes) BAxes.append(ax1) #firing rate histogram hist = np.histogram(x[X], bins=bins)[0].astype(float) hist -= hist.mean() if mlab: Pxx, freqs=plt.mlab.psd(hist, NFFT=NFFT, Fs=srate, noverlap=noverlap, window=window) else: [freqs, Pxx] = hlp.powerspec([hist], tbin= 1., Df=Df, pointProcess=False) mask = np.where(freqs >= 0.) freqs = freqs[mask] Pxx = Pxx.flatten() Pxx = Pxx[mask] Pxx = Pxx/(T_all[1]-T_all[0])**2 if x[X].size > 0: ax1.loglog(freqs[1:], Pxx[1:], label=X, color=colors[i], clip_on=True) ax1.axis(ax1.axis('tight')) ax1.set_ylim([5E-4,5E2]) ax1.set_yticks([1E-3,1E-1,1E1]) if label_set == False: ax1.set_ylabel(r'(s$^{-2}$/Hz)', labelpad=0.) label_set = True if i > 1: ax1.set_yticklabels([]) if i >= 6 and not TC and show_xlabels or X == 'TC' and TC and show_xlabels: ax1.set_xlabel('$f$ (Hz)', labelpad=0.) 
if TC and i < 8 or not TC and i < 6: ax1.set_xticklabels([]) else: ax1.axis('off') ax1.set_xlim(flim) if i % 2 == 0: COUNTER += 1 ax1.yaxis.set_minor_locator(plt.NullLocator()) ############################################################################ # c part, LFP traces and CSD color plots ############################################################################ ax2 = fig.add_subplot(gs1[:, 2]) phlp.annotate_subplot(ax2, ncols=5, nrows=1, letter=letters[2], linear_offset=0.065) phlp.remove_axis_junk(ax2) plot_signal_sum(ax2, params, fname=os.path.join(params.savefolder, 'LFPsum.h5'), unit='mV', T=T, ylim=[-1600, 40], rasterized=False) # CSD background colorplot if show_CSD: im = plot_signal_sum_colorplot(ax2, params, os.path.join(params.savefolder, 'CSDsum.h5'), unit=r'($\mu$Amm$^{-3}$)', T=[800, 1000], colorbar=False, ylim=[-1600, 40], fancy=False, cmap=plt.cm.get_cmap('bwr_r', 21), rasterized=False) cb = phlp.colorbar(fig, ax2, im, width=0.05, height=0.4, hoffset=-0.05, voffset=0.3) cb.set_label('($\mu$Amm$^{-3}$)', labelpad=0.1) ax2.set_xticks([800,900,1000]) ax2.axis(ax2.axis('tight')) if show_titles: if show_CSD: ax2.set_title('LFP & CSD', va='center') else: ax2.set_title('LFP', va='center') if show_xlabels: ax2.set_xlabel(r'$t$ (ms)', labelpad=0.) 
else: ax2.set_xlabel('') ############################################################################ # d part, LFP power trace for each layer ############################################################################ freqs, PSD = calc_signal_power(params, fname=os.path.join(params.savefolder, 'LFPsum.h5'), transient=transient, Df=Df, mlab=mlab, NFFT=NFFT, noverlap=noverlap, window=window) channels = [0, 3, 7, 11, 13] # create invisible axes to position labels correctly ax_ = fig.add_subplot(gs1[:, 3]) phlp.annotate_subplot(ax_, ncols=5, nrows=1, letter=letters[3], linear_offset=0.065) if show_titles: ax_.set_title('LFP PSD',va='center') ax_.axis('off') for i, ch in enumerate(channels): ax = fig.add_subplot(gs1[i, 3]) phlp.remove_axis_junk(ax) if i == 0: ax.set_ylabel('(mV$^2$/Hz)', labelpad=0) ax.loglog(freqs[1:],PSD[ch][1:], color='k') ax.xaxis.set_ticks_position('bottom') ax.yaxis.set_ticks_position('left') if i < 4: ax.set_xticklabels([]) ax.text(0.75, 0.85,'ch. %i' %(channels[i]+1), horizontalalignment='left', verticalalignment='bottom', fontsize=6, transform=ax.transAxes) ax.tick_params(axis='y', which='minor', bottom='off') ax.axis(ax.axis('tight')) ax.yaxis.set_minor_locator(plt.NullLocator()) ax.set_xlim(flim) ax.set_ylim(1E-7,2E-4) if i != 0 : ax.set_yticklabels([]) if show_xlabels: ax.set_xlabel('$f$ (Hz)', labelpad=0.) 
############################################################################ # e part signal power ############################################################################ ax4 = fig.add_subplot(gs1[:, 4]) phlp.annotate_subplot(ax4, ncols=5, nrows=1, letter=letters[4], linear_offset=0.065) fname=os.path.join(params.savefolder, 'LFPsum.h5') im = plot_signal_power_colorplot(ax4, params, fname=fname, transient=transient, Df=Df, mlab=mlab, NFFT=NFFT, window=window, cmap=plt.cm.get_cmap('gray_r', 12), vmin=1E-7, vmax=1E-4) phlp.remove_axis_junk(ax4) ax4.set_xlim(flim) cb = phlp.colorbar(fig, ax4, im, width=0.05, height=0.5, hoffset=-0.05, voffset=0.5) cb.set_label('(mV$^2$/Hz)', labelpad=0.1) if show_titles: ax4.set_title('LFP PSD', va='center') if show_xlabels: ax4.set_xlabel(r'$f$ (Hz)', labelpad=0.) else: ax4.set_xlabel('') return fig
python
def fig_network_input_structure(fig, params, bottom=0.1, top=0.9, transient=200, T=[800, 1000], Df= 0., mlab= True, NFFT=256, srate=1000, window=plt.mlab.window_hanning, noverlap=256*3/4, letters='abcde', flim=(4, 400), show_titles=True, show_xlabels=True, show_CSD=False): ''' This figure is the top part for plotting a comparison between the PD-model and the modified-PD model ''' #load spike as database networkSim = CachedNetwork(**params.networkSimParams) if analysis_params.bw: networkSim.colors = phlp.get_colors(len(networkSim.X)) # ana_params.set_PLOS_2column_fig_style(ratio=ratio) # fig = plt.figure() # fig.subplots_adjust(left=0.06, right=0.94, bottom=0.09, top=0.92, wspace=0.5, hspace=0.2) #use gridspec to get nicely aligned subplots througout panel gs1 = gridspec.GridSpec(5, 5, bottom=bottom, top=top) ############################################################################ # A part, full dot display ############################################################################ ax0 = fig.add_subplot(gs1[:, 0]) phlp.remove_axis_junk(ax0) phlp.annotate_subplot(ax0, ncols=5, nrows=1, letter=letters[0], linear_offset=0.065) x, y = networkSim.get_xy(T, fraction=1) networkSim.plot_raster(ax0, T, x, y, markersize=0.2, marker='_', alpha=1., legend=False, pop_names=True, rasterized=False) ax0.set_ylabel('population', labelpad=0.) ax0.set_xticks([800,900,1000]) if show_titles: ax0.set_title('spiking activity',va='center') if show_xlabels: ax0.set_xlabel(r'$t$ (ms)', labelpad=0.) else: ax0.set_xlabel('') ############################################################################ # B part, firing rate spectra ############################################################################ # Get the firing rate from Potjan Diesmann et al network activity #collect the spikes x is the times, y is the id of the cell. 
T_all=[transient, networkSim.simtime] bins = np.arange(transient, networkSim.simtime+1) x, y = networkSim.get_xy(T_all, fraction=1) # create invisible axes to position labels correctly ax_ = fig.add_subplot(gs1[:, 1]) phlp.annotate_subplot(ax_, ncols=5, nrows=1, letter=letters[1], linear_offset=0.065) if show_titles: ax_.set_title('firing rate PSD', va='center') ax_.axis('off') colors = phlp.get_colors(len(params.Y))+['k'] COUNTER = 0 label_set = False tits = ['L23E/I', 'L4E/I', 'L5E/I', 'L6E/I', 'TC'] if x['TC'].size > 0: TC = True else: TC = False BAxes = [] for i, X in enumerate(networkSim.X): if i % 2 == 0: ax1 = fig.add_subplot(gs1[COUNTER, 1]) phlp.remove_axis_junk(ax1) if x[X].size > 0: ax1.text(0.05, 0.85, tits[COUNTER], horizontalalignment='left', verticalalignment='bottom', transform=ax1.transAxes) BAxes.append(ax1) #firing rate histogram hist = np.histogram(x[X], bins=bins)[0].astype(float) hist -= hist.mean() if mlab: Pxx, freqs=plt.mlab.psd(hist, NFFT=NFFT, Fs=srate, noverlap=noverlap, window=window) else: [freqs, Pxx] = hlp.powerspec([hist], tbin= 1., Df=Df, pointProcess=False) mask = np.where(freqs >= 0.) freqs = freqs[mask] Pxx = Pxx.flatten() Pxx = Pxx[mask] Pxx = Pxx/(T_all[1]-T_all[0])**2 if x[X].size > 0: ax1.loglog(freqs[1:], Pxx[1:], label=X, color=colors[i], clip_on=True) ax1.axis(ax1.axis('tight')) ax1.set_ylim([5E-4,5E2]) ax1.set_yticks([1E-3,1E-1,1E1]) if label_set == False: ax1.set_ylabel(r'(s$^{-2}$/Hz)', labelpad=0.) label_set = True if i > 1: ax1.set_yticklabels([]) if i >= 6 and not TC and show_xlabels or X == 'TC' and TC and show_xlabels: ax1.set_xlabel('$f$ (Hz)', labelpad=0.) 
if TC and i < 8 or not TC and i < 6: ax1.set_xticklabels([]) else: ax1.axis('off') ax1.set_xlim(flim) if i % 2 == 0: COUNTER += 1 ax1.yaxis.set_minor_locator(plt.NullLocator()) ############################################################################ # c part, LFP traces and CSD color plots ############################################################################ ax2 = fig.add_subplot(gs1[:, 2]) phlp.annotate_subplot(ax2, ncols=5, nrows=1, letter=letters[2], linear_offset=0.065) phlp.remove_axis_junk(ax2) plot_signal_sum(ax2, params, fname=os.path.join(params.savefolder, 'LFPsum.h5'), unit='mV', T=T, ylim=[-1600, 40], rasterized=False) # CSD background colorplot if show_CSD: im = plot_signal_sum_colorplot(ax2, params, os.path.join(params.savefolder, 'CSDsum.h5'), unit=r'($\mu$Amm$^{-3}$)', T=[800, 1000], colorbar=False, ylim=[-1600, 40], fancy=False, cmap=plt.cm.get_cmap('bwr_r', 21), rasterized=False) cb = phlp.colorbar(fig, ax2, im, width=0.05, height=0.4, hoffset=-0.05, voffset=0.3) cb.set_label('($\mu$Amm$^{-3}$)', labelpad=0.1) ax2.set_xticks([800,900,1000]) ax2.axis(ax2.axis('tight')) if show_titles: if show_CSD: ax2.set_title('LFP & CSD', va='center') else: ax2.set_title('LFP', va='center') if show_xlabels: ax2.set_xlabel(r'$t$ (ms)', labelpad=0.) 
else: ax2.set_xlabel('') ############################################################################ # d part, LFP power trace for each layer ############################################################################ freqs, PSD = calc_signal_power(params, fname=os.path.join(params.savefolder, 'LFPsum.h5'), transient=transient, Df=Df, mlab=mlab, NFFT=NFFT, noverlap=noverlap, window=window) channels = [0, 3, 7, 11, 13] # create invisible axes to position labels correctly ax_ = fig.add_subplot(gs1[:, 3]) phlp.annotate_subplot(ax_, ncols=5, nrows=1, letter=letters[3], linear_offset=0.065) if show_titles: ax_.set_title('LFP PSD',va='center') ax_.axis('off') for i, ch in enumerate(channels): ax = fig.add_subplot(gs1[i, 3]) phlp.remove_axis_junk(ax) if i == 0: ax.set_ylabel('(mV$^2$/Hz)', labelpad=0) ax.loglog(freqs[1:],PSD[ch][1:], color='k') ax.xaxis.set_ticks_position('bottom') ax.yaxis.set_ticks_position('left') if i < 4: ax.set_xticklabels([]) ax.text(0.75, 0.85,'ch. %i' %(channels[i]+1), horizontalalignment='left', verticalalignment='bottom', fontsize=6, transform=ax.transAxes) ax.tick_params(axis='y', which='minor', bottom='off') ax.axis(ax.axis('tight')) ax.yaxis.set_minor_locator(plt.NullLocator()) ax.set_xlim(flim) ax.set_ylim(1E-7,2E-4) if i != 0 : ax.set_yticklabels([]) if show_xlabels: ax.set_xlabel('$f$ (Hz)', labelpad=0.) 
############################################################################ # e part signal power ############################################################################ ax4 = fig.add_subplot(gs1[:, 4]) phlp.annotate_subplot(ax4, ncols=5, nrows=1, letter=letters[4], linear_offset=0.065) fname=os.path.join(params.savefolder, 'LFPsum.h5') im = plot_signal_power_colorplot(ax4, params, fname=fname, transient=transient, Df=Df, mlab=mlab, NFFT=NFFT, window=window, cmap=plt.cm.get_cmap('gray_r', 12), vmin=1E-7, vmax=1E-4) phlp.remove_axis_junk(ax4) ax4.set_xlim(flim) cb = phlp.colorbar(fig, ax4, im, width=0.05, height=0.5, hoffset=-0.05, voffset=0.5) cb.set_label('(mV$^2$/Hz)', labelpad=0.1) if show_titles: ax4.set_title('LFP PSD', va='center') if show_xlabels: ax4.set_xlabel(r'$f$ (Hz)', labelpad=0.) else: ax4.set_xlabel('') return fig
[ "def", "fig_network_input_structure", "(", "fig", ",", "params", ",", "bottom", "=", "0.1", ",", "top", "=", "0.9", ",", "transient", "=", "200", ",", "T", "=", "[", "800", ",", "1000", "]", ",", "Df", "=", "0.", ",", "mlab", "=", "True", ",", "N...
This figure is the top part for plotting a comparison between the PD-model and the modified-PD model
[ "This", "figure", "is", "the", "top", "part", "for", "plotting", "a", "comparison", "between", "the", "PD", "-", "model", "and", "the", "modified", "-", "PD", "model" ]
train
https://github.com/INM-6/hybridLFPy/blob/c38bdf38982c4624c2f70caeb50c40f1d5980abd/examples/Hagen_et_al_2016_cercor/figure_08.py#L33-L301
ianclegg/winrmlib
winrmlib/api/authentication.py
HttpCredSSPAuth._get_rsa_public_key
def _get_rsa_public_key(cert): """ PyOpenSSL does not provide a public method to export the public key from a certificate as a properly formatted ASN.1 RSAPublicKey structure. There are 'hacks' which use dump_privatekey(crypto.FILETYPE_ASN1, <public_key>), but this dumps the public key within a PrivateKeyInfo structure which is not suitable for a comparison. This approach uses the PyOpenSSL CFFI bindings to invoke the i2d_RSAPublicKey() which correctly extracts the key material in an ASN.1 RSAPublicKey structure. :param cert: The ASN.1 Encoded Certificate :return: The ASN.1 Encoded RSAPublicKey structure containing the supplied certificates public Key """ openssl_pkey = cert.get_pubkey() openssl_lib = _util.binding.lib ffi = _util.binding.ffi buf = ffi.new("unsigned char **") rsa = openssl_lib.EVP_PKEY_get1_RSA(openssl_pkey._pkey) length = openssl_lib.i2d_RSAPublicKey(rsa, buf) public_key = ffi.buffer(buf[0], length)[:] ffi.gc(buf[0], openssl_lib.OPENSSL_free) return public_key
python
def _get_rsa_public_key(cert): """ PyOpenSSL does not provide a public method to export the public key from a certificate as a properly formatted ASN.1 RSAPublicKey structure. There are 'hacks' which use dump_privatekey(crypto.FILETYPE_ASN1, <public_key>), but this dumps the public key within a PrivateKeyInfo structure which is not suitable for a comparison. This approach uses the PyOpenSSL CFFI bindings to invoke the i2d_RSAPublicKey() which correctly extracts the key material in an ASN.1 RSAPublicKey structure. :param cert: The ASN.1 Encoded Certificate :return: The ASN.1 Encoded RSAPublicKey structure containing the supplied certificates public Key """ openssl_pkey = cert.get_pubkey() openssl_lib = _util.binding.lib ffi = _util.binding.ffi buf = ffi.new("unsigned char **") rsa = openssl_lib.EVP_PKEY_get1_RSA(openssl_pkey._pkey) length = openssl_lib.i2d_RSAPublicKey(rsa, buf) public_key = ffi.buffer(buf[0], length)[:] ffi.gc(buf[0], openssl_lib.OPENSSL_free) return public_key
[ "def", "_get_rsa_public_key", "(", "cert", ")", ":", "openssl_pkey", "=", "cert", ".", "get_pubkey", "(", ")", "openssl_lib", "=", "_util", ".", "binding", ".", "lib", "ffi", "=", "_util", ".", "binding", ".", "ffi", "buf", "=", "ffi", ".", "new", "(",...
PyOpenSSL does not provide a public method to export the public key from a certificate as a properly formatted ASN.1 RSAPublicKey structure. There are 'hacks' which use dump_privatekey(crypto.FILETYPE_ASN1, <public_key>), but this dumps the public key within a PrivateKeyInfo structure which is not suitable for a comparison. This approach uses the PyOpenSSL CFFI bindings to invoke the i2d_RSAPublicKey() which correctly extracts the key material in an ASN.1 RSAPublicKey structure. :param cert: The ASN.1 Encoded Certificate :return: The ASN.1 Encoded RSAPublicKey structure containing the supplied certificates public Key
[ "PyOpenSSL", "does", "not", "provide", "a", "public", "method", "to", "export", "the", "public", "key", "from", "a", "certificate", "as", "a", "properly", "formatted", "ASN", ".", "1", "RSAPublicKey", "structure", ".", "There", "are", "hacks", "which", "use"...
train
https://github.com/ianclegg/winrmlib/blob/489b3ce5d0e6a9a7301ba5d345ba82fa824c1431/winrmlib/api/authentication.py#L564-L582
ianclegg/winrmlib
winrmlib/api/authentication.py
HttpCredSSPAuth._credssp_processor
def _credssp_processor(self, context): """ Implements a state machine :return: """ http_response = (yield) credssp_context = self._get_credssp_header(http_response) if credssp_context is None: raise Exception('The remote host did not respond with a \'www-authenticate\' header containing ' '\'CredSSP\' as an available authentication mechanism') # 1. First, secure the channel with a TLS Handshake if not credssp_context: self.tls_connection = SSL.Connection(self.tls_context) self.tls_connection.set_connect_state() while True: try: self.tls_connection.do_handshake() except SSL.WantReadError: http_response = yield self._set_credssp_header(http_response.request, self.tls_connection.bio_read(4096)) credssp_context = self._get_credssp_header(http_response) if credssp_context is None or not credssp_context: raise Exception('The remote host rejected the CredSSP TLS handshake') self.tls_connection.bio_write(credssp_context) else: break # add logging to display the negotiated cipher (move to a function) openssl_lib = _util.binding.lib ffi = _util.binding.ffi cipher = openssl_lib.SSL_get_current_cipher(self.tls_connection._ssl) cipher_name = ffi.string( openssl_lib.SSL_CIPHER_get_name(cipher)) log.debug("Negotiated TLS Cipher: %s", cipher_name) # 2. 
Send an TSRequest containing an NTLM Negotiate Request context_generator = context.initialize_security_context() negotiate_token = context_generator.send(None) log.debug("NTLM Type 1: %s", AsHex(negotiate_token)) ts_request = TSRequest() ts_request['negoTokens'] = negotiate_token self.tls_connection.send(ts_request.getData()) http_response = yield self._set_credssp_header(http_response.request, self.tls_connection.bio_read(4096)) # Extract and decrypt the encoded TSRequest response struct from the Negotiate header authenticate_header = self._get_credssp_header(http_response) if not authenticate_header or authenticate_header is None: raise Exception("The remote host rejected the CredSSP negotiation token") self.tls_connection.bio_write(authenticate_header) # NTLM Challenge Response and Server Public Key Validation ts_request = TSRequest() ts_request.fromString(self.tls_connection.recv(8192)) challenge_token = ts_request['negoTokens'] log.debug("NTLM Type 2: %s", AsHex(challenge_token)) server_cert = self.tls_connection.get_peer_certificate() # not using channel bindings #certificate_digest = base64.b16decode(server_cert.digest('SHA256').replace(':', '')) ## channel_binding_structure = gss_channel_bindings_struct() ## channel_binding_structure['application_data'] = "tls-server-end-point:" + certificate_digest public_key = HttpCredSSPAuth._get_rsa_public_key(server_cert) # The _RSAPublicKey must be 'wrapped' using the negotiated GSSAPI mechanism and send to the server along with # the final SPNEGO token. 
This step of the CredSSP protocol is designed to thwart 'man-in-the-middle' attacks # Build and encrypt the response to the server ts_request = TSRequest() type3= context_generator.send(challenge_token) log.debug("NTLM Type 3: %s", AsHex(type3)) ts_request['negoTokens'] = type3 public_key_encrypted, signature = context.wrap_message(public_key) ts_request['pubKeyAuth'] = signature + public_key_encrypted self.tls_connection.send(ts_request.getData()) enc_type3 = self.tls_connection.bio_read(8192) http_response = yield self._set_credssp_header(http_response.request, enc_type3) # TLS decrypt the response, then ASN decode and check the error code auth_response = self._get_credssp_header(http_response) if not auth_response or auth_response is None: raise Exception("The remote host rejected the challenge response") self.tls_connection.bio_write(auth_response) ts_request = TSRequest() ts_request.fromString(self.tls_connection.recv(8192)) # TODO: determine how to validate server certificate here #a = ts_request['pubKeyAuth'] # print ":".join("{:02x}".format(ord(c)) for c in a) # 4. Send the Credentials to be delegated, these are encrypted with both NTLM v2 and then by TLS tsp = TSPasswordCreds() tsp['domain'] = self.password_authenticator.get_domain() tsp['username'] = self.password_authenticator.get_username() tsp['password'] = self.password_authenticator.get_password() tsc = TSCredentials() tsc['type'] = 1 tsc['credentials'] = tsp.getData() ts_request = TSRequest() encrypted, signature = context.wrap_message(tsc.getData()) ts_request['authInfo'] = signature + encrypted self.tls_connection.send(ts_request.getData()) token = self.tls_connection.bio_read(8192) http_response.request.body = self.body http_response = yield self._set_credssp_header(self._encrypt(http_response.request, self.tls_connection), token) if http_response.status_code == 401: raise Exception('Authentication Failed')
python
def _credssp_processor(self, context): """ Implements a state machine :return: """ http_response = (yield) credssp_context = self._get_credssp_header(http_response) if credssp_context is None: raise Exception('The remote host did not respond with a \'www-authenticate\' header containing ' '\'CredSSP\' as an available authentication mechanism') # 1. First, secure the channel with a TLS Handshake if not credssp_context: self.tls_connection = SSL.Connection(self.tls_context) self.tls_connection.set_connect_state() while True: try: self.tls_connection.do_handshake() except SSL.WantReadError: http_response = yield self._set_credssp_header(http_response.request, self.tls_connection.bio_read(4096)) credssp_context = self._get_credssp_header(http_response) if credssp_context is None or not credssp_context: raise Exception('The remote host rejected the CredSSP TLS handshake') self.tls_connection.bio_write(credssp_context) else: break # add logging to display the negotiated cipher (move to a function) openssl_lib = _util.binding.lib ffi = _util.binding.ffi cipher = openssl_lib.SSL_get_current_cipher(self.tls_connection._ssl) cipher_name = ffi.string( openssl_lib.SSL_CIPHER_get_name(cipher)) log.debug("Negotiated TLS Cipher: %s", cipher_name) # 2. 
Send an TSRequest containing an NTLM Negotiate Request context_generator = context.initialize_security_context() negotiate_token = context_generator.send(None) log.debug("NTLM Type 1: %s", AsHex(negotiate_token)) ts_request = TSRequest() ts_request['negoTokens'] = negotiate_token self.tls_connection.send(ts_request.getData()) http_response = yield self._set_credssp_header(http_response.request, self.tls_connection.bio_read(4096)) # Extract and decrypt the encoded TSRequest response struct from the Negotiate header authenticate_header = self._get_credssp_header(http_response) if not authenticate_header or authenticate_header is None: raise Exception("The remote host rejected the CredSSP negotiation token") self.tls_connection.bio_write(authenticate_header) # NTLM Challenge Response and Server Public Key Validation ts_request = TSRequest() ts_request.fromString(self.tls_connection.recv(8192)) challenge_token = ts_request['negoTokens'] log.debug("NTLM Type 2: %s", AsHex(challenge_token)) server_cert = self.tls_connection.get_peer_certificate() # not using channel bindings #certificate_digest = base64.b16decode(server_cert.digest('SHA256').replace(':', '')) ## channel_binding_structure = gss_channel_bindings_struct() ## channel_binding_structure['application_data'] = "tls-server-end-point:" + certificate_digest public_key = HttpCredSSPAuth._get_rsa_public_key(server_cert) # The _RSAPublicKey must be 'wrapped' using the negotiated GSSAPI mechanism and send to the server along with # the final SPNEGO token. 
This step of the CredSSP protocol is designed to thwart 'man-in-the-middle' attacks # Build and encrypt the response to the server ts_request = TSRequest() type3= context_generator.send(challenge_token) log.debug("NTLM Type 3: %s", AsHex(type3)) ts_request['negoTokens'] = type3 public_key_encrypted, signature = context.wrap_message(public_key) ts_request['pubKeyAuth'] = signature + public_key_encrypted self.tls_connection.send(ts_request.getData()) enc_type3 = self.tls_connection.bio_read(8192) http_response = yield self._set_credssp_header(http_response.request, enc_type3) # TLS decrypt the response, then ASN decode and check the error code auth_response = self._get_credssp_header(http_response) if not auth_response or auth_response is None: raise Exception("The remote host rejected the challenge response") self.tls_connection.bio_write(auth_response) ts_request = TSRequest() ts_request.fromString(self.tls_connection.recv(8192)) # TODO: determine how to validate server certificate here #a = ts_request['pubKeyAuth'] # print ":".join("{:02x}".format(ord(c)) for c in a) # 4. Send the Credentials to be delegated, these are encrypted with both NTLM v2 and then by TLS tsp = TSPasswordCreds() tsp['domain'] = self.password_authenticator.get_domain() tsp['username'] = self.password_authenticator.get_username() tsp['password'] = self.password_authenticator.get_password() tsc = TSCredentials() tsc['type'] = 1 tsc['credentials'] = tsp.getData() ts_request = TSRequest() encrypted, signature = context.wrap_message(tsc.getData()) ts_request['authInfo'] = signature + encrypted self.tls_connection.send(ts_request.getData()) token = self.tls_connection.bio_read(8192) http_response.request.body = self.body http_response = yield self._set_credssp_header(self._encrypt(http_response.request, self.tls_connection), token) if http_response.status_code == 401: raise Exception('Authentication Failed')
[ "def", "_credssp_processor", "(", "self", ",", "context", ")", ":", "http_response", "=", "(", "yield", ")", "credssp_context", "=", "self", ".", "_get_credssp_header", "(", "http_response", ")", "if", "credssp_context", "is", "None", ":", "raise", "Exception", ...
Implements a state machine :return:
[ "Implements", "a", "state", "machine", ":", "return", ":" ]
train
https://github.com/ianclegg/winrmlib/blob/489b3ce5d0e6a9a7301ba5d345ba82fa824c1431/winrmlib/api/authentication.py#L584-L696
pebble/libpebble2
libpebble2/services/install.py
AppInstaller.install
def install(self, force_install=False): """ Installs an app. Blocks until the installation is complete, or raises :exc:`AppInstallError` if it fails. While this method runs, "progress" events will be emitted regularly with the following signature: :: (sent_this_interval, sent_total, total_size) :param force_install: Install even if installing this pbw on this platform is usually forbidden. :type force_install: bool """ if not (force_install or self._bundle.should_permit_install()): raise AppInstallError("This pbw is not supported on this platform.") if self._pebble.firmware_version.major < 3: self._install_legacy2() else: self._install_modern()
python
def install(self, force_install=False): """ Installs an app. Blocks until the installation is complete, or raises :exc:`AppInstallError` if it fails. While this method runs, "progress" events will be emitted regularly with the following signature: :: (sent_this_interval, sent_total, total_size) :param force_install: Install even if installing this pbw on this platform is usually forbidden. :type force_install: bool """ if not (force_install or self._bundle.should_permit_install()): raise AppInstallError("This pbw is not supported on this platform.") if self._pebble.firmware_version.major < 3: self._install_legacy2() else: self._install_modern()
[ "def", "install", "(", "self", ",", "force_install", "=", "False", ")", ":", "if", "not", "(", "force_install", "or", "self", ".", "_bundle", ".", "should_permit_install", "(", ")", ")", ":", "raise", "AppInstallError", "(", "\"This pbw is not supported on this ...
Installs an app. Blocks until the installation is complete, or raises :exc:`AppInstallError` if it fails. While this method runs, "progress" events will be emitted regularly with the following signature: :: (sent_this_interval, sent_total, total_size) :param force_install: Install even if installing this pbw on this platform is usually forbidden. :type force_install: bool
[ "Installs", "an", "app", ".", "Blocks", "until", "the", "installation", "is", "complete", "or", "raises", ":", "exc", ":", "AppInstallError", "if", "it", "fails", "." ]
train
https://github.com/pebble/libpebble2/blob/23e2eb92cfc084e6f9e8c718711ac994ef606d18/libpebble2/services/install.py#L54-L70
schneiderfelipe/pyrrole
pyrrole/core.py
_parse_chemical_equation
def _parse_chemical_equation(value): """ Parse the chemical equation mini-language. See the docstring of `ChemicalEquation` for more. Parameters ---------- value : `str` A string in chemical equation mini-language. Returns ------- mapping A mapping in the format specified by the mini-language (see notes on `ChemicalEquation`). Examples -------- >>> from pyrrole.core import _parse_chemical_equation >>> parsed = _parse_chemical_equation('4 A + 3 B <- 2 C + D') >>> parsed['arrow'] '->' >>> parsed['products'][1]['species'] 'B' >>> parsed['reactants'][0]['coefficient'] 2 """ arrow = _pp.oneOf('-> <- <=>').setResultsName('arrow') species = _pp.Word(_pp.printables).setResultsName('species') coefficient = (_pp.Optional(_pp.Word(_pp.nums), default=1) .setParseAction(_pp.tokenMap(int)) .setResultsName('coefficient')) group_ = _pp.Group(coefficient + _pp.Optional(_pp.Suppress('*')) + species) reactants = ((group_ + _pp.ZeroOrMore(_pp.Suppress('+') + group_)) .setResultsName('reactants')) products = ((group_ + _pp.ZeroOrMore(_pp.Suppress('+') + group_)) .setResultsName('products')) grammar = reactants + arrow + products parsed = grammar.parseString(value).asDict() if parsed['arrow'] == '<-': parsed['reactants'], parsed['products'] \ = parsed['products'], parsed['reactants'] parsed['arrow'] = '->' return parsed
python
def _parse_chemical_equation(value): """ Parse the chemical equation mini-language. See the docstring of `ChemicalEquation` for more. Parameters ---------- value : `str` A string in chemical equation mini-language. Returns ------- mapping A mapping in the format specified by the mini-language (see notes on `ChemicalEquation`). Examples -------- >>> from pyrrole.core import _parse_chemical_equation >>> parsed = _parse_chemical_equation('4 A + 3 B <- 2 C + D') >>> parsed['arrow'] '->' >>> parsed['products'][1]['species'] 'B' >>> parsed['reactants'][0]['coefficient'] 2 """ arrow = _pp.oneOf('-> <- <=>').setResultsName('arrow') species = _pp.Word(_pp.printables).setResultsName('species') coefficient = (_pp.Optional(_pp.Word(_pp.nums), default=1) .setParseAction(_pp.tokenMap(int)) .setResultsName('coefficient')) group_ = _pp.Group(coefficient + _pp.Optional(_pp.Suppress('*')) + species) reactants = ((group_ + _pp.ZeroOrMore(_pp.Suppress('+') + group_)) .setResultsName('reactants')) products = ((group_ + _pp.ZeroOrMore(_pp.Suppress('+') + group_)) .setResultsName('products')) grammar = reactants + arrow + products parsed = grammar.parseString(value).asDict() if parsed['arrow'] == '<-': parsed['reactants'], parsed['products'] \ = parsed['products'], parsed['reactants'] parsed['arrow'] = '->' return parsed
[ "def", "_parse_chemical_equation", "(", "value", ")", ":", "arrow", "=", "_pp", ".", "oneOf", "(", "'-> <- <=>'", ")", ".", "setResultsName", "(", "'arrow'", ")", "species", "=", "_pp", ".", "Word", "(", "_pp", ".", "printables", ")", ".", "setResultsName"...
Parse the chemical equation mini-language. See the docstring of `ChemicalEquation` for more. Parameters ---------- value : `str` A string in chemical equation mini-language. Returns ------- mapping A mapping in the format specified by the mini-language (see notes on `ChemicalEquation`). Examples -------- >>> from pyrrole.core import _parse_chemical_equation >>> parsed = _parse_chemical_equation('4 A + 3 B <- 2 C + D') >>> parsed['arrow'] '->' >>> parsed['products'][1]['species'] 'B' >>> parsed['reactants'][0]['coefficient'] 2
[ "Parse", "the", "chemical", "equation", "mini", "-", "language", "." ]
train
https://github.com/schneiderfelipe/pyrrole/blob/13e26accc9a059f0ab69773648b24292fe1fbfd6/pyrrole/core.py#L24-L72
schneiderfelipe/pyrrole
pyrrole/core.py
_get_chemical_equation_piece
def _get_chemical_equation_piece(species_list, coefficients): """ Produce a string from chemical species and their coefficients. Parameters ---------- species_list : iterable of `str` Iterable of chemical species. coefficients : iterable of `float` Nonzero stoichiometric coefficients. The length of `species_list` and `coefficients` must be the same. Negative values are made positive and zeros are ignored along with their respective species. Examples -------- >>> from pyrrole.core import _get_chemical_equation_piece >>> _get_chemical_equation_piece(["AcOH"], [2]) '2 AcOH' >>> _get_chemical_equation_piece(["AcO-", "H+"], [-1, -1]) 'AcO- + H+' >>> _get_chemical_equation_piece("ABCD", [-2, -1, 0, -1]) '2 A + B + D' """ def _get_token(species, coefficient): if coefficient == 1: return '{}'.format(species) else: return '{:g} {}'.format(coefficient, species) bag = [] for species, coefficient in zip(species_list, coefficients): if coefficient < 0: coefficient = -coefficient if coefficient > 0: bag.append(_get_token(species, coefficient)) return '{}'.format(' + '.join(bag))
python
def _get_chemical_equation_piece(species_list, coefficients): """ Produce a string from chemical species and their coefficients. Parameters ---------- species_list : iterable of `str` Iterable of chemical species. coefficients : iterable of `float` Nonzero stoichiometric coefficients. The length of `species_list` and `coefficients` must be the same. Negative values are made positive and zeros are ignored along with their respective species. Examples -------- >>> from pyrrole.core import _get_chemical_equation_piece >>> _get_chemical_equation_piece(["AcOH"], [2]) '2 AcOH' >>> _get_chemical_equation_piece(["AcO-", "H+"], [-1, -1]) 'AcO- + H+' >>> _get_chemical_equation_piece("ABCD", [-2, -1, 0, -1]) '2 A + B + D' """ def _get_token(species, coefficient): if coefficient == 1: return '{}'.format(species) else: return '{:g} {}'.format(coefficient, species) bag = [] for species, coefficient in zip(species_list, coefficients): if coefficient < 0: coefficient = -coefficient if coefficient > 0: bag.append(_get_token(species, coefficient)) return '{}'.format(' + '.join(bag))
[ "def", "_get_chemical_equation_piece", "(", "species_list", ",", "coefficients", ")", ":", "def", "_get_token", "(", "species", ",", "coefficient", ")", ":", "if", "coefficient", "==", "1", ":", "return", "'{}'", ".", "format", "(", "species", ")", "else", "...
Produce a string from chemical species and their coefficients. Parameters ---------- species_list : iterable of `str` Iterable of chemical species. coefficients : iterable of `float` Nonzero stoichiometric coefficients. The length of `species_list` and `coefficients` must be the same. Negative values are made positive and zeros are ignored along with their respective species. Examples -------- >>> from pyrrole.core import _get_chemical_equation_piece >>> _get_chemical_equation_piece(["AcOH"], [2]) '2 AcOH' >>> _get_chemical_equation_piece(["AcO-", "H+"], [-1, -1]) 'AcO- + H+' >>> _get_chemical_equation_piece("ABCD", [-2, -1, 0, -1]) '2 A + B + D'
[ "Produce", "a", "string", "from", "chemical", "species", "and", "their", "coefficients", "." ]
train
https://github.com/schneiderfelipe/pyrrole/blob/13e26accc9a059f0ab69773648b24292fe1fbfd6/pyrrole/core.py#L75-L111
schneiderfelipe/pyrrole
pyrrole/core.py
_check_data
def _check_data(data): """ Check a data object for inconsistencies. Parameters ---------- data : `pandas.DataFrame` A `data` object, i.e., a table whose rows store information about chemical species, indexed by chemical species. Warns ----- UserWarning Warned if a ground state species has one or more imaginary vibrational frequencies, or if a transition state species has zero, two or more imaginary vibrational frequencies. Examples -------- >>> import pandas as pd >>> from pyrrole.core import _check_data >>> data = (pd.DataFrame([{'name': 'A', 'vibfreqs': [0., 1., 2.]}, ... {'name': 'B', 'vibfreqs': [0., -1., 2.]}, ... {'name': 'C', 'vibfreqs': [0., -1., -2.]}, ... {'name': 'A#', 'vibfreqs': [0., 1., 2.]}, ... {'name': 'C#', 'vibfreqs': [0., -2., -1.]}, ... {'name': 'B#', 'vibfreqs': [0., -1., 2.]}]) ... .set_index('name')) >>> _check_data(data) """ if "vibfreqs" in data.columns: for species in data.index: vibfreqs = data.loc[species, "vibfreqs"] nimagvibfreqs = _np.sum(_np.array(vibfreqs) < 0) if species[-1] == '#' and nimagvibfreqs != 1: _warnings.warn("'{}' should have 1 imaginary vibfreqs but {} " "found".format(species, nimagvibfreqs)) elif species[-1] != '#' and nimagvibfreqs != 0: _warnings.warn("'{}' should have no imaginary vibfreqs but {} " "found".format(species, nimagvibfreqs))
python
def _check_data(data): """ Check a data object for inconsistencies. Parameters ---------- data : `pandas.DataFrame` A `data` object, i.e., a table whose rows store information about chemical species, indexed by chemical species. Warns ----- UserWarning Warned if a ground state species has one or more imaginary vibrational frequencies, or if a transition state species has zero, two or more imaginary vibrational frequencies. Examples -------- >>> import pandas as pd >>> from pyrrole.core import _check_data >>> data = (pd.DataFrame([{'name': 'A', 'vibfreqs': [0., 1., 2.]}, ... {'name': 'B', 'vibfreqs': [0., -1., 2.]}, ... {'name': 'C', 'vibfreqs': [0., -1., -2.]}, ... {'name': 'A#', 'vibfreqs': [0., 1., 2.]}, ... {'name': 'C#', 'vibfreqs': [0., -2., -1.]}, ... {'name': 'B#', 'vibfreqs': [0., -1., 2.]}]) ... .set_index('name')) >>> _check_data(data) """ if "vibfreqs" in data.columns: for species in data.index: vibfreqs = data.loc[species, "vibfreqs"] nimagvibfreqs = _np.sum(_np.array(vibfreqs) < 0) if species[-1] == '#' and nimagvibfreqs != 1: _warnings.warn("'{}' should have 1 imaginary vibfreqs but {} " "found".format(species, nimagvibfreqs)) elif species[-1] != '#' and nimagvibfreqs != 0: _warnings.warn("'{}' should have no imaginary vibfreqs but {} " "found".format(species, nimagvibfreqs))
[ "def", "_check_data", "(", "data", ")", ":", "if", "\"vibfreqs\"", "in", "data", ".", "columns", ":", "for", "species", "in", "data", ".", "index", ":", "vibfreqs", "=", "data", ".", "loc", "[", "species", ",", "\"vibfreqs\"", "]", "nimagvibfreqs", "=", ...
Check a data object for inconsistencies. Parameters ---------- data : `pandas.DataFrame` A `data` object, i.e., a table whose rows store information about chemical species, indexed by chemical species. Warns ----- UserWarning Warned if a ground state species has one or more imaginary vibrational frequencies, or if a transition state species has zero, two or more imaginary vibrational frequencies. Examples -------- >>> import pandas as pd >>> from pyrrole.core import _check_data >>> data = (pd.DataFrame([{'name': 'A', 'vibfreqs': [0., 1., 2.]}, ... {'name': 'B', 'vibfreqs': [0., -1., 2.]}, ... {'name': 'C', 'vibfreqs': [0., -1., -2.]}, ... {'name': 'A#', 'vibfreqs': [0., 1., 2.]}, ... {'name': 'C#', 'vibfreqs': [0., -2., -1.]}, ... {'name': 'B#', 'vibfreqs': [0., -1., 2.]}]) ... .set_index('name')) >>> _check_data(data)
[ "Check", "a", "data", "object", "for", "inconsistencies", "." ]
train
https://github.com/schneiderfelipe/pyrrole/blob/13e26accc9a059f0ab69773648b24292fe1fbfd6/pyrrole/core.py#L114-L154
schneiderfelipe/pyrrole
pyrrole/core.py
_split_chemical_equations
def _split_chemical_equations(value): """ Split a string with sequential chemical equations into separate strings. Each string in the returned iterable represents a single chemical equation of the input. See the docstrings of `ChemicalEquation` and `ChemicalSystem` for more. Parameters ---------- value : `str` A string with sequential chemical equations in the mini-language (see notes on `ChemicalEquation`). Returns ------- iterable of `str` An iterable of strings in the format specified by the mini-language (see notes on `ChemicalEquation`). Examples -------- >>> from pyrrole.core import _split_chemical_equations >>> _split_chemical_equations('A + B -> C + D -> D + E <=> F + G <- H + I') ['A + B -> C + D', 'C + D -> D + E', 'D + E <=> F + G', 'F + G <- H + I'] """ pieces = _split_arrows(value) return [(pieces[i] + pieces[i + 1] + pieces[i + 2]).strip() for i in range(0, len(pieces) - 2, 2)]
python
def _split_chemical_equations(value): """ Split a string with sequential chemical equations into separate strings. Each string in the returned iterable represents a single chemical equation of the input. See the docstrings of `ChemicalEquation` and `ChemicalSystem` for more. Parameters ---------- value : `str` A string with sequential chemical equations in the mini-language (see notes on `ChemicalEquation`). Returns ------- iterable of `str` An iterable of strings in the format specified by the mini-language (see notes on `ChemicalEquation`). Examples -------- >>> from pyrrole.core import _split_chemical_equations >>> _split_chemical_equations('A + B -> C + D -> D + E <=> F + G <- H + I') ['A + B -> C + D', 'C + D -> D + E', 'D + E <=> F + G', 'F + G <- H + I'] """ pieces = _split_arrows(value) return [(pieces[i] + pieces[i + 1] + pieces[i + 2]).strip() for i in range(0, len(pieces) - 2, 2)]
[ "def", "_split_chemical_equations", "(", "value", ")", ":", "pieces", "=", "_split_arrows", "(", "value", ")", "return", "[", "(", "pieces", "[", "i", "]", "+", "pieces", "[", "i", "+", "1", "]", "+", "pieces", "[", "i", "+", "2", "]", ")", ".", ...
Split a string with sequential chemical equations into separate strings. Each string in the returned iterable represents a single chemical equation of the input. See the docstrings of `ChemicalEquation` and `ChemicalSystem` for more. Parameters ---------- value : `str` A string with sequential chemical equations in the mini-language (see notes on `ChemicalEquation`). Returns ------- iterable of `str` An iterable of strings in the format specified by the mini-language (see notes on `ChemicalEquation`). Examples -------- >>> from pyrrole.core import _split_chemical_equations >>> _split_chemical_equations('A + B -> C + D -> D + E <=> F + G <- H + I') ['A + B -> C + D', 'C + D -> D + E', 'D + E <=> F + G', 'F + G <- H + I']
[ "Split", "a", "string", "with", "sequential", "chemical", "equations", "into", "separate", "strings", "." ]
train
https://github.com/schneiderfelipe/pyrrole/blob/13e26accc9a059f0ab69773648b24292fe1fbfd6/pyrrole/core.py#L584-L615
schneiderfelipe/pyrrole
pyrrole/core.py
ChemicalEquation.to_series
def to_series(self, only=None, intensive_columns=["temperature", "pressure"], check_data=True): """ Produce a data record for `ChemicalEquation`. All possible linear differences for all numeric attributes are computed and stored in the returned `pandas.Series` object (see examples below). This allows for easy application and manipulation of `Hess's law <https://en.wikipedia.org/wiki/Hess%27s_law>`_ to chemical equations (see examples below). Parameters ---------- only : ``"reactants"``, ``"products"``, optional Instead of the standard behaviour (difference of sums), sum numeric attributes of either reactants or products only. If given, absolute coefficients are used. intensive_columns : iterable of `str`, optional A set of column names representing intensive properties (e.g. bulk properties) whose values are not summable. Those must be constant throughout the chemical equation. check_data : `bool`, optional Whether to check data object for inconsistencies. Returns ------- series : `pandas.Series` Data record of attribute differences, whose name is the canonical string representation of the `ChemicalEquation` or, if `only` is given, a string representing either reactants or products (see examples below). Raises ------ ValueError Raised if `self.data` wasn't defined (e.g. is `None`), if `only` is something other than ``"reactants"`` or ``"products"``, or if two or more distinct values for an intensive property have been found. Examples -------- >>> from pyrrole import ChemicalEquation >>> from pyrrole.atoms import create_data, read_cclib >>> data = create_data( ... read_cclib("data/acetate/acetic_acid.out", ... "AcOH(g)"), ... read_cclib("data/acetate/acetic_acid@water.out", ... "AcOH(aq)")) >>> equilibrium = ChemicalEquation("AcOH(g) <=> AcOH(aq)", ... 
data) >>> equilibrium.to_series() charge 0.000000 enthalpy -0.010958 entropy -0.000198 freeenergy -0.010759 mult 0.000000 natom 0.000000 nbasis 0.000000 nmo 0.000000 pressure 1.000000 temperature 298.150000 Name: AcOH(g) <=> AcOH(aq), dtype: float64 Sums of either reactants or products can be computed: >>> equilibrium.to_series("reactants") charge 0.000000 enthalpy -228.533374 entropy 0.031135 freeenergy -228.564509 mult 1.000000 natom 8.000000 nbasis 68.000000 nmo 68.000000 pressure 1.000000 temperature 298.150000 Name: AcOH(g), dtype: float64 """ if self.data is None: # TODO: should an empty Series be returned? raise ValueError("data not defined") # TODO: find a way to keep categorical columns. Keep if they match? columns = self.data.select_dtypes('number').columns intensive_columns = [column for column in columns if column in intensive_columns] extensive_columns = [column for column in columns if column not in intensive_columns] columns = extensive_columns + intensive_columns if only is None: species = self.species elif only == "reactants": species = sorted(self.reactants) elif only == "products": species = sorted(self.products) else: raise ValueError("only must be either 'reactants' or 'products' " "('{}' given)".format(only)) if check_data: _check_data(self.data.loc[species]) if all([s in self.data.index for s in species]): series = (self.data.loc[species, extensive_columns] .mul(self.coefficient, axis="index").sum("index")) for column in intensive_columns: vals = self.data[column].unique() if len(vals) > 1: raise ValueError("different values for {}: " "{}".format(column, vals)) series[column] = vals[0] else: series = _pd.Series(_np.nan, index=columns) if only is None: name = self.__str__() else: coefficients = self.coefficient[species] name = _get_chemical_equation_piece(species, coefficients) if only == "reactants": series[extensive_columns] = -series[extensive_columns] # Avoid negative zero # (see https://stackoverflow.com/a/11010791/4039050) series = 
series + 0. return series.rename(name)
python
def to_series(self, only=None, intensive_columns=["temperature", "pressure"], check_data=True): """ Produce a data record for `ChemicalEquation`. All possible linear differences for all numeric attributes are computed and stored in the returned `pandas.Series` object (see examples below). This allows for easy application and manipulation of `Hess's law <https://en.wikipedia.org/wiki/Hess%27s_law>`_ to chemical equations (see examples below). Parameters ---------- only : ``"reactants"``, ``"products"``, optional Instead of the standard behaviour (difference of sums), sum numeric attributes of either reactants or products only. If given, absolute coefficients are used. intensive_columns : iterable of `str`, optional A set of column names representing intensive properties (e.g. bulk properties) whose values are not summable. Those must be constant throughout the chemical equation. check_data : `bool`, optional Whether to check data object for inconsistencies. Returns ------- series : `pandas.Series` Data record of attribute differences, whose name is the canonical string representation of the `ChemicalEquation` or, if `only` is given, a string representing either reactants or products (see examples below). Raises ------ ValueError Raised if `self.data` wasn't defined (e.g. is `None`), if `only` is something other than ``"reactants"`` or ``"products"``, or if two or more distinct values for an intensive property have been found. Examples -------- >>> from pyrrole import ChemicalEquation >>> from pyrrole.atoms import create_data, read_cclib >>> data = create_data( ... read_cclib("data/acetate/acetic_acid.out", ... "AcOH(g)"), ... read_cclib("data/acetate/acetic_acid@water.out", ... "AcOH(aq)")) >>> equilibrium = ChemicalEquation("AcOH(g) <=> AcOH(aq)", ... 
data) >>> equilibrium.to_series() charge 0.000000 enthalpy -0.010958 entropy -0.000198 freeenergy -0.010759 mult 0.000000 natom 0.000000 nbasis 0.000000 nmo 0.000000 pressure 1.000000 temperature 298.150000 Name: AcOH(g) <=> AcOH(aq), dtype: float64 Sums of either reactants or products can be computed: >>> equilibrium.to_series("reactants") charge 0.000000 enthalpy -228.533374 entropy 0.031135 freeenergy -228.564509 mult 1.000000 natom 8.000000 nbasis 68.000000 nmo 68.000000 pressure 1.000000 temperature 298.150000 Name: AcOH(g), dtype: float64 """ if self.data is None: # TODO: should an empty Series be returned? raise ValueError("data not defined") # TODO: find a way to keep categorical columns. Keep if they match? columns = self.data.select_dtypes('number').columns intensive_columns = [column for column in columns if column in intensive_columns] extensive_columns = [column for column in columns if column not in intensive_columns] columns = extensive_columns + intensive_columns if only is None: species = self.species elif only == "reactants": species = sorted(self.reactants) elif only == "products": species = sorted(self.products) else: raise ValueError("only must be either 'reactants' or 'products' " "('{}' given)".format(only)) if check_data: _check_data(self.data.loc[species]) if all([s in self.data.index for s in species]): series = (self.data.loc[species, extensive_columns] .mul(self.coefficient, axis="index").sum("index")) for column in intensive_columns: vals = self.data[column].unique() if len(vals) > 1: raise ValueError("different values for {}: " "{}".format(column, vals)) series[column] = vals[0] else: series = _pd.Series(_np.nan, index=columns) if only is None: name = self.__str__() else: coefficients = self.coefficient[species] name = _get_chemical_equation_piece(species, coefficients) if only == "reactants": series[extensive_columns] = -series[extensive_columns] # Avoid negative zero # (see https://stackoverflow.com/a/11010791/4039050) series = 
series + 0. return series.rename(name)
[ "def", "to_series", "(", "self", ",", "only", "=", "None", ",", "intensive_columns", "=", "[", "\"temperature\"", ",", "\"pressure\"", "]", ",", "check_data", "=", "True", ")", ":", "if", "self", ".", "data", "is", "None", ":", "# TODO: should an empty Serie...
Produce a data record for `ChemicalEquation`. All possible linear differences for all numeric attributes are computed and stored in the returned `pandas.Series` object (see examples below). This allows for easy application and manipulation of `Hess's law <https://en.wikipedia.org/wiki/Hess%27s_law>`_ to chemical equations (see examples below). Parameters ---------- only : ``"reactants"``, ``"products"``, optional Instead of the standard behaviour (difference of sums), sum numeric attributes of either reactants or products only. If given, absolute coefficients are used. intensive_columns : iterable of `str`, optional A set of column names representing intensive properties (e.g. bulk properties) whose values are not summable. Those must be constant throughout the chemical equation. check_data : `bool`, optional Whether to check data object for inconsistencies. Returns ------- series : `pandas.Series` Data record of attribute differences, whose name is the canonical string representation of the `ChemicalEquation` or, if `only` is given, a string representing either reactants or products (see examples below). Raises ------ ValueError Raised if `self.data` wasn't defined (e.g. is `None`), if `only` is something other than ``"reactants"`` or ``"products"``, or if two or more distinct values for an intensive property have been found. Examples -------- >>> from pyrrole import ChemicalEquation >>> from pyrrole.atoms import create_data, read_cclib >>> data = create_data( ... read_cclib("data/acetate/acetic_acid.out", ... "AcOH(g)"), ... read_cclib("data/acetate/acetic_acid@water.out", ... "AcOH(aq)")) >>> equilibrium = ChemicalEquation("AcOH(g) <=> AcOH(aq)", ... 
data) >>> equilibrium.to_series() charge 0.000000 enthalpy -0.010958 entropy -0.000198 freeenergy -0.010759 mult 0.000000 natom 0.000000 nbasis 0.000000 nmo 0.000000 pressure 1.000000 temperature 298.150000 Name: AcOH(g) <=> AcOH(aq), dtype: float64 Sums of either reactants or products can be computed: >>> equilibrium.to_series("reactants") charge 0.000000 enthalpy -228.533374 entropy 0.031135 freeenergy -228.564509 mult 1.000000 natom 8.000000 nbasis 68.000000 nmo 68.000000 pressure 1.000000 temperature 298.150000 Name: AcOH(g), dtype: float64
[ "Produce", "a", "data", "record", "for", "ChemicalEquation", "." ]
train
https://github.com/schneiderfelipe/pyrrole/blob/13e26accc9a059f0ab69773648b24292fe1fbfd6/pyrrole/core.py#L416-L546
schneiderfelipe/pyrrole
pyrrole/core.py
ChemicalSystem.to_dataframe
def to_dataframe(self, *args, **kwargs): """ Produce a data table with records for all chemical equations. All possible differences for numeric attributes are computed and stored as columns in the returned `pandas.DataFrame` object (see examples below), whose rows represent chemical equations. In terms of behavior, this method can be seen as the `ChemicalEquation` counterpart of `create_data`. Returns ------- dataframe : `pandas.DataFrame` Data table with records of attribute differences for every single `ChemicalEquation` object in the model. Notes ----- Further arguments and keywords are passed directly to `ChemicalEquation.to_series`. Examples -------- >>> from pyrrole import ChemicalSystem >>> from pyrrole.atoms import create_data, read_cclib >>> data = create_data( ... read_cclib("data/acetate/acetic_acid.out", ... "AcOH(g)"), ... read_cclib("data/acetate/acetic_acid@water.out", ... "AcOH(aq)")) >>> data = data[["enthalpy", "entropy", "freeenergy"]] >>> equilibrium = ChemicalSystem("AcOH(g) <=> AcOH(aq)", data) >>> equilibrium.to_dataframe() # doctest: +NORMALIZE_WHITESPACE enthalpy entropy freeenergy chemical_equation AcOH(g) <=> AcOH(aq) -0.010958 -0.000198 -0.010759 """ dataframe = _pd.DataFrame([equation.to_series(*args, **kwargs) for equation in self.equations]) dataframe.index.name = "chemical_equation" return dataframe
python
def to_dataframe(self, *args, **kwargs): """ Produce a data table with records for all chemical equations. All possible differences for numeric attributes are computed and stored as columns in the returned `pandas.DataFrame` object (see examples below), whose rows represent chemical equations. In terms of behavior, this method can be seen as the `ChemicalEquation` counterpart of `create_data`. Returns ------- dataframe : `pandas.DataFrame` Data table with records of attribute differences for every single `ChemicalEquation` object in the model. Notes ----- Further arguments and keywords are passed directly to `ChemicalEquation.to_series`. Examples -------- >>> from pyrrole import ChemicalSystem >>> from pyrrole.atoms import create_data, read_cclib >>> data = create_data( ... read_cclib("data/acetate/acetic_acid.out", ... "AcOH(g)"), ... read_cclib("data/acetate/acetic_acid@water.out", ... "AcOH(aq)")) >>> data = data[["enthalpy", "entropy", "freeenergy"]] >>> equilibrium = ChemicalSystem("AcOH(g) <=> AcOH(aq)", data) >>> equilibrium.to_dataframe() # doctest: +NORMALIZE_WHITESPACE enthalpy entropy freeenergy chemical_equation AcOH(g) <=> AcOH(aq) -0.010958 -0.000198 -0.010759 """ dataframe = _pd.DataFrame([equation.to_series(*args, **kwargs) for equation in self.equations]) dataframe.index.name = "chemical_equation" return dataframe
[ "def", "to_dataframe", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "dataframe", "=", "_pd", ".", "DataFrame", "(", "[", "equation", ".", "to_series", "(", "*", "args", ",", "*", "*", "kwargs", ")", "for", "equation", "in", "se...
Produce a data table with records for all chemical equations. All possible differences for numeric attributes are computed and stored as columns in the returned `pandas.DataFrame` object (see examples below), whose rows represent chemical equations. In terms of behavior, this method can be seen as the `ChemicalEquation` counterpart of `create_data`. Returns ------- dataframe : `pandas.DataFrame` Data table with records of attribute differences for every single `ChemicalEquation` object in the model. Notes ----- Further arguments and keywords are passed directly to `ChemicalEquation.to_series`. Examples -------- >>> from pyrrole import ChemicalSystem >>> from pyrrole.atoms import create_data, read_cclib >>> data = create_data( ... read_cclib("data/acetate/acetic_acid.out", ... "AcOH(g)"), ... read_cclib("data/acetate/acetic_acid@water.out", ... "AcOH(aq)")) >>> data = data[["enthalpy", "entropy", "freeenergy"]] >>> equilibrium = ChemicalSystem("AcOH(g) <=> AcOH(aq)", data) >>> equilibrium.to_dataframe() # doctest: +NORMALIZE_WHITESPACE enthalpy entropy freeenergy chemical_equation AcOH(g) <=> AcOH(aq) -0.010958 -0.000198 -0.010759
[ "Produce", "a", "data", "table", "with", "records", "for", "all", "chemical", "equations", "." ]
train
https://github.com/schneiderfelipe/pyrrole/blob/13e26accc9a059f0ab69773648b24292fe1fbfd6/pyrrole/core.py#L694-L736
schneiderfelipe/pyrrole
pyrrole/core.py
ChemicalSystem.to_digraph
def to_digraph(self, *args, **kwargs): """ Compute a directed graph for the chemical system. Returns ------- digraph : `networkx.DiGraph` Graph nodes are reactants and/or products of chemical equations, while edges represent the equations themselves. Double ended edges are used to represent equilibria. Attributes are computed with `ChemicalEquation.to_series` for each equation (see examples below). Notes ----- Further arguments and keywords are passed directly to `ChemicalEquation.to_series`. Examples -------- >>> from pyrrole import ChemicalSystem >>> from pyrrole.atoms import create_data, read_cclib >>> data = create_data( ... read_cclib("data/acetate/acetic_acid.out", "AcOH(g)"), ... read_cclib("data/acetate/acetic_acid@water.out", "AcOH(aq)")) >>> equilibrium = ChemicalSystem("AcOH(g) <=> AcOH(aq)", data) >>> digraph = equilibrium.to_digraph() >>> sorted(digraph.nodes(data='freeenergy')) [('AcOH(aq)', -228.57526805), ('AcOH(g)', -228.56450866)] >>> digraph.number_of_nodes() 2 >>> digraph.number_of_edges() 2 """ # TODO: make test for this digraph = _nx.DiGraph() for equation in self.equations: reactants, arrow, products = [value.strip() for value in _split_arrows(str(equation))] try: attr = equation.to_series("reactants", *args, **kwargs).to_dict() except ValueError: attr = dict() digraph.add_node(reactants, **attr) try: attr = equation.to_series("products", *args, **kwargs).to_dict() except ValueError: attr = dict() digraph.add_node(products, **attr) try: attr = equation.to_series(*args, **kwargs).to_dict() except ValueError: attr = dict() digraph.add_edge(reactants, products, **attr) if arrow == '<=>': digraph.add_edge(products, reactants, **attr) return digraph
python
def to_digraph(self, *args, **kwargs): """ Compute a directed graph for the chemical system. Returns ------- digraph : `networkx.DiGraph` Graph nodes are reactants and/or products of chemical equations, while edges represent the equations themselves. Double ended edges are used to represent equilibria. Attributes are computed with `ChemicalEquation.to_series` for each equation (see examples below). Notes ----- Further arguments and keywords are passed directly to `ChemicalEquation.to_series`. Examples -------- >>> from pyrrole import ChemicalSystem >>> from pyrrole.atoms import create_data, read_cclib >>> data = create_data( ... read_cclib("data/acetate/acetic_acid.out", "AcOH(g)"), ... read_cclib("data/acetate/acetic_acid@water.out", "AcOH(aq)")) >>> equilibrium = ChemicalSystem("AcOH(g) <=> AcOH(aq)", data) >>> digraph = equilibrium.to_digraph() >>> sorted(digraph.nodes(data='freeenergy')) [('AcOH(aq)', -228.57526805), ('AcOH(g)', -228.56450866)] >>> digraph.number_of_nodes() 2 >>> digraph.number_of_edges() 2 """ # TODO: make test for this digraph = _nx.DiGraph() for equation in self.equations: reactants, arrow, products = [value.strip() for value in _split_arrows(str(equation))] try: attr = equation.to_series("reactants", *args, **kwargs).to_dict() except ValueError: attr = dict() digraph.add_node(reactants, **attr) try: attr = equation.to_series("products", *args, **kwargs).to_dict() except ValueError: attr = dict() digraph.add_node(products, **attr) try: attr = equation.to_series(*args, **kwargs).to_dict() except ValueError: attr = dict() digraph.add_edge(reactants, products, **attr) if arrow == '<=>': digraph.add_edge(products, reactants, **attr) return digraph
[ "def", "to_digraph", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "# TODO: make test for this", "digraph", "=", "_nx", ".", "DiGraph", "(", ")", "for", "equation", "in", "self", ".", "equations", ":", "reactants", ",", "arrow", ",", ...
Compute a directed graph for the chemical system. Returns ------- digraph : `networkx.DiGraph` Graph nodes are reactants and/or products of chemical equations, while edges represent the equations themselves. Double ended edges are used to represent equilibria. Attributes are computed with `ChemicalEquation.to_series` for each equation (see examples below). Notes ----- Further arguments and keywords are passed directly to `ChemicalEquation.to_series`. Examples -------- >>> from pyrrole import ChemicalSystem >>> from pyrrole.atoms import create_data, read_cclib >>> data = create_data( ... read_cclib("data/acetate/acetic_acid.out", "AcOH(g)"), ... read_cclib("data/acetate/acetic_acid@water.out", "AcOH(aq)")) >>> equilibrium = ChemicalSystem("AcOH(g) <=> AcOH(aq)", data) >>> digraph = equilibrium.to_digraph() >>> sorted(digraph.nodes(data='freeenergy')) [('AcOH(aq)', -228.57526805), ('AcOH(g)', -228.56450866)] >>> digraph.number_of_nodes() 2 >>> digraph.number_of_edges() 2
[ "Compute", "a", "directed", "graph", "for", "the", "chemical", "system", "." ]
train
https://github.com/schneiderfelipe/pyrrole/blob/13e26accc9a059f0ab69773648b24292fe1fbfd6/pyrrole/core.py#L738-L801
INM-6/hybridLFPy
examples/Hagen_et_al_2016_cercor/figure_10.py
fig_exc_inh_contrib
def fig_exc_inh_contrib(fig, axes, params, savefolders, T=[800, 1000], transient=200, panel_labels = 'FGHIJ', show_xlabels=True): ''' plot time series LFPs and CSDs with signal variances as function of depth for the cases with all synapses intact, or knocking out excitatory input or inhibitory input to the postsynaptic target region args: :: fig : axes : savefolders : list of simulation output folders T : list of ints, first and last time sample transient : int, duration of transient period returns: :: matplotlib.figure.Figure object ''' # params = multicompartment_params() # ana_params = analysis_params.params() #file name types file_names = ['CSDsum.h5', 'LFPsum.h5'] #panel titles panel_titles = [ 'LFP&CSD\nexc. syn.', 'LFP&CSD\ninh. syn.', 'LFP&CSD\ncompound', 'CSD variance', 'LFP variance',] #labels labels = [ 'exc. syn.', 'inh. syn.', 'SUM'] #some colors for traces if analysis_params.bw: colors = ['k', 'gray', 'k'] # lws = [0.75, 0.75, 1.5] lws = [1.25, 1.25, 1.25] else: colors = [analysis_params.colorE, analysis_params.colorI, 'k'] # colors = 'rbk' # lws = [0.75, 0.75, 1.5] lws = [1.25, 1.25, 1.25] #scalebar labels units = ['$\mu$A mm$^{-3}$', 'mV'] #depth of each contact site depth = params.electrodeParams['z'] # #set up figure # #figure aspect # ana_params.set_PLOS_2column_fig_style(ratio=0.5) # fig, axes = plt.subplots(1,5) # fig.subplots_adjust(left=0.06, right=0.96, wspace=0.4, hspace=0.2) #clean up for ax in axes.flatten(): phlp.remove_axis_junk(ax) for i, file_name in enumerate(file_names): #get the global data scaling bar range for use in latter plots #TODO: find nicer solution without creating figure dum_fig, dum_ax = plt.subplots(1) vlim_LFP = 0 vlim_CSD = 0 for savefolder in savefolders: vlimround0 = plot_signal_sum(dum_ax, params, os.path.join(os.path.split(params.savefolder)[0], savefolder, file_name), rasterized=False) if vlimround0 > vlim_LFP: vlim_LFP = vlimround0 im = plot_signal_sum_colorplot(dum_ax, params, 
os.path.join(os.path.split(params.savefolder)[0], savefolder, file_name), cmap=plt.get_cmap('gray', 21) if analysis_params.bw else plt.get_cmap('bwr_r', 21), rasterized=False) if abs(im.get_array()).max() > vlim_CSD: vlim_CSD = abs(im.get_array()).max() plt.close(dum_fig) for j, savefolder in enumerate(savefolders): ax = axes[j] if i == 1: plot_signal_sum(ax, params, os.path.join(os.path.split(params.savefolder)[0], savefolder, file_name), unit=units[i], T=T, color='k', # color='k' if analysis_params.bw else colors[j], vlimround=vlim_LFP, rasterized=False) elif i == 0: im = plot_signal_sum_colorplot(ax, params, os.path.join(os.path.split(params.savefolder)[0], savefolder, file_name), unit=r'($\mu$Amm$^{-3}$)', T=T, ylabels=True, colorbar=False, fancy=False, cmap=plt.get_cmap('gray', 21) if analysis_params.bw else plt.get_cmap('bwr_r', 21), absmax=vlim_CSD, rasterized=False) ax.axis((T[0], T[1], -1550, 50)) ax.set_title(panel_titles[j], va='baseline') if i == 0: phlp.annotate_subplot(ax, ncols=1, nrows=1, letter=panel_labels[j]) if j != 0: ax.set_yticklabels([]) if i == 0:#and j == 2: cb = phlp.colorbar(fig, ax, im, width=0.05, height=0.5, hoffset=-0.05, voffset=0.5) cb.set_label('($\mu$Amm$^{-3}$)', labelpad=0.) ax.xaxis.set_major_locator(plt.MaxNLocator(3)) if show_xlabels: ax.set_xlabel(r'$t$ (ms)', labelpad=0.) 
else: ax.set_xlabel('') #power in CSD ax = axes[3] datas = [] for j, savefolder in enumerate(savefolders): f = h5py.File(os.path.join(os.path.split(params.savefolder)[0], savefolder, 'CSDsum.h5')) var = f['data'].value[:, transient:].var(axis=1) ax.semilogx(var, depth, color=colors[j], label=labels[j], lw=lws[j], clip_on=False) datas.append(f['data'].value[:, transient:]) f.close() #control variances vardiff = datas[0].var(axis=1) + datas[1].var(axis=1) + np.array([2*np.cov(x,y)[0,1] for (x,y) in zip(datas[0], datas[1])]) - datas[2].var(axis=1) #ax.semilogx(abs(vardiff), depth, color='gray', lw=1, label='control') ax.axis(ax.axis('tight')) ax.set_ylim(-1550, 50) ax.set_yticks(-np.arange(16)*100) if show_xlabels: ax.set_xlabel(r'$\sigma^2$ ($(\mu$Amm$^{-3})^2$)', labelpad=0.) ax.set_title(panel_titles[3], va='baseline') phlp.annotate_subplot(ax, ncols=1, nrows=1, letter=panel_labels[3]) ax.set_yticklabels([]) #power in LFP ax = axes[4] datas = [] for j, savefolder in enumerate(savefolders): f = h5py.File(os.path.join(os.path.split(params.savefolder)[0], savefolder, 'LFPsum.h5')) var = f['data'].value[:, transient:].var(axis=1) ax.semilogx(var, depth, color=colors[j], label=labels[j], lw=lws[j], clip_on=False) datas.append(f['data'].value[:, transient:]) f.close() #control variances vardiff = datas[0].var(axis=1) + datas[1].var(axis=1) + np.array([2*np.cov(x,y)[0,1] for (x,y) in zip(datas[0], datas[1])]) - datas[2].var(axis=1) ax.axis(ax.axis('tight')) ax.set_ylim(-1550, 50) ax.set_yticks(-np.arange(16)*100) if show_xlabels: ax.set_xlabel(r'$\sigma^2$ (mV$^2$)', labelpad=0.) ax.set_title(panel_titles[4], va='baseline') phlp.annotate_subplot(ax, ncols=1, nrows=1, letter=panel_labels[4]) ax.legend(bbox_to_anchor=(1.3, 1.0), frameon=False) ax.set_yticklabels([])
python
def fig_exc_inh_contrib(fig, axes, params, savefolders, T=[800, 1000], transient=200, panel_labels = 'FGHIJ', show_xlabels=True): ''' plot time series LFPs and CSDs with signal variances as function of depth for the cases with all synapses intact, or knocking out excitatory input or inhibitory input to the postsynaptic target region args: :: fig : axes : savefolders : list of simulation output folders T : list of ints, first and last time sample transient : int, duration of transient period returns: :: matplotlib.figure.Figure object ''' # params = multicompartment_params() # ana_params = analysis_params.params() #file name types file_names = ['CSDsum.h5', 'LFPsum.h5'] #panel titles panel_titles = [ 'LFP&CSD\nexc. syn.', 'LFP&CSD\ninh. syn.', 'LFP&CSD\ncompound', 'CSD variance', 'LFP variance',] #labels labels = [ 'exc. syn.', 'inh. syn.', 'SUM'] #some colors for traces if analysis_params.bw: colors = ['k', 'gray', 'k'] # lws = [0.75, 0.75, 1.5] lws = [1.25, 1.25, 1.25] else: colors = [analysis_params.colorE, analysis_params.colorI, 'k'] # colors = 'rbk' # lws = [0.75, 0.75, 1.5] lws = [1.25, 1.25, 1.25] #scalebar labels units = ['$\mu$A mm$^{-3}$', 'mV'] #depth of each contact site depth = params.electrodeParams['z'] # #set up figure # #figure aspect # ana_params.set_PLOS_2column_fig_style(ratio=0.5) # fig, axes = plt.subplots(1,5) # fig.subplots_adjust(left=0.06, right=0.96, wspace=0.4, hspace=0.2) #clean up for ax in axes.flatten(): phlp.remove_axis_junk(ax) for i, file_name in enumerate(file_names): #get the global data scaling bar range for use in latter plots #TODO: find nicer solution without creating figure dum_fig, dum_ax = plt.subplots(1) vlim_LFP = 0 vlim_CSD = 0 for savefolder in savefolders: vlimround0 = plot_signal_sum(dum_ax, params, os.path.join(os.path.split(params.savefolder)[0], savefolder, file_name), rasterized=False) if vlimround0 > vlim_LFP: vlim_LFP = vlimround0 im = plot_signal_sum_colorplot(dum_ax, params, 
os.path.join(os.path.split(params.savefolder)[0], savefolder, file_name), cmap=plt.get_cmap('gray', 21) if analysis_params.bw else plt.get_cmap('bwr_r', 21), rasterized=False) if abs(im.get_array()).max() > vlim_CSD: vlim_CSD = abs(im.get_array()).max() plt.close(dum_fig) for j, savefolder in enumerate(savefolders): ax = axes[j] if i == 1: plot_signal_sum(ax, params, os.path.join(os.path.split(params.savefolder)[0], savefolder, file_name), unit=units[i], T=T, color='k', # color='k' if analysis_params.bw else colors[j], vlimround=vlim_LFP, rasterized=False) elif i == 0: im = plot_signal_sum_colorplot(ax, params, os.path.join(os.path.split(params.savefolder)[0], savefolder, file_name), unit=r'($\mu$Amm$^{-3}$)', T=T, ylabels=True, colorbar=False, fancy=False, cmap=plt.get_cmap('gray', 21) if analysis_params.bw else plt.get_cmap('bwr_r', 21), absmax=vlim_CSD, rasterized=False) ax.axis((T[0], T[1], -1550, 50)) ax.set_title(panel_titles[j], va='baseline') if i == 0: phlp.annotate_subplot(ax, ncols=1, nrows=1, letter=panel_labels[j]) if j != 0: ax.set_yticklabels([]) if i == 0:#and j == 2: cb = phlp.colorbar(fig, ax, im, width=0.05, height=0.5, hoffset=-0.05, voffset=0.5) cb.set_label('($\mu$Amm$^{-3}$)', labelpad=0.) ax.xaxis.set_major_locator(plt.MaxNLocator(3)) if show_xlabels: ax.set_xlabel(r'$t$ (ms)', labelpad=0.) 
else: ax.set_xlabel('') #power in CSD ax = axes[3] datas = [] for j, savefolder in enumerate(savefolders): f = h5py.File(os.path.join(os.path.split(params.savefolder)[0], savefolder, 'CSDsum.h5')) var = f['data'].value[:, transient:].var(axis=1) ax.semilogx(var, depth, color=colors[j], label=labels[j], lw=lws[j], clip_on=False) datas.append(f['data'].value[:, transient:]) f.close() #control variances vardiff = datas[0].var(axis=1) + datas[1].var(axis=1) + np.array([2*np.cov(x,y)[0,1] for (x,y) in zip(datas[0], datas[1])]) - datas[2].var(axis=1) #ax.semilogx(abs(vardiff), depth, color='gray', lw=1, label='control') ax.axis(ax.axis('tight')) ax.set_ylim(-1550, 50) ax.set_yticks(-np.arange(16)*100) if show_xlabels: ax.set_xlabel(r'$\sigma^2$ ($(\mu$Amm$^{-3})^2$)', labelpad=0.) ax.set_title(panel_titles[3], va='baseline') phlp.annotate_subplot(ax, ncols=1, nrows=1, letter=panel_labels[3]) ax.set_yticklabels([]) #power in LFP ax = axes[4] datas = [] for j, savefolder in enumerate(savefolders): f = h5py.File(os.path.join(os.path.split(params.savefolder)[0], savefolder, 'LFPsum.h5')) var = f['data'].value[:, transient:].var(axis=1) ax.semilogx(var, depth, color=colors[j], label=labels[j], lw=lws[j], clip_on=False) datas.append(f['data'].value[:, transient:]) f.close() #control variances vardiff = datas[0].var(axis=1) + datas[1].var(axis=1) + np.array([2*np.cov(x,y)[0,1] for (x,y) in zip(datas[0], datas[1])]) - datas[2].var(axis=1) ax.axis(ax.axis('tight')) ax.set_ylim(-1550, 50) ax.set_yticks(-np.arange(16)*100) if show_xlabels: ax.set_xlabel(r'$\sigma^2$ (mV$^2$)', labelpad=0.) ax.set_title(panel_titles[4], va='baseline') phlp.annotate_subplot(ax, ncols=1, nrows=1, letter=panel_labels[4]) ax.legend(bbox_to_anchor=(1.3, 1.0), frameon=False) ax.set_yticklabels([])
[ "def", "fig_exc_inh_contrib", "(", "fig", ",", "axes", ",", "params", ",", "savefolders", ",", "T", "=", "[", "800", ",", "1000", "]", ",", "transient", "=", "200", ",", "panel_labels", "=", "'FGHIJ'", ",", "show_xlabels", "=", "True", ")", ":", "# par...
plot time series LFPs and CSDs with signal variances as function of depth for the cases with all synapses intact, or knocking out excitatory input or inhibitory input to the postsynaptic target region args: :: fig : axes : savefolders : list of simulation output folders T : list of ints, first and last time sample transient : int, duration of transient period returns: :: matplotlib.figure.Figure object
[ "plot", "time", "series", "LFPs", "and", "CSDs", "with", "signal", "variances", "as", "function", "of", "depth", "for", "the", "cases", "with", "all", "synapses", "intact", "or", "knocking", "out", "excitatory", "input", "or", "inhibitory", "input", "to", "t...
train
https://github.com/INM-6/hybridLFPy/blob/c38bdf38982c4624c2f70caeb50c40f1d5980abd/examples/Hagen_et_al_2016_cercor/figure_10.py#L12-L187
schneiderfelipe/pyrrole
pyrrole/atoms.py
read_cclib
def read_cclib(value, name=None): """ Create an `Atoms` object from data attributes parsed by cclib. `cclib <https://cclib.github.io/>`_ is an open source library, written in Python, for parsing and interpreting the results (logfiles) of computational chemistry packages. Parameters ---------- value : `str`, `cclib.parser.logfileparser.Logfile`, `cclib.parser.data.ccData` A path to a logfile, or either a cclib job object (i.e., from `cclib.ccopen`), or cclib data object (i.e., from ``job.parse()``). name : `str`, optional Name for chemical species. If not given, this is set to the logfile path, if known. Chemical equations mention this name when refering to the returned object. Returns ------- molecule : `Atoms` All attributes obtainable by cclib are made available as attributes in the returned object. Examples -------- >>> from pyrrole.atoms import read_cclib >>> molecule = read_cclib('data/pyrrolate/pyrrole.out') >>> molecule.atomnos array([6, 6, 6, 6, 7, 1, 1, 1, 1, 1], dtype=int32) >>> molecule.charge 0 """ if isinstance(value, _logfileparser.Logfile): # TODO: test this case. jobfilename = value.filename ccdata = value.parse() elif isinstance(value, _data.ccData): # TODO: test this case. jobfilename = None ccdata = value else: # TODO: test this case. ccobj = _cclib.ccopen(value) jobfilename = ccobj.filename ccdata = ccobj.parse() if name is None: name = jobfilename attributes = ccdata.getattributes() attributes.update({ 'name': name, 'jobfilename': jobfilename, }) return Atoms(attributes)
python
def read_cclib(value, name=None): """ Create an `Atoms` object from data attributes parsed by cclib. `cclib <https://cclib.github.io/>`_ is an open source library, written in Python, for parsing and interpreting the results (logfiles) of computational chemistry packages. Parameters ---------- value : `str`, `cclib.parser.logfileparser.Logfile`, `cclib.parser.data.ccData` A path to a logfile, or either a cclib job object (i.e., from `cclib.ccopen`), or cclib data object (i.e., from ``job.parse()``). name : `str`, optional Name for chemical species. If not given, this is set to the logfile path, if known. Chemical equations mention this name when refering to the returned object. Returns ------- molecule : `Atoms` All attributes obtainable by cclib are made available as attributes in the returned object. Examples -------- >>> from pyrrole.atoms import read_cclib >>> molecule = read_cclib('data/pyrrolate/pyrrole.out') >>> molecule.atomnos array([6, 6, 6, 6, 7, 1, 1, 1, 1, 1], dtype=int32) >>> molecule.charge 0 """ if isinstance(value, _logfileparser.Logfile): # TODO: test this case. jobfilename = value.filename ccdata = value.parse() elif isinstance(value, _data.ccData): # TODO: test this case. jobfilename = None ccdata = value else: # TODO: test this case. ccobj = _cclib.ccopen(value) jobfilename = ccobj.filename ccdata = ccobj.parse() if name is None: name = jobfilename attributes = ccdata.getattributes() attributes.update({ 'name': name, 'jobfilename': jobfilename, }) return Atoms(attributes)
[ "def", "read_cclib", "(", "value", ",", "name", "=", "None", ")", ":", "if", "isinstance", "(", "value", ",", "_logfileparser", ".", "Logfile", ")", ":", "# TODO: test this case.", "jobfilename", "=", "value", ".", "filename", "ccdata", "=", "value", ".", ...
Create an `Atoms` object from data attributes parsed by cclib. `cclib <https://cclib.github.io/>`_ is an open source library, written in Python, for parsing and interpreting the results (logfiles) of computational chemistry packages. Parameters ---------- value : `str`, `cclib.parser.logfileparser.Logfile`, `cclib.parser.data.ccData` A path to a logfile, or either a cclib job object (i.e., from `cclib.ccopen`), or cclib data object (i.e., from ``job.parse()``). name : `str`, optional Name for chemical species. If not given, this is set to the logfile path, if known. Chemical equations mention this name when refering to the returned object. Returns ------- molecule : `Atoms` All attributes obtainable by cclib are made available as attributes in the returned object. Examples -------- >>> from pyrrole.atoms import read_cclib >>> molecule = read_cclib('data/pyrrolate/pyrrole.out') >>> molecule.atomnos array([6, 6, 6, 6, 7, 1, 1, 1, 1, 1], dtype=int32) >>> molecule.charge 0
[ "Create", "an", "Atoms", "object", "from", "data", "attributes", "parsed", "by", "cclib", "." ]
train
https://github.com/schneiderfelipe/pyrrole/blob/13e26accc9a059f0ab69773648b24292fe1fbfd6/pyrrole/atoms.py#L491-L548
schneiderfelipe/pyrrole
pyrrole/atoms.py
read_pybel
def read_pybel(value, name=None): """ Create an `Atoms` object from content parsed by Pybel. `Pybel <https://openbabel.org/docs/dev/UseTheLibrary/Python_Pybel.html>`_ is a Python module that simplifies access to the OpenBabel API, a chemical toolbox designed to speak the many languages of chemical data. It’s an open, collaborative project allowing anyone to search, convert, analyze, or store data from molecular modeling, chemistry, solid-state materials, biochemistry, and related areas. Parameters ---------- value : `str`, `pybel.Molecule`, `openbabel.OBMol` A path to a file, or either a Pybel Molecule object, or OpenBabel OBMol. name : `str`, optional Name for chemical species. If not given, this is set to the file path, if known. Chemical equations mention this name when refering to the returned object. Returns ------- molecule : `Atoms` All attributes convertible from Pybel to cclib are made available as attributes in the returned object. Notes ----- The following attributes are converted from Pybel to cclib: `atomcoords`, `atommasses`, `atomnos`, `natom`, `charge` and `mult`. One must keep in mind that `charge` and `mult` are not always reliable, since these are often calculated from atomic formal charges. Examples -------- >>> from pyrrole.atoms import read_pybel >>> molecule = read_pybel('data/pyrrolate/pyrrole.xyz') >>> molecule.atomnos array([6, 6, 6, 6, 7, 1, 1, 1, 1, 1], dtype=int32) >>> molecule.natom 10 >>> molecule.charge 0 """ if isinstance(value, _pb.Molecule): # TODO: test this case. jobfilename = None charge, mult = value.charge, value.spin ccdata = _makecclib(value.OBMol) elif isinstance(value, _ob.OBMol): # TODO: test this case. jobfilename = None charge, mult = value.GetTotalCharge(), value.GetTotalSpinMultiplicity() ccdata = _makecclib(value) else: # TODO: test this case. jobfilename = value _, jobfilename_ext = _os.path.splitext(jobfilename) # TODO: This only reads first structure. 
mol = next(_pb.readfile(jobfilename_ext[1:], jobfilename)) charge, mult = mol.charge, mol.spin ccdata = _makecclib(mol.OBMol) if name is None: name = jobfilename attributes = ccdata.getattributes() attributes.update({ 'name': name, 'jobfilename': jobfilename, 'charge': charge, 'mult': mult }) return Atoms(attributes)
python
def read_pybel(value, name=None): """ Create an `Atoms` object from content parsed by Pybel. `Pybel <https://openbabel.org/docs/dev/UseTheLibrary/Python_Pybel.html>`_ is a Python module that simplifies access to the OpenBabel API, a chemical toolbox designed to speak the many languages of chemical data. It’s an open, collaborative project allowing anyone to search, convert, analyze, or store data from molecular modeling, chemistry, solid-state materials, biochemistry, and related areas. Parameters ---------- value : `str`, `pybel.Molecule`, `openbabel.OBMol` A path to a file, or either a Pybel Molecule object, or OpenBabel OBMol. name : `str`, optional Name for chemical species. If not given, this is set to the file path, if known. Chemical equations mention this name when refering to the returned object. Returns ------- molecule : `Atoms` All attributes convertible from Pybel to cclib are made available as attributes in the returned object. Notes ----- The following attributes are converted from Pybel to cclib: `atomcoords`, `atommasses`, `atomnos`, `natom`, `charge` and `mult`. One must keep in mind that `charge` and `mult` are not always reliable, since these are often calculated from atomic formal charges. Examples -------- >>> from pyrrole.atoms import read_pybel >>> molecule = read_pybel('data/pyrrolate/pyrrole.xyz') >>> molecule.atomnos array([6, 6, 6, 6, 7, 1, 1, 1, 1, 1], dtype=int32) >>> molecule.natom 10 >>> molecule.charge 0 """ if isinstance(value, _pb.Molecule): # TODO: test this case. jobfilename = None charge, mult = value.charge, value.spin ccdata = _makecclib(value.OBMol) elif isinstance(value, _ob.OBMol): # TODO: test this case. jobfilename = None charge, mult = value.GetTotalCharge(), value.GetTotalSpinMultiplicity() ccdata = _makecclib(value) else: # TODO: test this case. jobfilename = value _, jobfilename_ext = _os.path.splitext(jobfilename) # TODO: This only reads first structure. 
mol = next(_pb.readfile(jobfilename_ext[1:], jobfilename)) charge, mult = mol.charge, mol.spin ccdata = _makecclib(mol.OBMol) if name is None: name = jobfilename attributes = ccdata.getattributes() attributes.update({ 'name': name, 'jobfilename': jobfilename, 'charge': charge, 'mult': mult }) return Atoms(attributes)
[ "def", "read_pybel", "(", "value", ",", "name", "=", "None", ")", ":", "if", "isinstance", "(", "value", ",", "_pb", ".", "Molecule", ")", ":", "# TODO: test this case.", "jobfilename", "=", "None", "charge", ",", "mult", "=", "value", ".", "charge", ","...
Create an `Atoms` object from content parsed by Pybel. `Pybel <https://openbabel.org/docs/dev/UseTheLibrary/Python_Pybel.html>`_ is a Python module that simplifies access to the OpenBabel API, a chemical toolbox designed to speak the many languages of chemical data. It’s an open, collaborative project allowing anyone to search, convert, analyze, or store data from molecular modeling, chemistry, solid-state materials, biochemistry, and related areas. Parameters ---------- value : `str`, `pybel.Molecule`, `openbabel.OBMol` A path to a file, or either a Pybel Molecule object, or OpenBabel OBMol. name : `str`, optional Name for chemical species. If not given, this is set to the file path, if known. Chemical equations mention this name when refering to the returned object. Returns ------- molecule : `Atoms` All attributes convertible from Pybel to cclib are made available as attributes in the returned object. Notes ----- The following attributes are converted from Pybel to cclib: `atomcoords`, `atommasses`, `atomnos`, `natom`, `charge` and `mult`. One must keep in mind that `charge` and `mult` are not always reliable, since these are often calculated from atomic formal charges. Examples -------- >>> from pyrrole.atoms import read_pybel >>> molecule = read_pybel('data/pyrrolate/pyrrole.xyz') >>> molecule.atomnos array([6, 6, 6, 6, 7, 1, 1, 1, 1, 1], dtype=int32) >>> molecule.natom 10 >>> molecule.charge 0
[ "Create", "an", "Atoms", "object", "from", "content", "parsed", "by", "Pybel", "." ]
train
https://github.com/schneiderfelipe/pyrrole/blob/13e26accc9a059f0ab69773648b24292fe1fbfd6/pyrrole/atoms.py#L551-L628
schneiderfelipe/pyrrole
pyrrole/atoms.py
create_data
def create_data(*args): """ Produce a single data object from an arbitrary number of different objects. This function returns a single `pandas.DataFrame` object from a collection of `Atoms` and `pandas.DataFrame` objects. The returned object, already indexed by `Atoms.name`, can be promptly used by e.g. `ChemicalSystem`. Parameters ---------- *args : `pandas.DataFrame` or `Atoms`-like All positional arguments are assumed to be sources of data. `Atoms`-like objects (i.e. any object accepted by the `Atoms` constructor) become single row records in the final returned data object. `pandas.DataFrame` data table objects, on the other hand, are concatenated together (by using `pandas.DataFrame.concat`). Returns ------- dataframe : `pandas.DataFrame` Resulting tabular data object. The returned object is guaranteed to be indexed by `Atoms.name`; if no column with this name exists at indexing time, a new column (with `None` values) is created for the purpose of indexing. Notes ----- The returned `pandas.DataFrame` will be indexed by `Atoms.name` (see examples below), which might be the same as `Atoms.jobfilename` if no name was given to the constructor of `Atoms` (e.g. mapping). Examples -------- >>> from pyrrole.atoms import Atoms, create_data, read_cclib >>> pyrrole = read_cclib('data/pyrrolate/pyrrole.out', 'pyrrole') >>> pyrrolate = read_cclib('data/pyrrolate/pyrrolate.out') >>> data = create_data(pyrrole, pyrrolate) >>> data['charge'] name pyrrole 0 data/pyrrolate/pyrrolate.out -1 Name: charge, dtype: int64 """ def _prepare_data(data): if not isinstance(data, _pd.DataFrame): try: data = _pd.DataFrame([data.to_series()]) except AttributeError: data = _pd.DataFrame([Atoms(data).to_series()]) if data.index.name != "name": if "name" not in data.columns: data["name"] = None data = data.set_index("name") return data.reset_index() args = map(_prepare_data, args) dataframe = _pd.concat(args, sort=False) return dataframe.set_index("name")
python
def create_data(*args): """ Produce a single data object from an arbitrary number of different objects. This function returns a single `pandas.DataFrame` object from a collection of `Atoms` and `pandas.DataFrame` objects. The returned object, already indexed by `Atoms.name`, can be promptly used by e.g. `ChemicalSystem`. Parameters ---------- *args : `pandas.DataFrame` or `Atoms`-like All positional arguments are assumed to be sources of data. `Atoms`-like objects (i.e. any object accepted by the `Atoms` constructor) become single row records in the final returned data object. `pandas.DataFrame` data table objects, on the other hand, are concatenated together (by using `pandas.DataFrame.concat`). Returns ------- dataframe : `pandas.DataFrame` Resulting tabular data object. The returned object is guaranteed to be indexed by `Atoms.name`; if no column with this name exists at indexing time, a new column (with `None` values) is created for the purpose of indexing. Notes ----- The returned `pandas.DataFrame` will be indexed by `Atoms.name` (see examples below), which might be the same as `Atoms.jobfilename` if no name was given to the constructor of `Atoms` (e.g. mapping). Examples -------- >>> from pyrrole.atoms import Atoms, create_data, read_cclib >>> pyrrole = read_cclib('data/pyrrolate/pyrrole.out', 'pyrrole') >>> pyrrolate = read_cclib('data/pyrrolate/pyrrolate.out') >>> data = create_data(pyrrole, pyrrolate) >>> data['charge'] name pyrrole 0 data/pyrrolate/pyrrolate.out -1 Name: charge, dtype: int64 """ def _prepare_data(data): if not isinstance(data, _pd.DataFrame): try: data = _pd.DataFrame([data.to_series()]) except AttributeError: data = _pd.DataFrame([Atoms(data).to_series()]) if data.index.name != "name": if "name" not in data.columns: data["name"] = None data = data.set_index("name") return data.reset_index() args = map(_prepare_data, args) dataframe = _pd.concat(args, sort=False) return dataframe.set_index("name")
[ "def", "create_data", "(", "*", "args", ")", ":", "def", "_prepare_data", "(", "data", ")", ":", "if", "not", "isinstance", "(", "data", ",", "_pd", ".", "DataFrame", ")", ":", "try", ":", "data", "=", "_pd", ".", "DataFrame", "(", "[", "data", "."...
Produce a single data object from an arbitrary number of different objects. This function returns a single `pandas.DataFrame` object from a collection of `Atoms` and `pandas.DataFrame` objects. The returned object, already indexed by `Atoms.name`, can be promptly used by e.g. `ChemicalSystem`. Parameters ---------- *args : `pandas.DataFrame` or `Atoms`-like All positional arguments are assumed to be sources of data. `Atoms`-like objects (i.e. any object accepted by the `Atoms` constructor) become single row records in the final returned data object. `pandas.DataFrame` data table objects, on the other hand, are concatenated together (by using `pandas.DataFrame.concat`). Returns ------- dataframe : `pandas.DataFrame` Resulting tabular data object. The returned object is guaranteed to be indexed by `Atoms.name`; if no column with this name exists at indexing time, a new column (with `None` values) is created for the purpose of indexing. Notes ----- The returned `pandas.DataFrame` will be indexed by `Atoms.name` (see examples below), which might be the same as `Atoms.jobfilename` if no name was given to the constructor of `Atoms` (e.g. mapping). Examples -------- >>> from pyrrole.atoms import Atoms, create_data, read_cclib >>> pyrrole = read_cclib('data/pyrrolate/pyrrole.out', 'pyrrole') >>> pyrrolate = read_cclib('data/pyrrolate/pyrrolate.out') >>> data = create_data(pyrrole, pyrrolate) >>> data['charge'] name pyrrole 0 data/pyrrolate/pyrrolate.out -1 Name: charge, dtype: int64
[ "Produce", "a", "single", "data", "object", "from", "an", "arbitrary", "number", "of", "different", "objects", "." ]
train
https://github.com/schneiderfelipe/pyrrole/blob/13e26accc9a059f0ab69773648b24292fe1fbfd6/pyrrole/atoms.py#L631-L690
schneiderfelipe/pyrrole
pyrrole/atoms.py
Atoms.split
def split(self, pattern=None): r""" Break molecule up into constituent fragments. By default (i.e., if `pattern` is `None`), each disconnected fragment is returned as a separate new `Atoms` object. This uses OpenBabel (through `OBMol.Separate`) and might not preserve atom order, depending on your version of the library. Parameters ---------- pattern : iterable of iterable of `int`, optional Groupings of atoms into molecule fragments. Each element of `pattern` should be an iterable whose members are atom indices (see example below). Returns ------- fragments : iterable of `Atoms` Examples -------- >>> from pyrrole import atoms >>> water_dimer = atoms.read_pybel("data/water-dimer.xyz") "Natural fragmentation" is the default behaviour, i.e. all disconnected fragments are returned: >>> for frag in water_dimer.split(): ... print("{}\n".format(frag)) O -1.62893 -0.04138 0.37137 H -0.69803 -0.09168 0.09337 H -2.06663 -0.73498 -0.13663 <BLANKLINE> O 1.21457 0.03172 -0.27623 H 1.72977 -0.08038 0.53387 H 1.44927 0.91672 -0.58573 <BLANKLINE> Precise fragment grouping can be achieved by explicitly indicating which atoms belong to which fragments: >>> for frag in water_dimer.split([range(3), (5, 4), [3]]): ... print("{}\n".format(frag)) O -1.62893 -0.04138 0.37137 H -0.69803 -0.09168 0.09337 H -2.06663 -0.73498 -0.13663 <BLANKLINE> H 1.72977 -0.08038 0.53387 H 1.44927 0.91672 -0.58573 <BLANKLINE> O 1.21457 0.03172 -0.27623 <BLANKLINE> """ molecule_pybel = self.to_pybel() if pattern is None: fragments = [read_pybel(frag) for frag in molecule_pybel.OBMol.Separate()] else: fragments = [] for group in pattern: fragment_obmol = _pb.ob.OBMol() for i in group: obatom = molecule_pybel.OBMol.GetAtomById(i) fragment_obmol.InsertAtom(obatom) fragments.append(fragment_obmol) fragments = [read_pybel(frag) for frag in fragments] return fragments
python
def split(self, pattern=None): r""" Break molecule up into constituent fragments. By default (i.e., if `pattern` is `None`), each disconnected fragment is returned as a separate new `Atoms` object. This uses OpenBabel (through `OBMol.Separate`) and might not preserve atom order, depending on your version of the library. Parameters ---------- pattern : iterable of iterable of `int`, optional Groupings of atoms into molecule fragments. Each element of `pattern` should be an iterable whose members are atom indices (see example below). Returns ------- fragments : iterable of `Atoms` Examples -------- >>> from pyrrole import atoms >>> water_dimer = atoms.read_pybel("data/water-dimer.xyz") "Natural fragmentation" is the default behaviour, i.e. all disconnected fragments are returned: >>> for frag in water_dimer.split(): ... print("{}\n".format(frag)) O -1.62893 -0.04138 0.37137 H -0.69803 -0.09168 0.09337 H -2.06663 -0.73498 -0.13663 <BLANKLINE> O 1.21457 0.03172 -0.27623 H 1.72977 -0.08038 0.53387 H 1.44927 0.91672 -0.58573 <BLANKLINE> Precise fragment grouping can be achieved by explicitly indicating which atoms belong to which fragments: >>> for frag in water_dimer.split([range(3), (5, 4), [3]]): ... print("{}\n".format(frag)) O -1.62893 -0.04138 0.37137 H -0.69803 -0.09168 0.09337 H -2.06663 -0.73498 -0.13663 <BLANKLINE> H 1.72977 -0.08038 0.53387 H 1.44927 0.91672 -0.58573 <BLANKLINE> O 1.21457 0.03172 -0.27623 <BLANKLINE> """ molecule_pybel = self.to_pybel() if pattern is None: fragments = [read_pybel(frag) for frag in molecule_pybel.OBMol.Separate()] else: fragments = [] for group in pattern: fragment_obmol = _pb.ob.OBMol() for i in group: obatom = molecule_pybel.OBMol.GetAtomById(i) fragment_obmol.InsertAtom(obatom) fragments.append(fragment_obmol) fragments = [read_pybel(frag) for frag in fragments] return fragments
[ "def", "split", "(", "self", ",", "pattern", "=", "None", ")", ":", "molecule_pybel", "=", "self", ".", "to_pybel", "(", ")", "if", "pattern", "is", "None", ":", "fragments", "=", "[", "read_pybel", "(", "frag", ")", "for", "frag", "in", "molecule_pybe...
r""" Break molecule up into constituent fragments. By default (i.e., if `pattern` is `None`), each disconnected fragment is returned as a separate new `Atoms` object. This uses OpenBabel (through `OBMol.Separate`) and might not preserve atom order, depending on your version of the library. Parameters ---------- pattern : iterable of iterable of `int`, optional Groupings of atoms into molecule fragments. Each element of `pattern` should be an iterable whose members are atom indices (see example below). Returns ------- fragments : iterable of `Atoms` Examples -------- >>> from pyrrole import atoms >>> water_dimer = atoms.read_pybel("data/water-dimer.xyz") "Natural fragmentation" is the default behaviour, i.e. all disconnected fragments are returned: >>> for frag in water_dimer.split(): ... print("{}\n".format(frag)) O -1.62893 -0.04138 0.37137 H -0.69803 -0.09168 0.09337 H -2.06663 -0.73498 -0.13663 <BLANKLINE> O 1.21457 0.03172 -0.27623 H 1.72977 -0.08038 0.53387 H 1.44927 0.91672 -0.58573 <BLANKLINE> Precise fragment grouping can be achieved by explicitly indicating which atoms belong to which fragments: >>> for frag in water_dimer.split([range(3), (5, 4), [3]]): ... print("{}\n".format(frag)) O -1.62893 -0.04138 0.37137 H -0.69803 -0.09168 0.09337 H -2.06663 -0.73498 -0.13663 <BLANKLINE> H 1.72977 -0.08038 0.53387 H 1.44927 0.91672 -0.58573 <BLANKLINE> O 1.21457 0.03172 -0.27623 <BLANKLINE>
[ "r", "Break", "molecule", "up", "into", "constituent", "fragments", "." ]
train
https://github.com/schneiderfelipe/pyrrole/blob/13e26accc9a059f0ab69773648b24292fe1fbfd6/pyrrole/atoms.py#L140-L212
schneiderfelipe/pyrrole
pyrrole/atoms.py
Atoms.to_pybel
def to_pybel(self): """ Produce a Pybel Molecule object. It is based on the capabilities of OpenBabel through Pybel. The present object must have at least `atomcoords`, `atomnos`, `charge` and `mult` defined. Returns ------- `pybel.Molecule` Examples -------- >>> from pyrrole.atoms import Atoms >>> dioxygen = Atoms({'atomcoords': [[0., 0., 0.], ... [0., 0., 1.21]], ... 'atomnos': [8, 8], ... 'charge': 0, ... 'mult': 3, ... 'name': 'dioxygen'}) >>> mol = dioxygen.to_pybel() >>> mol.molwt 31.9988 """ # TODO: This only exports last geometry by default. obmol = _makeopenbabel(self.atomcoords, self.atomnos, self.charge, self.mult) title = self.name or "" if 'scfenergies' in self.attributes: title += ", scfenergy={} eV".format(self.scfenergies[-1]) obmol.SetTitle(title) # TODO: make a test for this function. return _pb.Molecule(obmol)
python
def to_pybel(self): """ Produce a Pybel Molecule object. It is based on the capabilities of OpenBabel through Pybel. The present object must have at least `atomcoords`, `atomnos`, `charge` and `mult` defined. Returns ------- `pybel.Molecule` Examples -------- >>> from pyrrole.atoms import Atoms >>> dioxygen = Atoms({'atomcoords': [[0., 0., 0.], ... [0., 0., 1.21]], ... 'atomnos': [8, 8], ... 'charge': 0, ... 'mult': 3, ... 'name': 'dioxygen'}) >>> mol = dioxygen.to_pybel() >>> mol.molwt 31.9988 """ # TODO: This only exports last geometry by default. obmol = _makeopenbabel(self.atomcoords, self.atomnos, self.charge, self.mult) title = self.name or "" if 'scfenergies' in self.attributes: title += ", scfenergy={} eV".format(self.scfenergies[-1]) obmol.SetTitle(title) # TODO: make a test for this function. return _pb.Molecule(obmol)
[ "def", "to_pybel", "(", "self", ")", ":", "# TODO: This only exports last geometry by default.", "obmol", "=", "_makeopenbabel", "(", "self", ".", "atomcoords", ",", "self", ".", "atomnos", ",", "self", ".", "charge", ",", "self", ".", "mult", ")", "title", "=...
Produce a Pybel Molecule object. It is based on the capabilities of OpenBabel through Pybel. The present object must have at least `atomcoords`, `atomnos`, `charge` and `mult` defined. Returns ------- `pybel.Molecule` Examples -------- >>> from pyrrole.atoms import Atoms >>> dioxygen = Atoms({'atomcoords': [[0., 0., 0.], ... [0., 0., 1.21]], ... 'atomnos': [8, 8], ... 'charge': 0, ... 'mult': 3, ... 'name': 'dioxygen'}) >>> mol = dioxygen.to_pybel() >>> mol.molwt 31.9988
[ "Produce", "a", "Pybel", "Molecule", "object", "." ]
train
https://github.com/schneiderfelipe/pyrrole/blob/13e26accc9a059f0ab69773648b24292fe1fbfd6/pyrrole/atoms.py#L253-L289
schneiderfelipe/pyrrole
pyrrole/atoms.py
Atoms.to_string
def to_string(self, format="smi", dialect=None, with_header=False, fragment_id=None, constraints=None): r""" Produce a string representation of the molecule. This function wraps and extends the functionality of OpenBabel (which is accessible through `to_pybel`). Many chemical formats can thus be output (see the `pybel.outformats` variable for a list of available output formats). Parameters ---------- format : `str`, optional Chemical file format of the returned string representation (see examples below). dialect : `str`, optional Format dialect. This encompasses enhancements provided for some subformats. If ``"standard"`` or `None`, the output provided by OpenBabel is used with no or minimal modification. See notes below. with_header : `bool`, optional If `format` encompasses a header, allow it in the returned string. This would be, for instance, the first two lines of data for ``format="xyz"`` (see examples below). This might not work with all dialects and/or formats. fragment_id : `str`, optional Indentify molecular fragments (see examples below). This might not work with all dialects and/or formats. constraints : iterable object of `int` Set cartesian constraints for selected atoms (see examples below). This might not work with all dialects and/or formats. Returns ------- `str` String representation of molecule in the specified format and/or dialect. Raises ------ KeyError Raised if `dialect` value is currently not supported or if `fragment_id` is given with a currently not supported `dialect` value. Notes ----- Format dialects are subformats that support extended functionality. Currently supported dialects are: - for ``format="xyz"``: - ``"ADF"``, ``"ORCA"``. Examples -------- >>> from pyrrole import atoms >>> dioxygen = atoms.Atoms({'atomcoords': [[0., 0., 0.], ... [0., 0., 1.21]], ... 'atomnos': [8, 8], ... 'charge': 0, ... 'mult': 3, ... 
'name': 'dioxygen'}) By default, a SMILES string is returned: >>> dioxygen.to_string() 'O=O\tdioxygen' Cartesian coordinates can be produced with ``format="xyz"``, which is equivalent to printing an `Atoms` instance: >>> print(dioxygen.to_string("xyz")) O 0.00000 0.00000 0.00000 O 0.00000 0.00000 1.21000 >>> print(dioxygen) O 0.00000 0.00000 0.00000 O 0.00000 0.00000 1.21000 Header lines are disabled by default (for ``format="xyz"``, for example, the header stores the number of atoms in the molecule and a comment or title line), but this can be reversed with ``with_header=True``: >>> print(dioxygen.to_string("xyz", with_header=True)) 2 dioxygen O 0.00000 0.00000 0.00000 O 0.00000 0.00000 1.21000 Coordinates for packages such as GAMESS and MOPAC are also supported: >>> water_dimer = atoms.read_pybel("data/water-dimer.xyz") >>> print(water_dimer.to_string("gamin")) O 8.0 -1.6289300000 -0.0413800000 0.3713700000 H 1.0 -0.6980300000 -0.0916800000 0.0933700000 H 1.0 -2.0666300000 -0.7349800000 -0.1366300000 O 8.0 1.2145700000 0.0317200000 -0.2762300000 H 1.0 1.4492700000 0.9167200000 -0.5857300000 H 1.0 1.7297700000 -0.0803800000 0.5338700000 >>> print(water_dimer.to_string("mop")) O -1.62893 1 -0.04138 1 0.37137 1 H -0.69803 1 -0.09168 1 0.09337 1 H -2.06663 1 -0.73498 1 -0.13663 1 O 1.21457 1 0.03172 1 -0.27623 1 H 1.44927 1 0.91672 1 -0.58573 1 H 1.72977 1 -0.08038 1 0.53387 1 Constraining of cartesian coordinates works with MOPAC format: >>> print(water_dimer.to_string("mop", constraints=(0, 3))) O -1.62893 0 -0.04138 0 0.37137 0 H -0.69803 1 -0.09168 1 0.09337 1 H -2.06663 1 -0.73498 1 -0.13663 1 O 1.21457 0 0.03172 0 -0.27623 0 H 1.44927 1 0.91672 1 -0.58573 1 H 1.72977 1 -0.08038 1 0.53387 1 Fragment identification is supported for ``"ADF"`` and ``"ORCA"`` dialects: >>> print(water_dimer.to_string("xyz", dialect="ADF", ... 
fragment_id="dimer")) O -1.62893 -0.04138 0.37137 f=dimer H -0.69803 -0.09168 0.09337 f=dimer H -2.06663 -0.73498 -0.13663 f=dimer O 1.21457 0.03172 -0.27623 f=dimer H 1.44927 0.91672 -0.58573 f=dimer H 1.72977 -0.08038 0.53387 f=dimer >>> print(water_dimer.to_string("xyz", dialect="ORCA", ... fragment_id=1)) O(1) -1.62893 -0.04138 0.37137 H(1) -0.69803 -0.09168 0.09337 H(1) -2.06663 -0.73498 -0.13663 O(1) 1.21457 0.03172 -0.27623 H(1) 1.44927 0.91672 -0.58573 H(1) 1.72977 -0.08038 0.53387 """ s = self.to_pybel().write(format).strip() if dialect is None: dialect = "standard" dialect = dialect.lower() if format == "xyz": natom, comment, body = s.split("\n", 2) if dialect in {"adf", "orca", "standard"}: if fragment_id is not None: if dialect == "adf": body = \ "\n".join(["{} f={}".format(line, fragment_id) for line in body.split("\n")]) elif dialect == "orca": fragment_id = "({})".format(fragment_id) body = \ "\n".join([line.replace(" " * len(fragment_id), fragment_id, 1) for line in body.split("\n")]) else: raise KeyError("fragment_id currently not supported " "with dialect '{}'".format(dialect)) else: raise KeyError("dialect '{}' currently not " "supported".format(dialect)) if with_header: s = "\n".join([natom, comment, body]) else: s = body elif format == "gamin": lines = s.split("\n") begin = "\n".join([line.strip() for line in lines[:5]]) body = "\n".join([line.strip() for line in lines[5:-1]]) if with_header: s = "\n".join([begin, body]) else: s = body elif format == "mop": chunks = s.split("\n", 2) begin = "\n".join([line.strip() for line in chunks[:2]]) body = chunks[2].strip() if constraints is not None: body = body.split("\n") for i in constraints: body[i] = _re.sub(' 1( |$)', ' 0\g<1>', body[i]) body = "\n".join(body) if with_header: s = "\n".join([begin, body]) else: s = body return s.strip()
python
def to_string(self, format="smi", dialect=None, with_header=False, fragment_id=None, constraints=None): r""" Produce a string representation of the molecule. This function wraps and extends the functionality of OpenBabel (which is accessible through `to_pybel`). Many chemical formats can thus be output (see the `pybel.outformats` variable for a list of available output formats). Parameters ---------- format : `str`, optional Chemical file format of the returned string representation (see examples below). dialect : `str`, optional Format dialect. This encompasses enhancements provided for some subformats. If ``"standard"`` or `None`, the output provided by OpenBabel is used with no or minimal modification. See notes below. with_header : `bool`, optional If `format` encompasses a header, allow it in the returned string. This would be, for instance, the first two lines of data for ``format="xyz"`` (see examples below). This might not work with all dialects and/or formats. fragment_id : `str`, optional Indentify molecular fragments (see examples below). This might not work with all dialects and/or formats. constraints : iterable object of `int` Set cartesian constraints for selected atoms (see examples below). This might not work with all dialects and/or formats. Returns ------- `str` String representation of molecule in the specified format and/or dialect. Raises ------ KeyError Raised if `dialect` value is currently not supported or if `fragment_id` is given with a currently not supported `dialect` value. Notes ----- Format dialects are subformats that support extended functionality. Currently supported dialects are: - for ``format="xyz"``: - ``"ADF"``, ``"ORCA"``. Examples -------- >>> from pyrrole import atoms >>> dioxygen = atoms.Atoms({'atomcoords': [[0., 0., 0.], ... [0., 0., 1.21]], ... 'atomnos': [8, 8], ... 'charge': 0, ... 'mult': 3, ... 
'name': 'dioxygen'}) By default, a SMILES string is returned: >>> dioxygen.to_string() 'O=O\tdioxygen' Cartesian coordinates can be produced with ``format="xyz"``, which is equivalent to printing an `Atoms` instance: >>> print(dioxygen.to_string("xyz")) O 0.00000 0.00000 0.00000 O 0.00000 0.00000 1.21000 >>> print(dioxygen) O 0.00000 0.00000 0.00000 O 0.00000 0.00000 1.21000 Header lines are disabled by default (for ``format="xyz"``, for example, the header stores the number of atoms in the molecule and a comment or title line), but this can be reversed with ``with_header=True``: >>> print(dioxygen.to_string("xyz", with_header=True)) 2 dioxygen O 0.00000 0.00000 0.00000 O 0.00000 0.00000 1.21000 Coordinates for packages such as GAMESS and MOPAC are also supported: >>> water_dimer = atoms.read_pybel("data/water-dimer.xyz") >>> print(water_dimer.to_string("gamin")) O 8.0 -1.6289300000 -0.0413800000 0.3713700000 H 1.0 -0.6980300000 -0.0916800000 0.0933700000 H 1.0 -2.0666300000 -0.7349800000 -0.1366300000 O 8.0 1.2145700000 0.0317200000 -0.2762300000 H 1.0 1.4492700000 0.9167200000 -0.5857300000 H 1.0 1.7297700000 -0.0803800000 0.5338700000 >>> print(water_dimer.to_string("mop")) O -1.62893 1 -0.04138 1 0.37137 1 H -0.69803 1 -0.09168 1 0.09337 1 H -2.06663 1 -0.73498 1 -0.13663 1 O 1.21457 1 0.03172 1 -0.27623 1 H 1.44927 1 0.91672 1 -0.58573 1 H 1.72977 1 -0.08038 1 0.53387 1 Constraining of cartesian coordinates works with MOPAC format: >>> print(water_dimer.to_string("mop", constraints=(0, 3))) O -1.62893 0 -0.04138 0 0.37137 0 H -0.69803 1 -0.09168 1 0.09337 1 H -2.06663 1 -0.73498 1 -0.13663 1 O 1.21457 0 0.03172 0 -0.27623 0 H 1.44927 1 0.91672 1 -0.58573 1 H 1.72977 1 -0.08038 1 0.53387 1 Fragment identification is supported for ``"ADF"`` and ``"ORCA"`` dialects: >>> print(water_dimer.to_string("xyz", dialect="ADF", ... 
fragment_id="dimer")) O -1.62893 -0.04138 0.37137 f=dimer H -0.69803 -0.09168 0.09337 f=dimer H -2.06663 -0.73498 -0.13663 f=dimer O 1.21457 0.03172 -0.27623 f=dimer H 1.44927 0.91672 -0.58573 f=dimer H 1.72977 -0.08038 0.53387 f=dimer >>> print(water_dimer.to_string("xyz", dialect="ORCA", ... fragment_id=1)) O(1) -1.62893 -0.04138 0.37137 H(1) -0.69803 -0.09168 0.09337 H(1) -2.06663 -0.73498 -0.13663 O(1) 1.21457 0.03172 -0.27623 H(1) 1.44927 0.91672 -0.58573 H(1) 1.72977 -0.08038 0.53387 """ s = self.to_pybel().write(format).strip() if dialect is None: dialect = "standard" dialect = dialect.lower() if format == "xyz": natom, comment, body = s.split("\n", 2) if dialect in {"adf", "orca", "standard"}: if fragment_id is not None: if dialect == "adf": body = \ "\n".join(["{} f={}".format(line, fragment_id) for line in body.split("\n")]) elif dialect == "orca": fragment_id = "({})".format(fragment_id) body = \ "\n".join([line.replace(" " * len(fragment_id), fragment_id, 1) for line in body.split("\n")]) else: raise KeyError("fragment_id currently not supported " "with dialect '{}'".format(dialect)) else: raise KeyError("dialect '{}' currently not " "supported".format(dialect)) if with_header: s = "\n".join([natom, comment, body]) else: s = body elif format == "gamin": lines = s.split("\n") begin = "\n".join([line.strip() for line in lines[:5]]) body = "\n".join([line.strip() for line in lines[5:-1]]) if with_header: s = "\n".join([begin, body]) else: s = body elif format == "mop": chunks = s.split("\n", 2) begin = "\n".join([line.strip() for line in chunks[:2]]) body = chunks[2].strip() if constraints is not None: body = body.split("\n") for i in constraints: body[i] = _re.sub(' 1( |$)', ' 0\g<1>', body[i]) body = "\n".join(body) if with_header: s = "\n".join([begin, body]) else: s = body return s.strip()
[ "def", "to_string", "(", "self", ",", "format", "=", "\"smi\"", ",", "dialect", "=", "None", ",", "with_header", "=", "False", ",", "fragment_id", "=", "None", ",", "constraints", "=", "None", ")", ":", "s", "=", "self", ".", "to_pybel", "(", ")", "....
r""" Produce a string representation of the molecule. This function wraps and extends the functionality of OpenBabel (which is accessible through `to_pybel`). Many chemical formats can thus be output (see the `pybel.outformats` variable for a list of available output formats). Parameters ---------- format : `str`, optional Chemical file format of the returned string representation (see examples below). dialect : `str`, optional Format dialect. This encompasses enhancements provided for some subformats. If ``"standard"`` or `None`, the output provided by OpenBabel is used with no or minimal modification. See notes below. with_header : `bool`, optional If `format` encompasses a header, allow it in the returned string. This would be, for instance, the first two lines of data for ``format="xyz"`` (see examples below). This might not work with all dialects and/or formats. fragment_id : `str`, optional Indentify molecular fragments (see examples below). This might not work with all dialects and/or formats. constraints : iterable object of `int` Set cartesian constraints for selected atoms (see examples below). This might not work with all dialects and/or formats. Returns ------- `str` String representation of molecule in the specified format and/or dialect. Raises ------ KeyError Raised if `dialect` value is currently not supported or if `fragment_id` is given with a currently not supported `dialect` value. Notes ----- Format dialects are subformats that support extended functionality. Currently supported dialects are: - for ``format="xyz"``: - ``"ADF"``, ``"ORCA"``. Examples -------- >>> from pyrrole import atoms >>> dioxygen = atoms.Atoms({'atomcoords': [[0., 0., 0.], ... [0., 0., 1.21]], ... 'atomnos': [8, 8], ... 'charge': 0, ... 'mult': 3, ... 
'name': 'dioxygen'}) By default, a SMILES string is returned: >>> dioxygen.to_string() 'O=O\tdioxygen' Cartesian coordinates can be produced with ``format="xyz"``, which is equivalent to printing an `Atoms` instance: >>> print(dioxygen.to_string("xyz")) O 0.00000 0.00000 0.00000 O 0.00000 0.00000 1.21000 >>> print(dioxygen) O 0.00000 0.00000 0.00000 O 0.00000 0.00000 1.21000 Header lines are disabled by default (for ``format="xyz"``, for example, the header stores the number of atoms in the molecule and a comment or title line), but this can be reversed with ``with_header=True``: >>> print(dioxygen.to_string("xyz", with_header=True)) 2 dioxygen O 0.00000 0.00000 0.00000 O 0.00000 0.00000 1.21000 Coordinates for packages such as GAMESS and MOPAC are also supported: >>> water_dimer = atoms.read_pybel("data/water-dimer.xyz") >>> print(water_dimer.to_string("gamin")) O 8.0 -1.6289300000 -0.0413800000 0.3713700000 H 1.0 -0.6980300000 -0.0916800000 0.0933700000 H 1.0 -2.0666300000 -0.7349800000 -0.1366300000 O 8.0 1.2145700000 0.0317200000 -0.2762300000 H 1.0 1.4492700000 0.9167200000 -0.5857300000 H 1.0 1.7297700000 -0.0803800000 0.5338700000 >>> print(water_dimer.to_string("mop")) O -1.62893 1 -0.04138 1 0.37137 1 H -0.69803 1 -0.09168 1 0.09337 1 H -2.06663 1 -0.73498 1 -0.13663 1 O 1.21457 1 0.03172 1 -0.27623 1 H 1.44927 1 0.91672 1 -0.58573 1 H 1.72977 1 -0.08038 1 0.53387 1 Constraining of cartesian coordinates works with MOPAC format: >>> print(water_dimer.to_string("mop", constraints=(0, 3))) O -1.62893 0 -0.04138 0 0.37137 0 H -0.69803 1 -0.09168 1 0.09337 1 H -2.06663 1 -0.73498 1 -0.13663 1 O 1.21457 0 0.03172 0 -0.27623 0 H 1.44927 1 0.91672 1 -0.58573 1 H 1.72977 1 -0.08038 1 0.53387 1 Fragment identification is supported for ``"ADF"`` and ``"ORCA"`` dialects: >>> print(water_dimer.to_string("xyz", dialect="ADF", ... 
fragment_id="dimer")) O -1.62893 -0.04138 0.37137 f=dimer H -0.69803 -0.09168 0.09337 f=dimer H -2.06663 -0.73498 -0.13663 f=dimer O 1.21457 0.03172 -0.27623 f=dimer H 1.44927 0.91672 -0.58573 f=dimer H 1.72977 -0.08038 0.53387 f=dimer >>> print(water_dimer.to_string("xyz", dialect="ORCA", ... fragment_id=1)) O(1) -1.62893 -0.04138 0.37137 H(1) -0.69803 -0.09168 0.09337 H(1) -2.06663 -0.73498 -0.13663 O(1) 1.21457 0.03172 -0.27623 H(1) 1.44927 0.91672 -0.58573 H(1) 1.72977 -0.08038 0.53387
[ "r", "Produce", "a", "string", "representation", "of", "the", "molecule", "." ]
train
https://github.com/schneiderfelipe/pyrrole/blob/13e26accc9a059f0ab69773648b24292fe1fbfd6/pyrrole/atoms.py#L291-L488
arve0/fijibin
fijibin/macro.py
run
def run(macro, output_files=[], force_close=True): """ Runs Fiji with the suplied macro. Output of Fiji can be viewed by setting environment variable `DEBUG=fijibin`. Parameters ---------- macro : string or list of strings IJM-macro(s) to run. If list of strings, it will be joined with a space, so all statements should end with ``;``. output_files : list Files to check if exists after macro has been run. Files specified that do not exist after macro is done will print a warning message. force_close : bool Will add ``eval("script", "System.exit(42);");`` to end of macro. Exit code 42 is used to overcome that errors in macro efficiently will exit Fiji with error code 0. In other words, if this line in the macro is reached, the macro has most probably finished without errors. This is the default behaviour. One should also note that Fiji doesn't terminate right away if ``System.exit()`` is left out, and it may take several minutes for Fiji to close. Returns ------- int Files from output_files which exists after running macro. 
""" if type(macro) == list: macro = ' '.join(macro) if len(macro) == 0: print('fijibin.macro.run got empty macro, not starting fiji') return _exists(output_files) if force_close: # make sure fiji halts immediately when done # hack: use error code 42 to check if macro has run sucessfully macro = macro + 'eval("script", "System.exit(42);");' # escape backslashes (windows file names) # not \ \ not \ g1 \\ g2 macro = re.sub(r"([^\\])\\([^\\])", r"\1\\\\\2", macro) debug('macro {}'.format(macro)) # avoid verbose output of Fiji when DEBUG environment variable set env = os.environ.copy() debugging = False if 'DEBUG' in env: if env['DEBUG'] == 'fijibin' or env['DEBUG'] == '*': debugging = True del env['DEBUG'] fptr, temp_filename = mkstemp(suffix='.ijm') m = os.fdopen(fptr, 'w') m.write(macro) m.flush() # make sure macro is written before running Fiji m.close() cmd = [fijibin.BIN, '--headless', '-macro', temp_filename] proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env) out, err = proc.communicate() for line in out.decode('latin1', errors='ignore').splitlines(): debug('stdout:' + line) for line in err.decode('latin1', errors='ignore').splitlines(): debug('stderr:' + line) if force_close and proc.returncode != 42: print('fijibin ERROR: Fiji did not successfully ' + 'run macro {}'.format(temp_filename)) if not debugging: print('fijibin Try running script with ' + '`DEBUG=fijibin python your_script.py`') else: # only delete if everything is ok os.remove(temp_filename) # return output_files which exists return _exists(output_files)
python
def run(macro, output_files=[], force_close=True): """ Runs Fiji with the suplied macro. Output of Fiji can be viewed by setting environment variable `DEBUG=fijibin`. Parameters ---------- macro : string or list of strings IJM-macro(s) to run. If list of strings, it will be joined with a space, so all statements should end with ``;``. output_files : list Files to check if exists after macro has been run. Files specified that do not exist after macro is done will print a warning message. force_close : bool Will add ``eval("script", "System.exit(42);");`` to end of macro. Exit code 42 is used to overcome that errors in macro efficiently will exit Fiji with error code 0. In other words, if this line in the macro is reached, the macro has most probably finished without errors. This is the default behaviour. One should also note that Fiji doesn't terminate right away if ``System.exit()`` is left out, and it may take several minutes for Fiji to close. Returns ------- int Files from output_files which exists after running macro. 
""" if type(macro) == list: macro = ' '.join(macro) if len(macro) == 0: print('fijibin.macro.run got empty macro, not starting fiji') return _exists(output_files) if force_close: # make sure fiji halts immediately when done # hack: use error code 42 to check if macro has run sucessfully macro = macro + 'eval("script", "System.exit(42);");' # escape backslashes (windows file names) # not \ \ not \ g1 \\ g2 macro = re.sub(r"([^\\])\\([^\\])", r"\1\\\\\2", macro) debug('macro {}'.format(macro)) # avoid verbose output of Fiji when DEBUG environment variable set env = os.environ.copy() debugging = False if 'DEBUG' in env: if env['DEBUG'] == 'fijibin' or env['DEBUG'] == '*': debugging = True del env['DEBUG'] fptr, temp_filename = mkstemp(suffix='.ijm') m = os.fdopen(fptr, 'w') m.write(macro) m.flush() # make sure macro is written before running Fiji m.close() cmd = [fijibin.BIN, '--headless', '-macro', temp_filename] proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env) out, err = proc.communicate() for line in out.decode('latin1', errors='ignore').splitlines(): debug('stdout:' + line) for line in err.decode('latin1', errors='ignore').splitlines(): debug('stderr:' + line) if force_close and proc.returncode != 42: print('fijibin ERROR: Fiji did not successfully ' + 'run macro {}'.format(temp_filename)) if not debugging: print('fijibin Try running script with ' + '`DEBUG=fijibin python your_script.py`') else: # only delete if everything is ok os.remove(temp_filename) # return output_files which exists return _exists(output_files)
[ "def", "run", "(", "macro", ",", "output_files", "=", "[", "]", ",", "force_close", "=", "True", ")", ":", "if", "type", "(", "macro", ")", "==", "list", ":", "macro", "=", "' '", ".", "join", "(", "macro", ")", "if", "len", "(", "macro", ")", ...
Runs Fiji with the suplied macro. Output of Fiji can be viewed by setting environment variable `DEBUG=fijibin`. Parameters ---------- macro : string or list of strings IJM-macro(s) to run. If list of strings, it will be joined with a space, so all statements should end with ``;``. output_files : list Files to check if exists after macro has been run. Files specified that do not exist after macro is done will print a warning message. force_close : bool Will add ``eval("script", "System.exit(42);");`` to end of macro. Exit code 42 is used to overcome that errors in macro efficiently will exit Fiji with error code 0. In other words, if this line in the macro is reached, the macro has most probably finished without errors. This is the default behaviour. One should also note that Fiji doesn't terminate right away if ``System.exit()`` is left out, and it may take several minutes for Fiji to close. Returns ------- int Files from output_files which exists after running macro.
[ "Runs", "Fiji", "with", "the", "suplied", "macro", ".", "Output", "of", "Fiji", "can", "be", "viewed", "by", "setting", "environment", "variable", "DEBUG", "=", "fijibin", "." ]
train
https://github.com/arve0/fijibin/blob/a3d2e983cb9ff2bcbb56a800084bc3b35cb9292f/fijibin/macro.py#L16-L97
arve0/fijibin
fijibin/macro.py
stitch
def stitch(folder, filenames, x_size, y_size, output_filename, x_start=0, y_start=0, overlap=10): """ Creates a Fiji Grid/Collection stitching macro. Parameters are the same as in the plugin and are described in further detail here: http://fiji.sc/Image_Stitching#Grid.2FCollection_Stitching. **Default stitch parameters:** * Filename defined positions * Compute overlap * Subpixel accurancy * Save computation time (but use more RAM) * Fusion method: Linear blending * Regression threshold: 0.30 * Max/avg displacement threshold: 2.50 * Absolute displacement threshold: 3.50 Parameters ---------- folder : string Path to folder with images or folders with images. Example: */path/to/slide--S00/chamber--U01--V02/* filenames : string Filenames of images. Example: *field-X{xx}-Y{yy}/image-X{xx}-Y{yy}.ome.tif* x_size : int Size of grid, number of images in x direction. y_size : int Size of grid, number of images in y direction. output_filename : string Where to store fused image. Should be `.png`. x_start : int Which x position grid start with. y_start : int Which y position grid start with. overlap : number Tile overlap in percent. Fiji will find the optimal overlap, but a precise overlap assumption will decrase computation time. Returns ------- string IJM-macro. 
""" macro = [] macro.append('run("Grid/Collection stitching",') macro.append('"type=[Filename defined position]') macro.append('order=[Defined by filename ]') macro.append('grid_size_x={}'.format(x_size)) macro.append('grid_size_y={}'.format(y_size)) macro.append('tile_overlap={}'.format(overlap)) macro.append('first_file_index_x={}'.format(x_start)) macro.append('first_file_index_y={}'.format(y_start)) macro.append('directory=[{}]'.format(folder)) macro.append('file_names=[{}]'.format(filenames)) macro.append('output_textfile_name=TileConfiguration.txt') macro.append('fusion_method=[Linear Blending]') macro.append('regression_threshold=0.20') macro.append('max/avg_displacement_threshold=2.50') macro.append('absolute_displacement_threshold=3.50') macro.append('compute_overlap') macro.append('subpixel_accuracy') macro.append('computation_parameters=[Save computation time (but use more RAM)]') # use display, such that we can specify output filename # this is 'Fused and display' for previous stitching version!! macro.append('image_output=[Fuse and display]");') # save to png macro.append('selectWindow("Fused");') macro.append('saveAs("PNG", "{}");'.format(output_filename)) macro.append('close();') return ' '.join(macro)
python
def stitch(folder, filenames, x_size, y_size, output_filename, x_start=0, y_start=0, overlap=10): """ Creates a Fiji Grid/Collection stitching macro. Parameters are the same as in the plugin and are described in further detail here: http://fiji.sc/Image_Stitching#Grid.2FCollection_Stitching. **Default stitch parameters:** * Filename defined positions * Compute overlap * Subpixel accurancy * Save computation time (but use more RAM) * Fusion method: Linear blending * Regression threshold: 0.30 * Max/avg displacement threshold: 2.50 * Absolute displacement threshold: 3.50 Parameters ---------- folder : string Path to folder with images or folders with images. Example: */path/to/slide--S00/chamber--U01--V02/* filenames : string Filenames of images. Example: *field-X{xx}-Y{yy}/image-X{xx}-Y{yy}.ome.tif* x_size : int Size of grid, number of images in x direction. y_size : int Size of grid, number of images in y direction. output_filename : string Where to store fused image. Should be `.png`. x_start : int Which x position grid start with. y_start : int Which y position grid start with. overlap : number Tile overlap in percent. Fiji will find the optimal overlap, but a precise overlap assumption will decrase computation time. Returns ------- string IJM-macro. 
""" macro = [] macro.append('run("Grid/Collection stitching",') macro.append('"type=[Filename defined position]') macro.append('order=[Defined by filename ]') macro.append('grid_size_x={}'.format(x_size)) macro.append('grid_size_y={}'.format(y_size)) macro.append('tile_overlap={}'.format(overlap)) macro.append('first_file_index_x={}'.format(x_start)) macro.append('first_file_index_y={}'.format(y_start)) macro.append('directory=[{}]'.format(folder)) macro.append('file_names=[{}]'.format(filenames)) macro.append('output_textfile_name=TileConfiguration.txt') macro.append('fusion_method=[Linear Blending]') macro.append('regression_threshold=0.20') macro.append('max/avg_displacement_threshold=2.50') macro.append('absolute_displacement_threshold=3.50') macro.append('compute_overlap') macro.append('subpixel_accuracy') macro.append('computation_parameters=[Save computation time (but use more RAM)]') # use display, such that we can specify output filename # this is 'Fused and display' for previous stitching version!! macro.append('image_output=[Fuse and display]");') # save to png macro.append('selectWindow("Fused");') macro.append('saveAs("PNG", "{}");'.format(output_filename)) macro.append('close();') return ' '.join(macro)
[ "def", "stitch", "(", "folder", ",", "filenames", ",", "x_size", ",", "y_size", ",", "output_filename", ",", "x_start", "=", "0", ",", "y_start", "=", "0", ",", "overlap", "=", "10", ")", ":", "macro", "=", "[", "]", "macro", ".", "append", "(", "'...
Creates a Fiji Grid/Collection stitching macro. Parameters are the same as in the plugin and are described in further detail here: http://fiji.sc/Image_Stitching#Grid.2FCollection_Stitching. **Default stitch parameters:** * Filename defined positions * Compute overlap * Subpixel accurancy * Save computation time (but use more RAM) * Fusion method: Linear blending * Regression threshold: 0.30 * Max/avg displacement threshold: 2.50 * Absolute displacement threshold: 3.50 Parameters ---------- folder : string Path to folder with images or folders with images. Example: */path/to/slide--S00/chamber--U01--V02/* filenames : string Filenames of images. Example: *field-X{xx}-Y{yy}/image-X{xx}-Y{yy}.ome.tif* x_size : int Size of grid, number of images in x direction. y_size : int Size of grid, number of images in y direction. output_filename : string Where to store fused image. Should be `.png`. x_start : int Which x position grid start with. y_start : int Which y position grid start with. overlap : number Tile overlap in percent. Fiji will find the optimal overlap, but a precise overlap assumption will decrase computation time. Returns ------- string IJM-macro.
[ "Creates", "a", "Fiji", "Grid", "/", "Collection", "stitching", "macro", ".", "Parameters", "are", "the", "same", "as", "in", "the", "plugin", "and", "are", "described", "in", "further", "detail", "here", ":", "http", ":", "//", "fiji", ".", "sc", "/", ...
train
https://github.com/arve0/fijibin/blob/a3d2e983cb9ff2bcbb56a800084bc3b35cb9292f/fijibin/macro.py#L102-L176
arve0/fijibin
fijibin/macro.py
_exists
def _exists(filenames): """Check if every filename exists. If not, print an error message and remove the item from the list. Parameters ---------- filenames : list List of filenames to check for existence. Returns ------- list Filtered list of filenames that exists. """ exists = [] for filename in filenames: if os.path.isfile(filename): exists.append(filename) else: print('fijibin ERROR missing output file {}'.format(filename)) return exists
python
def _exists(filenames): """Check if every filename exists. If not, print an error message and remove the item from the list. Parameters ---------- filenames : list List of filenames to check for existence. Returns ------- list Filtered list of filenames that exists. """ exists = [] for filename in filenames: if os.path.isfile(filename): exists.append(filename) else: print('fijibin ERROR missing output file {}'.format(filename)) return exists
[ "def", "_exists", "(", "filenames", ")", ":", "exists", "=", "[", "]", "for", "filename", "in", "filenames", ":", "if", "os", ".", "path", ".", "isfile", "(", "filename", ")", ":", "exists", ".", "append", "(", "filename", ")", "else", ":", "print", ...
Check if every filename exists. If not, print an error message and remove the item from the list. Parameters ---------- filenames : list List of filenames to check for existence. Returns ------- list Filtered list of filenames that exists.
[ "Check", "if", "every", "filename", "exists", ".", "If", "not", "print", "an", "error", "message", "and", "remove", "the", "item", "from", "the", "list", "." ]
train
https://github.com/arve0/fijibin/blob/a3d2e983cb9ff2bcbb56a800084bc3b35cb9292f/fijibin/macro.py#L182-L203
pebble/libpebble2
libpebble2/services/voice.py
VoiceService.send_stop_audio
def send_stop_audio(self): ''' Stop an audio streaming session ''' assert self._session_id != VoiceService.SESSION_ID_INVALID self._pebble.send_packet(AudioStream(session_id=self._session_id, data=StopTransfer()))
python
def send_stop_audio(self): ''' Stop an audio streaming session ''' assert self._session_id != VoiceService.SESSION_ID_INVALID self._pebble.send_packet(AudioStream(session_id=self._session_id, data=StopTransfer()))
[ "def", "send_stop_audio", "(", "self", ")", ":", "assert", "self", ".", "_session_id", "!=", "VoiceService", ".", "SESSION_ID_INVALID", "self", ".", "_pebble", ".", "send_packet", "(", "AudioStream", "(", "session_id", "=", "self", ".", "_session_id", ",", "da...
Stop an audio streaming session
[ "Stop", "an", "audio", "streaming", "session" ]
train
https://github.com/pebble/libpebble2/blob/23e2eb92cfc084e6f9e8c718711ac994ef606d18/libpebble2/services/voice.py#L106-L112
pebble/libpebble2
libpebble2/services/voice.py
VoiceService.send_session_setup_result
def send_session_setup_result(self, result, app_uuid=None): ''' Send the result of setting up a dictation session requested by the watch :param result: result of setting up the session :type result: .SetupResult :param app_uuid: UUID of app that initiated the session :type app_uuid: uuid.UUID ''' assert self._session_id != VoiceService.SESSION_ID_INVALID assert isinstance(result, SetupResult) flags = 0 if app_uuid is not None: assert isinstance(app_uuid, uuid.UUID) flags |= Flags.AppInitiated logger.debug("Sending session setup result (result={}".format(result) + ", app={})".format(app_uuid) if app_uuid is not None else ")") self._pebble.send_packet(VoiceControlResult(flags=flags, data=SessionSetupResult( session_type=SessionType.Dictation, result=result))) if result != SetupResult.Success: self._session_id = VoiceService.SESSION_ID_INVALID
python
def send_session_setup_result(self, result, app_uuid=None): ''' Send the result of setting up a dictation session requested by the watch :param result: result of setting up the session :type result: .SetupResult :param app_uuid: UUID of app that initiated the session :type app_uuid: uuid.UUID ''' assert self._session_id != VoiceService.SESSION_ID_INVALID assert isinstance(result, SetupResult) flags = 0 if app_uuid is not None: assert isinstance(app_uuid, uuid.UUID) flags |= Flags.AppInitiated logger.debug("Sending session setup result (result={}".format(result) + ", app={})".format(app_uuid) if app_uuid is not None else ")") self._pebble.send_packet(VoiceControlResult(flags=flags, data=SessionSetupResult( session_type=SessionType.Dictation, result=result))) if result != SetupResult.Success: self._session_id = VoiceService.SESSION_ID_INVALID
[ "def", "send_session_setup_result", "(", "self", ",", "result", ",", "app_uuid", "=", "None", ")", ":", "assert", "self", ".", "_session_id", "!=", "VoiceService", ".", "SESSION_ID_INVALID", "assert", "isinstance", "(", "result", ",", "SetupResult", ")", "flags"...
Send the result of setting up a dictation session requested by the watch :param result: result of setting up the session :type result: .SetupResult :param app_uuid: UUID of app that initiated the session :type app_uuid: uuid.UUID
[ "Send", "the", "result", "of", "setting", "up", "a", "dictation", "session", "requested", "by", "the", "watch" ]
train
https://github.com/pebble/libpebble2/blob/23e2eb92cfc084e6f9e8c718711ac994ef606d18/libpebble2/services/voice.py#L114-L138
pebble/libpebble2
libpebble2/services/voice.py
VoiceService.send_dictation_result
def send_dictation_result(self, result, sentences=None, app_uuid=None): ''' Send the result of a dictation session :param result: Result of the session :type result: DictationResult :param sentences: list of sentences, each of which is a list of words and punctuation :param app_uuid: UUID of app that initiated the session :type app_uuid: uuid.UUID ''' assert self._session_id != VoiceService.SESSION_ID_INVALID assert isinstance(result, TranscriptionResult) transcription = None if result == TranscriptionResult.Success: if len(sentences) > 0: s_list = [] for s in sentences: words = [Word(confidence=100, data=w) for w in s] s_list.append(Sentence(words=words)) transcription = Transcription(transcription=SentenceList(sentences=s_list)) flags = 0 if app_uuid is not None: assert isinstance(app_uuid, uuid.UUID) flags |= Flags.AppInitiated attributes = [] if app_uuid is not None: assert isinstance(app_uuid, uuid.UUID) attributes.append(Attribute(id=AttributeType.AppUuid, data=AppUuid(uuid=app_uuid))) if transcription is not None: attributes.append(Attribute(id=AttributeType.Transcription, data=transcription)) logger.debug("Sending dictation result (result={}".format(result) + ", app={})".format(app_uuid) if app_uuid is not None else ")") self._pebble.send_packet(VoiceControlResult(flags=flags, data=DictationResult( session_id=self._session_id, result=result, attributes=AttributeList(dictionary=attributes)))) self._session_id = VoiceService.SESSION_ID_INVALID
python
def send_dictation_result(self, result, sentences=None, app_uuid=None): ''' Send the result of a dictation session :param result: Result of the session :type result: DictationResult :param sentences: list of sentences, each of which is a list of words and punctuation :param app_uuid: UUID of app that initiated the session :type app_uuid: uuid.UUID ''' assert self._session_id != VoiceService.SESSION_ID_INVALID assert isinstance(result, TranscriptionResult) transcription = None if result == TranscriptionResult.Success: if len(sentences) > 0: s_list = [] for s in sentences: words = [Word(confidence=100, data=w) for w in s] s_list.append(Sentence(words=words)) transcription = Transcription(transcription=SentenceList(sentences=s_list)) flags = 0 if app_uuid is not None: assert isinstance(app_uuid, uuid.UUID) flags |= Flags.AppInitiated attributes = [] if app_uuid is not None: assert isinstance(app_uuid, uuid.UUID) attributes.append(Attribute(id=AttributeType.AppUuid, data=AppUuid(uuid=app_uuid))) if transcription is not None: attributes.append(Attribute(id=AttributeType.Transcription, data=transcription)) logger.debug("Sending dictation result (result={}".format(result) + ", app={})".format(app_uuid) if app_uuid is not None else ")") self._pebble.send_packet(VoiceControlResult(flags=flags, data=DictationResult( session_id=self._session_id, result=result, attributes=AttributeList(dictionary=attributes)))) self._session_id = VoiceService.SESSION_ID_INVALID
[ "def", "send_dictation_result", "(", "self", ",", "result", ",", "sentences", "=", "None", ",", "app_uuid", "=", "None", ")", ":", "assert", "self", ".", "_session_id", "!=", "VoiceService", ".", "SESSION_ID_INVALID", "assert", "isinstance", "(", "result", ","...
Send the result of a dictation session :param result: Result of the session :type result: DictationResult :param sentences: list of sentences, each of which is a list of words and punctuation :param app_uuid: UUID of app that initiated the session :type app_uuid: uuid.UUID
[ "Send", "the", "result", "of", "a", "dictation", "session" ]
train
https://github.com/pebble/libpebble2/blob/23e2eb92cfc084e6f9e8c718711ac994ef606d18/libpebble2/services/voice.py#L140-L180
denfromufa/clrmagic
clrmagic.py
load_ipython_extension
def load_ipython_extension(ip): """ register magics function, can be called from a notebook """ #ip = get_ipython() ip.register_magics(CustomMagics) # enable C# (CSHARP) highlight patch = ("IPython.config.cell_magic_highlight['clrmagic'] = " "{'reg':[/^%%CS/]};") js = display.Javascript(data=patch, lib=["https://github.com/codemirror/CodeMirror/blob/master/mode/clike/clike.js"])
python
def load_ipython_extension(ip): """ register magics function, can be called from a notebook """ #ip = get_ipython() ip.register_magics(CustomMagics) # enable C# (CSHARP) highlight patch = ("IPython.config.cell_magic_highlight['clrmagic'] = " "{'reg':[/^%%CS/]};") js = display.Javascript(data=patch, lib=["https://github.com/codemirror/CodeMirror/blob/master/mode/clike/clike.js"])
[ "def", "load_ipython_extension", "(", "ip", ")", ":", "#ip = get_ipython()", "ip", ".", "register_magics", "(", "CustomMagics", ")", "# enable C# (CSHARP) highlight", "patch", "=", "(", "\"IPython.config.cell_magic_highlight['clrmagic'] = \"", "\"{'reg':[/^%%CS/]};\"", ")", "...
register magics function, can be called from a notebook
[ "register", "magics", "function", "can", "be", "called", "from", "a", "notebook" ]
train
https://github.com/denfromufa/clrmagic/blob/065215988f112419ca99abe140f13b03e3a14829/clrmagic.py#L69-L79
mathiasertl/xmpp-backends
xmpp_backends/base.py
XmppBackendBase.module
def module(self): """The module specified by the ``library`` attribute.""" if self._module is None: if self.library is None: raise ValueError( "Backend '%s' doesn't specify a library attribute" % self.__class__) try: if '.' in self.library: mod_path, cls_name = self.library.rsplit('.', 1) mod = import_module(mod_path) self._module = getattr(mod, cls_name) else: self._module = import_module(self.library) except (AttributeError, ImportError): raise ValueError("Couldn't load %s backend library" % cls_name) return self._module
python
def module(self): """The module specified by the ``library`` attribute.""" if self._module is None: if self.library is None: raise ValueError( "Backend '%s' doesn't specify a library attribute" % self.__class__) try: if '.' in self.library: mod_path, cls_name = self.library.rsplit('.', 1) mod = import_module(mod_path) self._module = getattr(mod, cls_name) else: self._module = import_module(self.library) except (AttributeError, ImportError): raise ValueError("Couldn't load %s backend library" % cls_name) return self._module
[ "def", "module", "(", "self", ")", ":", "if", "self", ".", "_module", "is", "None", ":", "if", "self", ".", "library", "is", "None", ":", "raise", "ValueError", "(", "\"Backend '%s' doesn't specify a library attribute\"", "%", "self", ".", "__class__", ")", ...
The module specified by the ``library`` attribute.
[ "The", "module", "specified", "by", "the", "library", "attribute", "." ]
train
https://github.com/mathiasertl/xmpp-backends/blob/214ef0664dbf90fa300c2483b9b3416559e5d171/xmpp_backends/base.py#L174-L192
mathiasertl/xmpp-backends
xmpp_backends/base.py
XmppBackendBase.datetime_to_timestamp
def datetime_to_timestamp(self, dt): """Helper function to convert a datetime object to a timestamp. If datetime instance ``dt`` is naive, it is assumed that it is in UTC. In Python 3, this just calls ``datetime.timestamp()``, in Python 2, it substracts any timezone offset and returns the difference since 1970-01-01 00:00:00. Note that the function always returns an int, even in Python 3. >>> XmppBackendBase().datetime_to_timestamp(datetime(2017, 9, 17, 19, 59)) 1505678340 >>> XmppBackendBase().datetime_to_timestamp(datetime(1984, 11, 6, 13, 21)) 468595260 :param dt: The datetime object to convert. If ``None``, returns the current time. :type dt: datetime :return: The seconds in UTC. :rtype: int """ if dt is None: return int(time.time()) if six.PY3: if not dt.tzinfo: dt = pytz.utc.localize(dt) return int(dt.timestamp()) else: if dt.tzinfo: dt = dt.replace(tzinfo=None) - dt.utcoffset() return int((dt - datetime(1970, 1, 1)).total_seconds())
python
def datetime_to_timestamp(self, dt): """Helper function to convert a datetime object to a timestamp. If datetime instance ``dt`` is naive, it is assumed that it is in UTC. In Python 3, this just calls ``datetime.timestamp()``, in Python 2, it substracts any timezone offset and returns the difference since 1970-01-01 00:00:00. Note that the function always returns an int, even in Python 3. >>> XmppBackendBase().datetime_to_timestamp(datetime(2017, 9, 17, 19, 59)) 1505678340 >>> XmppBackendBase().datetime_to_timestamp(datetime(1984, 11, 6, 13, 21)) 468595260 :param dt: The datetime object to convert. If ``None``, returns the current time. :type dt: datetime :return: The seconds in UTC. :rtype: int """ if dt is None: return int(time.time()) if six.PY3: if not dt.tzinfo: dt = pytz.utc.localize(dt) return int(dt.timestamp()) else: if dt.tzinfo: dt = dt.replace(tzinfo=None) - dt.utcoffset() return int((dt - datetime(1970, 1, 1)).total_seconds())
[ "def", "datetime_to_timestamp", "(", "self", ",", "dt", ")", ":", "if", "dt", "is", "None", ":", "return", "int", "(", "time", ".", "time", "(", ")", ")", "if", "six", ".", "PY3", ":", "if", "not", "dt", ".", "tzinfo", ":", "dt", "=", "pytz", "...
Helper function to convert a datetime object to a timestamp. If datetime instance ``dt`` is naive, it is assumed that it is in UTC. In Python 3, this just calls ``datetime.timestamp()``, in Python 2, it substracts any timezone offset and returns the difference since 1970-01-01 00:00:00. Note that the function always returns an int, even in Python 3. >>> XmppBackendBase().datetime_to_timestamp(datetime(2017, 9, 17, 19, 59)) 1505678340 >>> XmppBackendBase().datetime_to_timestamp(datetime(1984, 11, 6, 13, 21)) 468595260 :param dt: The datetime object to convert. If ``None``, returns the current time. :type dt: datetime :return: The seconds in UTC. :rtype: int
[ "Helper", "function", "to", "convert", "a", "datetime", "object", "to", "a", "timestamp", "." ]
train
https://github.com/mathiasertl/xmpp-backends/blob/214ef0664dbf90fa300c2483b9b3416559e5d171/xmpp_backends/base.py#L194-L224
mathiasertl/xmpp-backends
xmpp_backends/base.py
XmppBackendBase.get_random_password
def get_random_password(self, length=32, chars=None): """Helper function that gets a random password. :param length: The length of the random password. :type length: int :param chars: A string with characters to choose from. Defaults to all ASCII letters and digits. :type chars: str """ if chars is None: chars = string.ascii_letters + string.digits return ''.join(random.choice(chars) for x in range(length))
python
def get_random_password(self, length=32, chars=None): """Helper function that gets a random password. :param length: The length of the random password. :type length: int :param chars: A string with characters to choose from. Defaults to all ASCII letters and digits. :type chars: str """ if chars is None: chars = string.ascii_letters + string.digits return ''.join(random.choice(chars) for x in range(length))
[ "def", "get_random_password", "(", "self", ",", "length", "=", "32", ",", "chars", "=", "None", ")", ":", "if", "chars", "is", "None", ":", "chars", "=", "string", ".", "ascii_letters", "+", "string", ".", "digits", "return", "''", ".", "join", "(", ...
Helper function that gets a random password. :param length: The length of the random password. :type length: int :param chars: A string with characters to choose from. Defaults to all ASCII letters and digits. :type chars: str
[ "Helper", "function", "that", "gets", "a", "random", "password", "." ]
train
https://github.com/mathiasertl/xmpp-backends/blob/214ef0664dbf90fa300c2483b9b3416559e5d171/xmpp_backends/base.py#L226-L236
mathiasertl/xmpp-backends
xmpp_backends/base.py
XmppBackendBase.api_version
def api_version(self): """Cached version of :py:func:`~xmpp_backends.base.XmppBackendBase.get_api_version`.""" now = datetime.utcnow() if self.version_cache_timestamp and self.version_cache_timestamp + self.version_cache_timeout > now: return self.version_cache_value # we have a cached value self.version_cache_value = self.get_api_version() if self.minimum_version and self.version_cache_value < self.minimum_version: raise NotSupportedError('%s requires ejabberd >= %s' % (self.__class__.__name__, self.minimum_version)) self.version_cache_timestamp = now return self.version_cache_value
python
def api_version(self): """Cached version of :py:func:`~xmpp_backends.base.XmppBackendBase.get_api_version`.""" now = datetime.utcnow() if self.version_cache_timestamp and self.version_cache_timestamp + self.version_cache_timeout > now: return self.version_cache_value # we have a cached value self.version_cache_value = self.get_api_version() if self.minimum_version and self.version_cache_value < self.minimum_version: raise NotSupportedError('%s requires ejabberd >= %s' % (self.__class__.__name__, self.minimum_version)) self.version_cache_timestamp = now return self.version_cache_value
[ "def", "api_version", "(", "self", ")", ":", "now", "=", "datetime", ".", "utcnow", "(", ")", "if", "self", ".", "version_cache_timestamp", "and", "self", ".", "version_cache_timestamp", "+", "self", ".", "version_cache_timeout", ">", "now", ":", "return", "...
Cached version of :py:func:`~xmpp_backends.base.XmppBackendBase.get_api_version`.
[ "Cached", "version", "of", ":", "py", ":", "func", ":", "~xmpp_backends", ".", "base", ".", "XmppBackendBase", ".", "get_api_version", "." ]
train
https://github.com/mathiasertl/xmpp-backends/blob/214ef0664dbf90fa300c2483b9b3416559e5d171/xmpp_backends/base.py#L239-L254
mathiasertl/xmpp-backends
xmpp_backends/base.py
XmppBackendBase.create_reservation
def create_reservation(self, username, domain, email=None): """Reserve a new account. This method is called when a user account should be reserved, meaning that the account can no longer be registered by anybody else but the user cannot yet log in either. This is useful if e.g. an email confirmation is still pending. The default implementation calls :py:func:`~xmpp_backends.base.XmppBackendBase.create_user` with a random password. :param username: The username of the user. :type username: str :param domain: The domain of the user. :type domain: str :param email: The email address provided by the user. Note that at this point it is not confirmed. You are free to ignore this parameter. """ password = self.get_random_password() self.create(username=username, domain=domain, password=password, email=email)
python
def create_reservation(self, username, domain, email=None): """Reserve a new account. This method is called when a user account should be reserved, meaning that the account can no longer be registered by anybody else but the user cannot yet log in either. This is useful if e.g. an email confirmation is still pending. The default implementation calls :py:func:`~xmpp_backends.base.XmppBackendBase.create_user` with a random password. :param username: The username of the user. :type username: str :param domain: The domain of the user. :type domain: str :param email: The email address provided by the user. Note that at this point it is not confirmed. You are free to ignore this parameter. """ password = self.get_random_password() self.create(username=username, domain=domain, password=password, email=email)
[ "def", "create_reservation", "(", "self", ",", "username", ",", "domain", ",", "email", "=", "None", ")", ":", "password", "=", "self", ".", "get_random_password", "(", ")", "self", ".", "create", "(", "username", "=", "username", ",", "domain", "=", "do...
Reserve a new account. This method is called when a user account should be reserved, meaning that the account can no longer be registered by anybody else but the user cannot yet log in either. This is useful if e.g. an email confirmation is still pending. The default implementation calls :py:func:`~xmpp_backends.base.XmppBackendBase.create_user` with a random password. :param username: The username of the user. :type username: str :param domain: The domain of the user. :type domain: str :param email: The email address provided by the user. Note that at this point it is not confirmed. You are free to ignore this parameter.
[ "Reserve", "a", "new", "account", "." ]
train
https://github.com/mathiasertl/xmpp-backends/blob/214ef0664dbf90fa300c2483b9b3416559e5d171/xmpp_backends/base.py#L325-L343
mathiasertl/xmpp-backends
xmpp_backends/base.py
XmppBackendBase.confirm_reservation
def confirm_reservation(self, username, domain, password, email=None): """Confirm a reservation for a username. The default implementation just calls :py:func:`~xmpp_backends.base.XmppBackendBase.set_password` and optionally :py:func:`~xmpp_backends.base.XmppBackendBase.set_email`. """ self.set_password(username=username, domain=domain, password=password) if email is not None: self.set_email(username=username, domain=domain, email=email)
python
def confirm_reservation(self, username, domain, password, email=None): """Confirm a reservation for a username. The default implementation just calls :py:func:`~xmpp_backends.base.XmppBackendBase.set_password` and optionally :py:func:`~xmpp_backends.base.XmppBackendBase.set_email`. """ self.set_password(username=username, domain=domain, password=password) if email is not None: self.set_email(username=username, domain=domain, email=email)
[ "def", "confirm_reservation", "(", "self", ",", "username", ",", "domain", ",", "password", ",", "email", "=", "None", ")", ":", "self", ".", "set_password", "(", "username", "=", "username", ",", "domain", "=", "domain", ",", "password", "=", "password", ...
Confirm a reservation for a username. The default implementation just calls :py:func:`~xmpp_backends.base.XmppBackendBase.set_password` and optionally :py:func:`~xmpp_backends.base.XmppBackendBase.set_email`.
[ "Confirm", "a", "reservation", "for", "a", "username", "." ]
train
https://github.com/mathiasertl/xmpp-backends/blob/214ef0664dbf90fa300c2483b9b3416559e5d171/xmpp_backends/base.py#L345-L353
mathiasertl/xmpp-backends
xmpp_backends/base.py
XmppBackendBase.block_user
def block_user(self, username, domain): """Block the specified user. The default implementation calls :py:func:`~xmpp_backends.base.XmppBackendBase.set_password` with a random password. :param username: The username of the user. :type username: str :param domain: The domain of the user. :type domain: str """ self.set_password(username, domain, self.get_random_password())
python
def block_user(self, username, domain): """Block the specified user. The default implementation calls :py:func:`~xmpp_backends.base.XmppBackendBase.set_password` with a random password. :param username: The username of the user. :type username: str :param domain: The domain of the user. :type domain: str """ self.set_password(username, domain, self.get_random_password())
[ "def", "block_user", "(", "self", ",", "username", ",", "domain", ")", ":", "self", ".", "set_password", "(", "username", ",", "domain", ",", "self", ".", "get_random_password", "(", ")", ")" ]
Block the specified user. The default implementation calls :py:func:`~xmpp_backends.base.XmppBackendBase.set_password` with a random password. :param username: The username of the user. :type username: str :param domain: The domain of the user. :type domain: str
[ "Block", "the", "specified", "user", "." ]
train
https://github.com/mathiasertl/xmpp-backends/blob/214ef0664dbf90fa300c2483b9b3416559e5d171/xmpp_backends/base.py#L416-L427
mathiasertl/xmpp-backends
xmpp_backends/base.py
EjabberdBackendBase.parse_connection_string
def parse_connection_string(self, connection): """Parse string as returned by the ``connected_users_info`` or ``user_sessions_info`` API calls. >>> EjabberdBackendBase().parse_connection_string('c2s_tls') (0, True, False) >>> EjabberdBackendBase().parse_connection_string('c2s_compressed_tls') (0, True, True) >>> EjabberdBackendBase().parse_connection_string('http_bind') (2, None, None) :param connection: The connection string as returned by the ejabberd APIs. :type connection: str :return: A tuple representing the conntion type, if it is encrypted and if it uses XMPP stream compression. :rtype: tuple """ # TODO: Websockets, HTTP Polling if connection == 'c2s_tls': return CONNECTION_XMPP, True, False elif connection == 'c2s_compressed_tls': return CONNECTION_XMPP, True, True elif connection == 'http_bind': return CONNECTION_HTTP_BINDING, None, None elif connection == 'c2s': return CONNECTION_XMPP, False, False log.warn('Could not parse connection string "%s"', connection) return CONNECTION_UNKNOWN, True, True
python
def parse_connection_string(self, connection): """Parse string as returned by the ``connected_users_info`` or ``user_sessions_info`` API calls. >>> EjabberdBackendBase().parse_connection_string('c2s_tls') (0, True, False) >>> EjabberdBackendBase().parse_connection_string('c2s_compressed_tls') (0, True, True) >>> EjabberdBackendBase().parse_connection_string('http_bind') (2, None, None) :param connection: The connection string as returned by the ejabberd APIs. :type connection: str :return: A tuple representing the conntion type, if it is encrypted and if it uses XMPP stream compression. :rtype: tuple """ # TODO: Websockets, HTTP Polling if connection == 'c2s_tls': return CONNECTION_XMPP, True, False elif connection == 'c2s_compressed_tls': return CONNECTION_XMPP, True, True elif connection == 'http_bind': return CONNECTION_HTTP_BINDING, None, None elif connection == 'c2s': return CONNECTION_XMPP, False, False log.warn('Could not parse connection string "%s"', connection) return CONNECTION_UNKNOWN, True, True
[ "def", "parse_connection_string", "(", "self", ",", "connection", ")", ":", "# TODO: Websockets, HTTP Polling", "if", "connection", "==", "'c2s_tls'", ":", "return", "CONNECTION_XMPP", ",", "True", ",", "False", "elif", "connection", "==", "'c2s_compressed_tls'", ":",...
Parse string as returned by the ``connected_users_info`` or ``user_sessions_info`` API calls. >>> EjabberdBackendBase().parse_connection_string('c2s_tls') (0, True, False) >>> EjabberdBackendBase().parse_connection_string('c2s_compressed_tls') (0, True, True) >>> EjabberdBackendBase().parse_connection_string('http_bind') (2, None, None) :param connection: The connection string as returned by the ejabberd APIs. :type connection: str :return: A tuple representing the conntion type, if it is encrypted and if it uses XMPP stream compression. :rtype: tuple
[ "Parse", "string", "as", "returned", "by", "the", "connected_users_info", "or", "user_sessions_info", "API", "calls", "." ]
train
https://github.com/mathiasertl/xmpp-backends/blob/214ef0664dbf90fa300c2483b9b3416559e5d171/xmpp_backends/base.py#L564-L590
mathiasertl/xmpp-backends
xmpp_backends/base.py
EjabberdBackendBase.parse_ip_address
def parse_ip_address(self, ip_address): """Parse an address as returned by the ``connected_users_info`` or ``user_sessions_info`` API calls. Example:: >>> EjabberdBackendBase().parse_ip_address('192.168.0.1') # doctest: +FORCE_TEXT IPv4Address('192.168.0.1') >>> EjabberdBackendBase().parse_ip_address('::FFFF:192.168.0.1') # doctest: +FORCE_TEXT IPv4Address('192.168.0.1') >>> EjabberdBackendBase().parse_ip_address('::1') # doctest: +FORCE_TEXT IPv6Address('::1') :param ip_address: An IP address. :type ip_address: str :return: The parsed IP address. :rtype: `ipaddress.IPv6Address` or `ipaddress.IPv4Address`. """ if ip_address.startswith('::FFFF:'): ip_address = ip_address[7:] if six.PY2 and isinstance(ip_address, str): # ipaddress constructor does not eat str in py2 :-/ ip_address = ip_address.decode('utf-8') return ipaddress.ip_address(ip_address)
python
def parse_ip_address(self, ip_address): """Parse an address as returned by the ``connected_users_info`` or ``user_sessions_info`` API calls. Example:: >>> EjabberdBackendBase().parse_ip_address('192.168.0.1') # doctest: +FORCE_TEXT IPv4Address('192.168.0.1') >>> EjabberdBackendBase().parse_ip_address('::FFFF:192.168.0.1') # doctest: +FORCE_TEXT IPv4Address('192.168.0.1') >>> EjabberdBackendBase().parse_ip_address('::1') # doctest: +FORCE_TEXT IPv6Address('::1') :param ip_address: An IP address. :type ip_address: str :return: The parsed IP address. :rtype: `ipaddress.IPv6Address` or `ipaddress.IPv4Address`. """ if ip_address.startswith('::FFFF:'): ip_address = ip_address[7:] if six.PY2 and isinstance(ip_address, str): # ipaddress constructor does not eat str in py2 :-/ ip_address = ip_address.decode('utf-8') return ipaddress.ip_address(ip_address)
[ "def", "parse_ip_address", "(", "self", ",", "ip_address", ")", ":", "if", "ip_address", ".", "startswith", "(", "'::FFFF:'", ")", ":", "ip_address", "=", "ip_address", "[", "7", ":", "]", "if", "six", ".", "PY2", "and", "isinstance", "(", "ip_address", ...
Parse an address as returned by the ``connected_users_info`` or ``user_sessions_info`` API calls. Example:: >>> EjabberdBackendBase().parse_ip_address('192.168.0.1') # doctest: +FORCE_TEXT IPv4Address('192.168.0.1') >>> EjabberdBackendBase().parse_ip_address('::FFFF:192.168.0.1') # doctest: +FORCE_TEXT IPv4Address('192.168.0.1') >>> EjabberdBackendBase().parse_ip_address('::1') # doctest: +FORCE_TEXT IPv6Address('::1') :param ip_address: An IP address. :type ip_address: str :return: The parsed IP address. :rtype: `ipaddress.IPv6Address` or `ipaddress.IPv4Address`.
[ "Parse", "an", "address", "as", "returned", "by", "the", "connected_users_info", "or", "user_sessions_info", "API", "calls", "." ]
train
https://github.com/mathiasertl/xmpp-backends/blob/214ef0664dbf90fa300c2483b9b3416559e5d171/xmpp_backends/base.py#L592-L615
pebble/libpebble2
libpebble2/communication/__init__.py
PebbleConnection.pump_reader
def pump_reader(self): """ Synchronously reads one message from the watch, blocking until a message is available. All events caused by the message read will be processed before this method returns. .. note:: You usually don't need to invoke this method manually; instead, see :meth:`run_sync` and :meth:`run_async`. """ origin, message = self.transport.read_packet() if isinstance(origin, MessageTargetWatch): self._handle_watch_message(message) else: self._broadcast_transport_message(origin, message)
python
def pump_reader(self): """ Synchronously reads one message from the watch, blocking until a message is available. All events caused by the message read will be processed before this method returns. .. note:: You usually don't need to invoke this method manually; instead, see :meth:`run_sync` and :meth:`run_async`. """ origin, message = self.transport.read_packet() if isinstance(origin, MessageTargetWatch): self._handle_watch_message(message) else: self._broadcast_transport_message(origin, message)
[ "def", "pump_reader", "(", "self", ")", ":", "origin", ",", "message", "=", "self", ".", "transport", ".", "read_packet", "(", ")", "if", "isinstance", "(", "origin", ",", "MessageTargetWatch", ")", ":", "self", ".", "_handle_watch_message", "(", "message", ...
Synchronously reads one message from the watch, blocking until a message is available. All events caused by the message read will be processed before this method returns. .. note:: You usually don't need to invoke this method manually; instead, see :meth:`run_sync` and :meth:`run_async`.
[ "Synchronously", "reads", "one", "message", "from", "the", "watch", "blocking", "until", "a", "message", "is", "available", ".", "All", "events", "caused", "by", "the", "message", "read", "will", "be", "processed", "before", "this", "method", "returns", "." ]
train
https://github.com/pebble/libpebble2/blob/23e2eb92cfc084e6f9e8c718711ac994ef606d18/libpebble2/communication/__init__.py#L65-L77
pebble/libpebble2
libpebble2/communication/__init__.py
PebbleConnection.run_sync
def run_sync(self): """ Runs the message loop until the Pebble disconnects. This method will block until the watch disconnects or a fatal error occurs. For alternatives that don't block forever, see :meth:`pump_reader` and :meth:`run_async`. """ while self.connected: try: self.pump_reader() except PacketDecodeError as e: logger.warning("Packet decode failed: %s", e) except ConnectionError: break
python
def run_sync(self): """ Runs the message loop until the Pebble disconnects. This method will block until the watch disconnects or a fatal error occurs. For alternatives that don't block forever, see :meth:`pump_reader` and :meth:`run_async`. """ while self.connected: try: self.pump_reader() except PacketDecodeError as e: logger.warning("Packet decode failed: %s", e) except ConnectionError: break
[ "def", "run_sync", "(", "self", ")", ":", "while", "self", ".", "connected", ":", "try", ":", "self", ".", "pump_reader", "(", ")", "except", "PacketDecodeError", "as", "e", ":", "logger", ".", "warning", "(", "\"Packet decode failed: %s\"", ",", "e", ")",...
Runs the message loop until the Pebble disconnects. This method will block until the watch disconnects or a fatal error occurs. For alternatives that don't block forever, see :meth:`pump_reader` and :meth:`run_async`.
[ "Runs", "the", "message", "loop", "until", "the", "Pebble", "disconnects", ".", "This", "method", "will", "block", "until", "the", "watch", "disconnects", "or", "a", "fatal", "error", "occurs", "." ]
train
https://github.com/pebble/libpebble2/blob/23e2eb92cfc084e6f9e8c718711ac994ef606d18/libpebble2/communication/__init__.py#L79-L92
pebble/libpebble2
libpebble2/communication/__init__.py
PebbleConnection.run_async
def run_async(self): """ Spawns a new thread that runs the message loop until the Pebble disconnects. ``run_async`` will call :meth:`fetch_watch_info` on your behalf, and block until it receives a response. """ thread = threading.Thread(target=self.run_sync) thread.daemon = True thread.name = "PebbleConnection" thread.start() self.fetch_watch_info()
python
def run_async(self): """ Spawns a new thread that runs the message loop until the Pebble disconnects. ``run_async`` will call :meth:`fetch_watch_info` on your behalf, and block until it receives a response. """ thread = threading.Thread(target=self.run_sync) thread.daemon = True thread.name = "PebbleConnection" thread.start() self.fetch_watch_info()
[ "def", "run_async", "(", "self", ")", ":", "thread", "=", "threading", ".", "Thread", "(", "target", "=", "self", ".", "run_sync", ")", "thread", ".", "daemon", "=", "True", "thread", ".", "name", "=", "\"PebbleConnection\"", "thread", ".", "start", "(",...
Spawns a new thread that runs the message loop until the Pebble disconnects. ``run_async`` will call :meth:`fetch_watch_info` on your behalf, and block until it receives a response.
[ "Spawns", "a", "new", "thread", "that", "runs", "the", "message", "loop", "until", "the", "Pebble", "disconnects", ".", "run_async", "will", "call", ":", "meth", ":", "fetch_watch_info", "on", "your", "behalf", "and", "block", "until", "it", "receives", "a",...
train
https://github.com/pebble/libpebble2/blob/23e2eb92cfc084e6f9e8c718711ac994ef606d18/libpebble2/communication/__init__.py#L94-L103
pebble/libpebble2
libpebble2/communication/__init__.py
PebbleConnection._handle_watch_message
def _handle_watch_message(self, message): """ Processes a binary message received from the watch and broadcasts the relevant events. :param message: A raw message from the watch, without any transport framing. :type message: bytes """ if self.log_protocol_level is not None: logger.log(self.log_protocol_level, "<- %s", hexlify(message).decode()) message = self.pending_bytes + message while len(message) >= 4: try: packet, length = PebblePacket.parse_message(message) except IncompleteMessage: self.pending_bytes = message break except: # At this point we've failed to deconstruct the message via normal means, but we don't want to end # up permanently desynced (because we wiped a partial message), nor do we want to get stuck (because # we didn't wipe anything). We therefore parse the packet length manually and skip ahead that far. # If the expected length is 0, we wipe everything to ensure forward motion (but we are quite probably # screwed). expected_length, = struct.unpack('!H', message[:2]) if expected_length == 0: self.pending_bytes = b'' else: self.pending_bytes = message[expected_length + 4:] raise self.event_handler.broadcast_event("raw_inbound", message[:length]) if self.log_packet_level is not None: logger.log(self.log_packet_level, "<- %s", packet) message = message[length:] self.event_handler.broadcast_event((_EventType.Watch, type(packet)), packet) if length == 0: break self.pending_bytes = message
python
def _handle_watch_message(self, message): """ Processes a binary message received from the watch and broadcasts the relevant events. :param message: A raw message from the watch, without any transport framing. :type message: bytes """ if self.log_protocol_level is not None: logger.log(self.log_protocol_level, "<- %s", hexlify(message).decode()) message = self.pending_bytes + message while len(message) >= 4: try: packet, length = PebblePacket.parse_message(message) except IncompleteMessage: self.pending_bytes = message break except: # At this point we've failed to deconstruct the message via normal means, but we don't want to end # up permanently desynced (because we wiped a partial message), nor do we want to get stuck (because # we didn't wipe anything). We therefore parse the packet length manually and skip ahead that far. # If the expected length is 0, we wipe everything to ensure forward motion (but we are quite probably # screwed). expected_length, = struct.unpack('!H', message[:2]) if expected_length == 0: self.pending_bytes = b'' else: self.pending_bytes = message[expected_length + 4:] raise self.event_handler.broadcast_event("raw_inbound", message[:length]) if self.log_packet_level is not None: logger.log(self.log_packet_level, "<- %s", packet) message = message[length:] self.event_handler.broadcast_event((_EventType.Watch, type(packet)), packet) if length == 0: break self.pending_bytes = message
[ "def", "_handle_watch_message", "(", "self", ",", "message", ")", ":", "if", "self", ".", "log_protocol_level", "is", "not", "None", ":", "logger", ".", "log", "(", "self", ".", "log_protocol_level", ",", "\"<- %s\"", ",", "hexlify", "(", "message", ")", "...
Processes a binary message received from the watch and broadcasts the relevant events. :param message: A raw message from the watch, without any transport framing. :type message: bytes
[ "Processes", "a", "binary", "message", "received", "from", "the", "watch", "and", "broadcasts", "the", "relevant", "events", "." ]
train
https://github.com/pebble/libpebble2/blob/23e2eb92cfc084e6f9e8c718711ac994ef606d18/libpebble2/communication/__init__.py#L105-L142
pebble/libpebble2
libpebble2/communication/__init__.py
PebbleConnection._broadcast_transport_message
def _broadcast_transport_message(self, origin, message): """ Broadcasts an event originating from a transport that does not represent a message from the Pebble. :param origin: The type of transport responsible for the message. :type origin: .MessageTarget :param message: The message from the transport """ self.event_handler.broadcast_event((_EventType.Transport, type(origin), type(message)), message)
python
def _broadcast_transport_message(self, origin, message): """ Broadcasts an event originating from a transport that does not represent a message from the Pebble. :param origin: The type of transport responsible for the message. :type origin: .MessageTarget :param message: The message from the transport """ self.event_handler.broadcast_event((_EventType.Transport, type(origin), type(message)), message)
[ "def", "_broadcast_transport_message", "(", "self", ",", "origin", ",", "message", ")", ":", "self", ".", "event_handler", ".", "broadcast_event", "(", "(", "_EventType", ".", "Transport", ",", "type", "(", "origin", ")", ",", "type", "(", "message", ")", ...
Broadcasts an event originating from a transport that does not represent a message from the Pebble. :param origin: The type of transport responsible for the message. :type origin: .MessageTarget :param message: The message from the transport
[ "Broadcasts", "an", "event", "originating", "from", "a", "transport", "that", "does", "not", "represent", "a", "message", "from", "the", "Pebble", "." ]
train
https://github.com/pebble/libpebble2/blob/23e2eb92cfc084e6f9e8c718711ac994ef606d18/libpebble2/communication/__init__.py#L144-L152
pebble/libpebble2
libpebble2/communication/__init__.py
PebbleConnection.register_transport_endpoint
def register_transport_endpoint(self, origin, message_type, handler): """ Register a handler for a message received from a transport that does not indicate a message from the connected Pebble. :param origin: The type of :class:`.MessageTarget` that triggers the message :param message_type: The class of the message that is expected. :param handler: A callback to be called when a message is received. :type handler: callable :return: A handle that can be passed to :meth:`unregister_endpoint` to remove the handler. """ return self.event_handler.register_handler((_EventType.Transport, origin, message_type), handler)
python
def register_transport_endpoint(self, origin, message_type, handler): """ Register a handler for a message received from a transport that does not indicate a message from the connected Pebble. :param origin: The type of :class:`.MessageTarget` that triggers the message :param message_type: The class of the message that is expected. :param handler: A callback to be called when a message is received. :type handler: callable :return: A handle that can be passed to :meth:`unregister_endpoint` to remove the handler. """ return self.event_handler.register_handler((_EventType.Transport, origin, message_type), handler)
[ "def", "register_transport_endpoint", "(", "self", ",", "origin", ",", "message_type", ",", "handler", ")", ":", "return", "self", ".", "event_handler", ".", "register_handler", "(", "(", "_EventType", ".", "Transport", ",", "origin", ",", "message_type", ")", ...
Register a handler for a message received from a transport that does not indicate a message from the connected Pebble. :param origin: The type of :class:`.MessageTarget` that triggers the message :param message_type: The class of the message that is expected. :param handler: A callback to be called when a message is received. :type handler: callable :return: A handle that can be passed to :meth:`unregister_endpoint` to remove the handler.
[ "Register", "a", "handler", "for", "a", "message", "received", "from", "a", "transport", "that", "does", "not", "indicate", "a", "message", "from", "the", "connected", "Pebble", "." ]
train
https://github.com/pebble/libpebble2/blob/23e2eb92cfc084e6f9e8c718711ac994ef606d18/libpebble2/communication/__init__.py#L154-L165
pebble/libpebble2
libpebble2/communication/__init__.py
PebbleConnection.register_endpoint
def register_endpoint(self, endpoint, handler): """ Register a handler for a message received from the Pebble. :param endpoint: The type of :class:`.PebblePacket` that is being listened for. :type endpoint: .PacketType :param handler: A callback to be called when a message is received. :type handler: callable :return: A handle that can be passed to :meth:`unregister_endpoint` to remove the handler. """ return self.event_handler.register_handler((_EventType.Watch, endpoint), handler)
python
def register_endpoint(self, endpoint, handler): """ Register a handler for a message received from the Pebble. :param endpoint: The type of :class:`.PebblePacket` that is being listened for. :type endpoint: .PacketType :param handler: A callback to be called when a message is received. :type handler: callable :return: A handle that can be passed to :meth:`unregister_endpoint` to remove the handler. """ return self.event_handler.register_handler((_EventType.Watch, endpoint), handler)
[ "def", "register_endpoint", "(", "self", ",", "endpoint", ",", "handler", ")", ":", "return", "self", ".", "event_handler", ".", "register_handler", "(", "(", "_EventType", ".", "Watch", ",", "endpoint", ")", ",", "handler", ")" ]
Register a handler for a message received from the Pebble. :param endpoint: The type of :class:`.PebblePacket` that is being listened for. :type endpoint: .PacketType :param handler: A callback to be called when a message is received. :type handler: callable :return: A handle that can be passed to :meth:`unregister_endpoint` to remove the handler.
[ "Register", "a", "handler", "for", "a", "message", "received", "from", "the", "Pebble", "." ]
train
https://github.com/pebble/libpebble2/blob/23e2eb92cfc084e6f9e8c718711ac994ef606d18/libpebble2/communication/__init__.py#L167-L177
pebble/libpebble2
libpebble2/communication/__init__.py
PebbleConnection.read_from_endpoint
def read_from_endpoint(self, endpoint, timeout=15): """ Blocking read from an endpoint. Will block until a message is received, or it times out. Also see :meth:`get_endpoint_queue` if you are considering calling this in a loop. .. warning:: Avoid calling this method from an endpoint callback; doing so is likely to lead to deadlock. .. note:: If you're reading a response to a message you just sent, :meth:`send_and_read` might be more appropriate. :param endpoint: The endpoint to read from. :type endpoint: .PacketType :param timeout: The maximum time to wait before raising :exc:`.TimeoutError`. :return: The message read from the endpoint; of the same type as passed to ``endpoint``. """ return self.event_handler.wait_for_event((_EventType.Watch, endpoint), timeout=timeout)
python
def read_from_endpoint(self, endpoint, timeout=15): """ Blocking read from an endpoint. Will block until a message is received, or it times out. Also see :meth:`get_endpoint_queue` if you are considering calling this in a loop. .. warning:: Avoid calling this method from an endpoint callback; doing so is likely to lead to deadlock. .. note:: If you're reading a response to a message you just sent, :meth:`send_and_read` might be more appropriate. :param endpoint: The endpoint to read from. :type endpoint: .PacketType :param timeout: The maximum time to wait before raising :exc:`.TimeoutError`. :return: The message read from the endpoint; of the same type as passed to ``endpoint``. """ return self.event_handler.wait_for_event((_EventType.Watch, endpoint), timeout=timeout)
[ "def", "read_from_endpoint", "(", "self", ",", "endpoint", ",", "timeout", "=", "15", ")", ":", "return", "self", ".", "event_handler", ".", "wait_for_event", "(", "(", "_EventType", ".", "Watch", ",", "endpoint", ")", ",", "timeout", "=", "timeout", ")" ]
Blocking read from an endpoint. Will block until a message is received, or it times out. Also see :meth:`get_endpoint_queue` if you are considering calling this in a loop. .. warning:: Avoid calling this method from an endpoint callback; doing so is likely to lead to deadlock. .. note:: If you're reading a response to a message you just sent, :meth:`send_and_read` might be more appropriate. :param endpoint: The endpoint to read from. :type endpoint: .PacketType :param timeout: The maximum time to wait before raising :exc:`.TimeoutError`. :return: The message read from the endpoint; of the same type as passed to ``endpoint``.
[ "Blocking", "read", "from", "an", "endpoint", ".", "Will", "block", "until", "a", "message", "is", "received", "or", "it", "times", "out", ".", "Also", "see", ":", "meth", ":", "get_endpoint_queue", "if", "you", "are", "considering", "calling", "this", "in...
train
https://github.com/pebble/libpebble2/blob/23e2eb92cfc084e6f9e8c718711ac994ef606d18/libpebble2/communication/__init__.py#L209-L225
pebble/libpebble2
libpebble2/communication/__init__.py
PebbleConnection.read_transport_message
def read_transport_message(self, origin, message_type, timeout=15): """ Blocking read of a transport message that does not indicate a message from the Pebble. Will block until a message is received, or it times out. .. warning:: Avoid calling this method from an endpoint callback; doing so is likely to lead to deadlock. :param origin: The type of :class:`.MessageTarget` that triggers the message. :param message_type: The class of the message to read from the transport. :param timeout: The maximum time to wait before raising :exc:`.TimeoutError`. :return: The object read from the transport; of the same type as passed to ``message_type``. """ return self.event_handler.wait_for_event((_EventType.Transport, origin, message_type), timeout=timeout)
python
def read_transport_message(self, origin, message_type, timeout=15): """ Blocking read of a transport message that does not indicate a message from the Pebble. Will block until a message is received, or it times out. .. warning:: Avoid calling this method from an endpoint callback; doing so is likely to lead to deadlock. :param origin: The type of :class:`.MessageTarget` that triggers the message. :param message_type: The class of the message to read from the transport. :param timeout: The maximum time to wait before raising :exc:`.TimeoutError`. :return: The object read from the transport; of the same type as passed to ``message_type``. """ return self.event_handler.wait_for_event((_EventType.Transport, origin, message_type), timeout=timeout)
[ "def", "read_transport_message", "(", "self", ",", "origin", ",", "message_type", ",", "timeout", "=", "15", ")", ":", "return", "self", ".", "event_handler", ".", "wait_for_event", "(", "(", "_EventType", ".", "Transport", ",", "origin", ",", "message_type", ...
Blocking read of a transport message that does not indicate a message from the Pebble. Will block until a message is received, or it times out. .. warning:: Avoid calling this method from an endpoint callback; doing so is likely to lead to deadlock. :param origin: The type of :class:`.MessageTarget` that triggers the message. :param message_type: The class of the message to read from the transport. :param timeout: The maximum time to wait before raising :exc:`.TimeoutError`. :return: The object read from the transport; of the same type as passed to ``message_type``.
[ "Blocking", "read", "of", "a", "transport", "message", "that", "does", "not", "indicate", "a", "message", "from", "the", "Pebble", ".", "Will", "block", "until", "a", "message", "is", "received", "or", "it", "times", "out", "." ]
train
https://github.com/pebble/libpebble2/blob/23e2eb92cfc084e6f9e8c718711ac994ef606d18/libpebble2/communication/__init__.py#L240-L253
pebble/libpebble2
libpebble2/communication/__init__.py
PebbleConnection.send_packet
def send_packet(self, packet): """ Sends a message to the Pebble. :param packet: The message to send. :type packet: .PebblePacket """ if self.log_packet_level: logger.log(self.log_packet_level, "-> %s", packet) serialised = packet.serialise_packet() self.event_handler.broadcast_event("raw_outbound", serialised) self.send_raw(serialised)
python
def send_packet(self, packet): """ Sends a message to the Pebble. :param packet: The message to send. :type packet: .PebblePacket """ if self.log_packet_level: logger.log(self.log_packet_level, "-> %s", packet) serialised = packet.serialise_packet() self.event_handler.broadcast_event("raw_outbound", serialised) self.send_raw(serialised)
[ "def", "send_packet", "(", "self", ",", "packet", ")", ":", "if", "self", ".", "log_packet_level", ":", "logger", ".", "log", "(", "self", ".", "log_packet_level", ",", "\"-> %s\"", ",", "packet", ")", "serialised", "=", "packet", ".", "serialise_packet", ...
Sends a message to the Pebble. :param packet: The message to send. :type packet: .PebblePacket
[ "Sends", "a", "message", "to", "the", "Pebble", "." ]
train
https://github.com/pebble/libpebble2/blob/23e2eb92cfc084e6f9e8c718711ac994ef606d18/libpebble2/communication/__init__.py#L255-L266
pebble/libpebble2
libpebble2/communication/__init__.py
PebbleConnection.send_and_read
def send_and_read(self, packet, endpoint, timeout=15): """ Sends a packet, then returns the next response received from that endpoint. This method sets up a listener before it actually sends the message, avoiding a potential race. .. warning:: Avoid calling this method from an endpoint callback; doing so is likely to lead to deadlock. :param packet: The message to send. :type packet: .PebblePacket :param endpoint: The endpoint to read from :type endpoint: .PacketType :param timeout: The maximum time to wait before raising :exc:`.TimeoutError`. :return: The message read from the endpoint; of the same type as passed to ``endpoint``. """ queue = self.get_endpoint_queue(endpoint) self.send_packet(packet) try: return queue.get(timeout=timeout) finally: queue.close()
python
def send_and_read(self, packet, endpoint, timeout=15): """ Sends a packet, then returns the next response received from that endpoint. This method sets up a listener before it actually sends the message, avoiding a potential race. .. warning:: Avoid calling this method from an endpoint callback; doing so is likely to lead to deadlock. :param packet: The message to send. :type packet: .PebblePacket :param endpoint: The endpoint to read from :type endpoint: .PacketType :param timeout: The maximum time to wait before raising :exc:`.TimeoutError`. :return: The message read from the endpoint; of the same type as passed to ``endpoint``. """ queue = self.get_endpoint_queue(endpoint) self.send_packet(packet) try: return queue.get(timeout=timeout) finally: queue.close()
[ "def", "send_and_read", "(", "self", ",", "packet", ",", "endpoint", ",", "timeout", "=", "15", ")", ":", "queue", "=", "self", ".", "get_endpoint_queue", "(", "endpoint", ")", "self", ".", "send_packet", "(", "packet", ")", "try", ":", "return", "queue"...
Sends a packet, then returns the next response received from that endpoint. This method sets up a listener before it actually sends the message, avoiding a potential race. .. warning:: Avoid calling this method from an endpoint callback; doing so is likely to lead to deadlock. :param packet: The message to send. :type packet: .PebblePacket :param endpoint: The endpoint to read from :type endpoint: .PacketType :param timeout: The maximum time to wait before raising :exc:`.TimeoutError`. :return: The message read from the endpoint; of the same type as passed to ``endpoint``.
[ "Sends", "a", "packet", "then", "returns", "the", "next", "response", "received", "from", "that", "endpoint", ".", "This", "method", "sets", "up", "a", "listener", "before", "it", "actually", "sends", "the", "message", "avoiding", "a", "potential", "race", "...
train
https://github.com/pebble/libpebble2/blob/23e2eb92cfc084e6f9e8c718711ac994ef606d18/libpebble2/communication/__init__.py#L268-L288
pebble/libpebble2
libpebble2/communication/__init__.py
PebbleConnection.send_raw
def send_raw(self, message): """ Sends a raw binary message to the Pebble. No processing will be applied, but any transport framing should be omitted. :param message: The message to send to the pebble. :type message: bytes """ if self.log_protocol_level: logger.log(self.log_protocol_level, "-> %s", hexlify(message).decode()) self.transport.send_packet(message)
python
def send_raw(self, message): """ Sends a raw binary message to the Pebble. No processing will be applied, but any transport framing should be omitted. :param message: The message to send to the pebble. :type message: bytes """ if self.log_protocol_level: logger.log(self.log_protocol_level, "-> %s", hexlify(message).decode()) self.transport.send_packet(message)
[ "def", "send_raw", "(", "self", ",", "message", ")", ":", "if", "self", ".", "log_protocol_level", ":", "logger", ".", "log", "(", "self", ".", "log_protocol_level", ",", "\"-> %s\"", ",", "hexlify", "(", "message", ")", ".", "decode", "(", ")", ")", "...
Sends a raw binary message to the Pebble. No processing will be applied, but any transport framing should be omitted. :param message: The message to send to the pebble. :type message: bytes
[ "Sends", "a", "raw", "binary", "message", "to", "the", "Pebble", ".", "No", "processing", "will", "be", "applied", "but", "any", "transport", "framing", "should", "be", "omitted", "." ]
train
https://github.com/pebble/libpebble2/blob/23e2eb92cfc084e6f9e8c718711ac994ef606d18/libpebble2/communication/__init__.py#L290-L300
pebble/libpebble2
libpebble2/communication/__init__.py
PebbleConnection.fetch_watch_info
def fetch_watch_info(self): """ This method should be called before accessing :attr:`watch_info`, :attr:`firmware_version` or :attr:`watch_platform`. Blocks until it has fetched the required information. """ self._watch_info = self.send_and_read(WatchVersion(data=WatchVersionRequest()), WatchVersion).data
python
def fetch_watch_info(self): """ This method should be called before accessing :attr:`watch_info`, :attr:`firmware_version` or :attr:`watch_platform`. Blocks until it has fetched the required information. """ self._watch_info = self.send_and_read(WatchVersion(data=WatchVersionRequest()), WatchVersion).data
[ "def", "fetch_watch_info", "(", "self", ")", ":", "self", ".", "_watch_info", "=", "self", ".", "send_and_read", "(", "WatchVersion", "(", "data", "=", "WatchVersionRequest", "(", ")", ")", ",", "WatchVersion", ")", ".", "data" ]
This method should be called before accessing :attr:`watch_info`, :attr:`firmware_version` or :attr:`watch_platform`. Blocks until it has fetched the required information.
[ "This", "method", "should", "be", "called", "before", "accessing", ":", "attr", ":", "watch_info", ":", "attr", ":", "firmware_version", "or", ":", "attr", ":", "watch_platform", ".", "Blocks", "until", "it", "has", "fetched", "the", "required", "information",...
train
https://github.com/pebble/libpebble2/blob/23e2eb92cfc084e6f9e8c718711ac994ef606d18/libpebble2/communication/__init__.py#L319-L324
pebble/libpebble2
libpebble2/communication/__init__.py
PebbleConnection.firmware_version
def firmware_version(self): """ Provides information on the connected Pebble, including its firmware version, language, capabilities, etc. .. note: This is a blocking call if :meth:`fetch_watch_info` has not yet been called, which could lead to deadlock if called in an endpoint callback. :rtype: .WatchVersionResponse """ version = self.watch_info.running.version_tag[1:] parts = version.split('-', 1) points = [int(x) for x in parts[0].split('.')] while len(points) < 3: points.append(0) if len(parts) == 2: suffix = parts[1] else: suffix = '' return FirmwareVersion(*(points + [suffix]))
python
def firmware_version(self): """ Provides information on the connected Pebble, including its firmware version, language, capabilities, etc. .. note: This is a blocking call if :meth:`fetch_watch_info` has not yet been called, which could lead to deadlock if called in an endpoint callback. :rtype: .WatchVersionResponse """ version = self.watch_info.running.version_tag[1:] parts = version.split('-', 1) points = [int(x) for x in parts[0].split('.')] while len(points) < 3: points.append(0) if len(parts) == 2: suffix = parts[1] else: suffix = '' return FirmwareVersion(*(points + [suffix]))
[ "def", "firmware_version", "(", "self", ")", ":", "version", "=", "self", ".", "watch_info", ".", "running", ".", "version_tag", "[", "1", ":", "]", "parts", "=", "version", ".", "split", "(", "'-'", ",", "1", ")", "points", "=", "[", "int", "(", "...
Provides information on the connected Pebble, including its firmware version, language, capabilities, etc. .. note: This is a blocking call if :meth:`fetch_watch_info` has not yet been called, which could lead to deadlock if called in an endpoint callback. :rtype: .WatchVersionResponse
[ "Provides", "information", "on", "the", "connected", "Pebble", "including", "its", "firmware", "version", "language", "capabilities", "etc", "." ]
train
https://github.com/pebble/libpebble2/blob/23e2eb92cfc084e6f9e8c718711ac994ef606d18/libpebble2/communication/__init__.py#L342-L361
INM-6/hybridLFPy
hybridLFPy/gdf.py
GDF._blockread
def _blockread(self, fname): """ Generator yields bsize lines from gdf file. Hidden method. Parameters ---------- fname : str Name of gdf-file. Yields ------ list file contents """ with open(fname, 'rb') as f: while True: a = [] for i in range(self.bsize): line = f.readline() if not line: break a.append(line.split()) if a == []: raise StopIteration yield a
python
def _blockread(self, fname): """ Generator yields bsize lines from gdf file. Hidden method. Parameters ---------- fname : str Name of gdf-file. Yields ------ list file contents """ with open(fname, 'rb') as f: while True: a = [] for i in range(self.bsize): line = f.readline() if not line: break a.append(line.split()) if a == []: raise StopIteration yield a
[ "def", "_blockread", "(", "self", ",", "fname", ")", ":", "with", "open", "(", "fname", ",", "'rb'", ")", "as", "f", ":", "while", "True", ":", "a", "=", "[", "]", "for", "i", "in", "range", "(", "self", ".", "bsize", ")", ":", "line", "=", "...
Generator yields bsize lines from gdf file. Hidden method. Parameters ---------- fname : str Name of gdf-file. Yields ------ list file contents
[ "Generator", "yields", "bsize", "lines", "from", "gdf", "file", ".", "Hidden", "method", "." ]
train
https://github.com/INM-6/hybridLFPy/blob/c38bdf38982c4624c2f70caeb50c40f1d5980abd/hybridLFPy/gdf.py#L105-L131
INM-6/hybridLFPy
hybridLFPy/gdf.py
GDF.create
def create(self, re='brunel-py-ex-*.gdf', index=True): """ Create db from list of gdf file glob Parameters ---------- re : str File glob to load. index : bool Create index on neurons for speed. Returns ------- None See also -------- sqlite3.connect.cursor, sqlite3.connect """ self.cursor.execute('CREATE TABLE IF NOT EXISTS spikes (neuron INT UNSIGNED, time REAL)') tic = now() for f in glob.glob(re): print(f) while True: try: for data in self._blockread(f): self.cursor.executemany('INSERT INTO spikes VALUES (?, ?)', data) self.conn.commit() except: continue break toc = now() if self.debug: print('Inserts took %g seconds.' % (toc-tic)) # Optionally, create index for speed if index: tic = now() self.cursor.execute('CREATE INDEX neuron_index on spikes (neuron)') toc = now() if self.debug: print('Indexed db in %g seconds.' % (toc-tic))
python
def create(self, re='brunel-py-ex-*.gdf', index=True): """ Create db from list of gdf file glob Parameters ---------- re : str File glob to load. index : bool Create index on neurons for speed. Returns ------- None See also -------- sqlite3.connect.cursor, sqlite3.connect """ self.cursor.execute('CREATE TABLE IF NOT EXISTS spikes (neuron INT UNSIGNED, time REAL)') tic = now() for f in glob.glob(re): print(f) while True: try: for data in self._blockread(f): self.cursor.executemany('INSERT INTO spikes VALUES (?, ?)', data) self.conn.commit() except: continue break toc = now() if self.debug: print('Inserts took %g seconds.' % (toc-tic)) # Optionally, create index for speed if index: tic = now() self.cursor.execute('CREATE INDEX neuron_index on spikes (neuron)') toc = now() if self.debug: print('Indexed db in %g seconds.' % (toc-tic))
[ "def", "create", "(", "self", ",", "re", "=", "'brunel-py-ex-*.gdf'", ",", "index", "=", "True", ")", ":", "self", ".", "cursor", ".", "execute", "(", "'CREATE TABLE IF NOT EXISTS spikes (neuron INT UNSIGNED, time REAL)'", ")", "tic", "=", "now", "(", ")", "for"...
Create db from list of gdf file glob Parameters ---------- re : str File glob to load. index : bool Create index on neurons for speed. Returns ------- None See also -------- sqlite3.connect.cursor, sqlite3.connect
[ "Create", "db", "from", "list", "of", "gdf", "file", "glob" ]
train
https://github.com/INM-6/hybridLFPy/blob/c38bdf38982c4624c2f70caeb50c40f1d5980abd/hybridLFPy/gdf.py#L133-L178
INM-6/hybridLFPy
hybridLFPy/gdf.py
GDF.create_from_list
def create_from_list(self, re=[], index=True): """ Create db from list of arrays. Parameters ---------- re : list Index of element is cell index, and element `i` an array of spike times in ms. index : bool Create index on neurons for speed. Returns ------- None See also -------- sqlite3.connect.cursor, sqlite3.connect """ self.cursor.execute('CREATE TABLE IF NOT EXISTS spikes (neuron INT UNSIGNED, time REAL)') tic = now() i = 0 for x in re: data = list(zip([i] * len(x), x)) self.cursor.executemany('INSERT INTO spikes VALUES (?, ?)', data) i += 1 self.conn.commit() toc = now() if self.debug: print('Inserts took %g seconds.' % (toc-tic)) # Optionally, create index for speed if index: tic = now() self.cursor.execute('CREATE INDEX neuron_index on spikes (neuron)') toc = now() if self.debug: print('Indexed db in %g seconds.' % (toc-tic))
python
def create_from_list(self, re=[], index=True): """ Create db from list of arrays. Parameters ---------- re : list Index of element is cell index, and element `i` an array of spike times in ms. index : bool Create index on neurons for speed. Returns ------- None See also -------- sqlite3.connect.cursor, sqlite3.connect """ self.cursor.execute('CREATE TABLE IF NOT EXISTS spikes (neuron INT UNSIGNED, time REAL)') tic = now() i = 0 for x in re: data = list(zip([i] * len(x), x)) self.cursor.executemany('INSERT INTO spikes VALUES (?, ?)', data) i += 1 self.conn.commit() toc = now() if self.debug: print('Inserts took %g seconds.' % (toc-tic)) # Optionally, create index for speed if index: tic = now() self.cursor.execute('CREATE INDEX neuron_index on spikes (neuron)') toc = now() if self.debug: print('Indexed db in %g seconds.' % (toc-tic))
[ "def", "create_from_list", "(", "self", ",", "re", "=", "[", "]", ",", "index", "=", "True", ")", ":", "self", ".", "cursor", ".", "execute", "(", "'CREATE TABLE IF NOT EXISTS spikes (neuron INT UNSIGNED, time REAL)'", ")", "tic", "=", "now", "(", ")", "i", ...
Create db from list of arrays. Parameters ---------- re : list Index of element is cell index, and element `i` an array of spike times in ms. index : bool Create index on neurons for speed. Returns ------- None See also -------- sqlite3.connect.cursor, sqlite3.connect
[ "Create", "db", "from", "list", "of", "arrays", "." ]
train
https://github.com/INM-6/hybridLFPy/blob/c38bdf38982c4624c2f70caeb50c40f1d5980abd/hybridLFPy/gdf.py#L181-L221
INM-6/hybridLFPy
hybridLFPy/gdf.py
GDF.select
def select(self, neurons): """ Select spike trains. Parameters ---------- neurons : numpy.ndarray or list Array of list of neurons. Returns ------- list List of numpy.ndarray objects containing spike times. See also -------- sqlite3.connect.cursor """ s = [] for neuron in neurons: self.cursor.execute('SELECT time FROM spikes where neuron = %d' % neuron) sel = self.cursor.fetchall() spikes = np.array(sel).flatten() s.append(spikes) return s
python
def select(self, neurons): """ Select spike trains. Parameters ---------- neurons : numpy.ndarray or list Array of list of neurons. Returns ------- list List of numpy.ndarray objects containing spike times. See also -------- sqlite3.connect.cursor """ s = [] for neuron in neurons: self.cursor.execute('SELECT time FROM spikes where neuron = %d' % neuron) sel = self.cursor.fetchall() spikes = np.array(sel).flatten() s.append(spikes) return s
[ "def", "select", "(", "self", ",", "neurons", ")", ":", "s", "=", "[", "]", "for", "neuron", "in", "neurons", ":", "self", ".", "cursor", ".", "execute", "(", "'SELECT time FROM spikes where neuron = %d'", "%", "neuron", ")", "sel", "=", "self", ".", "cu...
Select spike trains. Parameters ---------- neurons : numpy.ndarray or list Array of list of neurons. Returns ------- list List of numpy.ndarray objects containing spike times. See also -------- sqlite3.connect.cursor
[ "Select", "spike", "trains", "." ]
train
https://github.com/INM-6/hybridLFPy/blob/c38bdf38982c4624c2f70caeb50c40f1d5980abd/hybridLFPy/gdf.py#L224-L252