body_hash stringlengths 64 64 | body stringlengths 23 109k | docstring stringlengths 1 57k | path stringlengths 4 198 | name stringlengths 1 115 | repository_name stringlengths 7 111 | repository_stars float64 0 191k | lang stringclasses 1 value | body_without_docstring stringlengths 14 108k | unified stringlengths 45 133k |
|---|---|---|---|---|---|---|---|---|---|
ef1b295db58121fdac25b5780a274f70d03030b346ca2bc6dc5df08e8e626766 | @staticmethod
def makeHexPattern(length):
'A method that returns a regex pattern that matches a case-insensitive hexadecimal number of exactly a specified\n length.\n Parameter "length" - the length of the pattern in digits/characters/nibbles\n Returns the corresponding pattern.\n '
return (('(?:\\A|[^0-9a-zA-Z])([0-9a-fA-F]{' + str(length)) + '})(?:[^0-9a-zA-Z]|\\Z)') | A method that returns a regex pattern that matches a case-insensitive hexadecimal number of exactly a specified
length.
Parameter "length" - the length of the pattern in digits/characters/nibbles
Returns the corresponding pattern. | sc_email_parser/EmailParser.py | makeHexPattern | lizhecht/resilient-community-apps | 65 | python | @staticmethod
def makeHexPattern(length):
'A method that returns a regex pattern that matches a case-insensitive hexadecimal number of exactly a specified\n length.\n Parameter "length" - the length of the pattern in digits/characters/nibbles\n Returns the corresponding pattern.\n '
return (('(?:\\A|[^0-9a-zA-Z])([0-9a-fA-F]{' + str(length)) + '})(?:[^0-9a-zA-Z]|\\Z)') | @staticmethod
def makeHexPattern(length):
'A method that returns a regex pattern that matches a case-insensitive hexadecimal number of exactly a specified\n length.\n Parameter "length" - the length of the pattern in digits/characters/nibbles\n Returns the corresponding pattern.\n '
return (('(?:\\A|[^0-9a-zA-Z])([0-9a-fA-F]{' + str(length)) + '})(?:[^0-9a-zA-Z]|\\Z)')<|docstring|>A method that returns a regex pattern that matches a case-insensitive hexadecimal number of exactly a specified
length.
Parameter "length" - the length of the pattern in digits/characters/nibbles
Returns the corresponding pattern.<|endoftext|> |
dd5e058d78ff495f6ec6ea3be2dcb93d52888ef257f0bd21343a3a69a9d74576 | def processArtifactCategory(self, regex, artifactType, description, *optionalListModifierFn):
'A method to process a category of artifact, based on a regular expression. Each match of the regex in the\n email message contents is added as an artifact of the same type and description. The optional list modifier\n function, if present, is run against the list of matches before the artifact addition takes place.\n Parameter "regex" - the regular expression to use to pick out the text to interpret as an artifact\n Parameter "artifactType" - the type of the artifact\n Parameter "description" - the description of the artifact\n Parameter "optionalListModifierFn" - a function to run across the list of matches to filter inappropriate values\n No return value.\n '
for theText in self.emailContents:
dataList = re.findall(regex, theText, re.UNICODE)
if ((dataList is not None) and (len(dataList) > 0)):
if (optionalListModifierFn is not None):
for aFunction in optionalListModifierFn:
dataList = map(aFunction, dataList)
dataList = [x for x in dataList if (x is not None)]
self.printList(u'Found {0} ( {1} )'.format(artifactType, description), dataList)
for x in dataList:
self.addUniqueArtifact(str(x), artifactType, description)
else:
log.debug(u'Could not find artifact {0} for regex {1}'.format(artifactType, regex)) | A method to process a category of artifact, based on a regular expression. Each match of the regex in the
email message contents is added as an artifact of the same type and description. The optional list modifier
function, if present, is run against the list of matches before the artifact addition takes place.
Parameter "regex" - the regular expression to use to pick out the text to interpret as an artifact
Parameter "artifactType" - the type of the artifact
Parameter "description" - the description of the artifact
Parameter "optionalListModifierFn" - a function to run across the list of matches to filter inappropriate values
No return value. | sc_email_parser/EmailParser.py | processArtifactCategory | lizhecht/resilient-community-apps | 65 | python | def processArtifactCategory(self, regex, artifactType, description, *optionalListModifierFn):
'A method to process a category of artifact, based on a regular expression. Each match of the regex in the\n email message contents is added as an artifact of the same type and description. The optional list modifier\n function, if present, is run against the list of matches before the artifact addition takes place.\n Parameter "regex" - the regular expression to use to pick out the text to interpret as an artifact\n Parameter "artifactType" - the type of the artifact\n Parameter "description" - the description of the artifact\n Parameter "optionalListModifierFn" - a function to run across the list of matches to filter inappropriate values\n No return value.\n '
for theText in self.emailContents:
dataList = re.findall(regex, theText, re.UNICODE)
if ((dataList is not None) and (len(dataList) > 0)):
if (optionalListModifierFn is not None):
for aFunction in optionalListModifierFn:
dataList = map(aFunction, dataList)
dataList = [x for x in dataList if (x is not None)]
self.printList(u'Found {0} ( {1} )'.format(artifactType, description), dataList)
for x in dataList:
self.addUniqueArtifact(str(x), artifactType, description)
else:
log.debug(u'Could not find artifact {0} for regex {1}'.format(artifactType, regex)) | def processArtifactCategory(self, regex, artifactType, description, *optionalListModifierFn):
'A method to process a category of artifact, based on a regular expression. Each match of the regex in the\n email message contents is added as an artifact of the same type and description. The optional list modifier\n function, if present, is run against the list of matches before the artifact addition takes place.\n Parameter "regex" - the regular expression to use to pick out the text to interpret as an artifact\n Parameter "artifactType" - the type of the artifact\n Parameter "description" - the description of the artifact\n Parameter "optionalListModifierFn" - a function to run across the list of matches to filter inappropriate values\n No return value.\n '
for theText in self.emailContents:
dataList = re.findall(regex, theText, re.UNICODE)
if ((dataList is not None) and (len(dataList) > 0)):
if (optionalListModifierFn is not None):
for aFunction in optionalListModifierFn:
dataList = map(aFunction, dataList)
dataList = [x for x in dataList if (x is not None)]
self.printList(u'Found {0} ( {1} )'.format(artifactType, description), dataList)
for x in dataList:
self.addUniqueArtifact(str(x), artifactType, description)
else:
log.debug(u'Could not find artifact {0} for regex {1}'.format(artifactType, regex))<|docstring|>A method to process a category of artifact, based on a regular expression. Each match of the regex in the
email message contents is added as an artifact of the same type and description. The optional list modifier
function, if present, is run against the list of matches before the artifact addition takes place.
Parameter "regex" - the regular expression to use to pick out the text to interpret as an artifact
Parameter "artifactType" - the type of the artifact
Parameter "description" - the description of the artifact
Parameter "optionalListModifierFn" - a function to run across the list of matches to filter inappropriate values
No return value.<|endoftext|> |
b1037e2ebad34611e7753fd1aa65604c3f6b25674617c851f81c8e786209d80f | def checkIPAllowList(self, anAddress):
' A method to check a list of IP Addresses aginst the allowlist. '
allowList = (self.ipV4AllowListConverted if ('.' in anAddress.addressAsString) else self.ipV6AllowListConverted)
log.debug(u'Going to filter {0} against allowlist {1}'.format(anAddress, allowList))
return allowList.checkIsItemNotOnAllowList(anAddress) | A method to check a list of IP Addresses aginst the allowlist. | sc_email_parser/EmailParser.py | checkIPAllowList | lizhecht/resilient-community-apps | 65 | python | def checkIPAllowList(self, anAddress):
' '
allowList = (self.ipV4AllowListConverted if ('.' in anAddress.addressAsString) else self.ipV6AllowListConverted)
log.debug(u'Going to filter {0} against allowlist {1}'.format(anAddress, allowList))
return allowList.checkIsItemNotOnAllowList(anAddress) | def checkIPAllowList(self, anAddress):
' '
allowList = (self.ipV4AllowListConverted if ('.' in anAddress.addressAsString) else self.ipV6AllowListConverted)
log.debug(u'Going to filter {0} against allowlist {1}'.format(anAddress, allowList))
return allowList.checkIsItemNotOnAllowList(anAddress)<|docstring|>A method to check a list of IP Addresses aginst the allowlist.<|endoftext|> |
2105e7bcdbefa7a454904e36d4ec4f22011436fe75d7a56556136ff5da0ed741 | def checkDomainAllowList(self, aURL):
' A method to check a list of URLs aginst a allowlist. '
log.debug(u'Going to filter {0} against allowlist {1}'.format(aURL, self.domainAllowListConverted))
return self.domainAllowListConverted.checkIsItemNotOnAllowList(aURL) | A method to check a list of URLs aginst a allowlist. | sc_email_parser/EmailParser.py | checkDomainAllowList | lizhecht/resilient-community-apps | 65 | python | def checkDomainAllowList(self, aURL):
' '
log.debug(u'Going to filter {0} against allowlist {1}'.format(aURL, self.domainAllowListConverted))
return self.domainAllowListConverted.checkIsItemNotOnAllowList(aURL) | def checkDomainAllowList(self, aURL):
' '
log.debug(u'Going to filter {0} against allowlist {1}'.format(aURL, self.domainAllowListConverted))
return self.domainAllowListConverted.checkIsItemNotOnAllowList(aURL)<|docstring|>A method to check a list of URLs aginst a allowlist.<|endoftext|> |
98765ad886285243bf1053756b5b842f702b026a66acc6a274def6b8fcbbe6db | def processIPFully(self, theAddressAsString):
' A method to filter inadvertantly matched IP strings and then filter out IP addresses that appear on the allowlist.\n Parameter "theAddressAsString" - The address in question as a string\n Return value - if the address passes the tests then it is returned, otherwise None.\n '
theAddressAsString = self.cleanIP(theAddressAsString)
if (theAddressAsString is not None):
theAddressAsObj = IPAddress(theAddressAsString)
if (theAddressAsObj is not None):
theAddressAsObj = self.checkIPAllowList(theAddressAsObj)
if (theAddressAsObj is not None):
return theAddressAsObj.addressAsString
return None | A method to filter inadvertantly matched IP strings and then filter out IP addresses that appear on the allowlist.
Parameter "theAddressAsString" - The address in question as a string
Return value - if the address passes the tests then it is returned, otherwise None. | sc_email_parser/EmailParser.py | processIPFully | lizhecht/resilient-community-apps | 65 | python | def processIPFully(self, theAddressAsString):
' A method to filter inadvertantly matched IP strings and then filter out IP addresses that appear on the allowlist.\n Parameter "theAddressAsString" - The address in question as a string\n Return value - if the address passes the tests then it is returned, otherwise None.\n '
theAddressAsString = self.cleanIP(theAddressAsString)
if (theAddressAsString is not None):
theAddressAsObj = IPAddress(theAddressAsString)
if (theAddressAsObj is not None):
theAddressAsObj = self.checkIPAllowList(theAddressAsObj)
if (theAddressAsObj is not None):
return theAddressAsObj.addressAsString
return None | def processIPFully(self, theAddressAsString):
' A method to filter inadvertantly matched IP strings and then filter out IP addresses that appear on the allowlist.\n Parameter "theAddressAsString" - The address in question as a string\n Return value - if the address passes the tests then it is returned, otherwise None.\n '
theAddressAsString = self.cleanIP(theAddressAsString)
if (theAddressAsString is not None):
theAddressAsObj = IPAddress(theAddressAsString)
if (theAddressAsObj is not None):
theAddressAsObj = self.checkIPAllowList(theAddressAsObj)
if (theAddressAsObj is not None):
return theAddressAsObj.addressAsString
return None<|docstring|>A method to filter inadvertantly matched IP strings and then filter out IP addresses that appear on the allowlist.
Parameter "theAddressAsString" - The address in question as a string
Return value - if the address passes the tests then it is returned, otherwise None.<|endoftext|> |
7aa573a6dd1421963fcc005f3e6641bfb1ec9d3c58d6674ce1eed6232385d6f7 | def processAttachments(self):
' A method to process the email attachments, if present. Each non-inline email attachment is added as an\n attachment to the incident, and its name is added as an artifact. Inline attachments are assumed to be unimportant.\n No return value.\n '
for attachment in emailmessage.attachments:
if (not attachment.inline):
incident.addEmailAttachment(attachment.id)
incident.addArtifact('Email Attachment Name', attachment.suggested_filename, '') | A method to process the email attachments, if present. Each non-inline email attachment is added as an
attachment to the incident, and its name is added as an artifact. Inline attachments are assumed to be unimportant.
No return value. | sc_email_parser/EmailParser.py | processAttachments | lizhecht/resilient-community-apps | 65 | python | def processAttachments(self):
' A method to process the email attachments, if present. Each non-inline email attachment is added as an\n attachment to the incident, and its name is added as an artifact. Inline attachments are assumed to be unimportant.\n No return value.\n '
for attachment in emailmessage.attachments:
if (not attachment.inline):
incident.addEmailAttachment(attachment.id)
incident.addArtifact('Email Attachment Name', attachment.suggested_filename, ) | def processAttachments(self):
' A method to process the email attachments, if present. Each non-inline email attachment is added as an\n attachment to the incident, and its name is added as an artifact. Inline attachments are assumed to be unimportant.\n No return value.\n '
for attachment in emailmessage.attachments:
if (not attachment.inline):
incident.addEmailAttachment(attachment.id)
incident.addArtifact('Email Attachment Name', attachment.suggested_filename, )<|docstring|>A method to process the email attachments, if present. Each non-inline email attachment is added as an
attachment to the incident, and its name is added as an artifact. Inline attachments are assumed to be unimportant.
No return value.<|endoftext|> |
d8fb86800ba17f9b41d46529c990bd46ced1985a02740ce9febb902df80cf19a | def addBasicInfoToIncident(self):
'A method to perform basic information extraction from the email message.\n The email message sender address, including personal name if present, is set as the reporter field\n in the incident. An artifact is created from the email message subject with the type "Email Subject".\n No return value.\n '
newReporterInfo = emailmessage.sender.address
if (hasattr(emailmessage.sender, 'name') and (emailmessage.sender.name is not None)):
newReporterInfo = u'{0} <{1}>'.format(emailmessage.sender.name, emailmessage.sender.address)
log.info(u'Adding reporter field "{0}"'.format(newReporterInfo))
incident.reporter = newReporterInfo
if (subject is not None):
self.addUniqueArtifact(u'{0}'.format(subject), 'Email Subject', 'Suspicious email subject') | A method to perform basic information extraction from the email message.
The email message sender address, including personal name if present, is set as the reporter field
in the incident. An artifact is created from the email message subject with the type "Email Subject".
No return value. | sc_email_parser/EmailParser.py | addBasicInfoToIncident | lizhecht/resilient-community-apps | 65 | python | def addBasicInfoToIncident(self):
'A method to perform basic information extraction from the email message.\n The email message sender address, including personal name if present, is set as the reporter field\n in the incident. An artifact is created from the email message subject with the type "Email Subject".\n No return value.\n '
newReporterInfo = emailmessage.sender.address
if (hasattr(emailmessage.sender, 'name') and (emailmessage.sender.name is not None)):
newReporterInfo = u'{0} <{1}>'.format(emailmessage.sender.name, emailmessage.sender.address)
log.info(u'Adding reporter field "{0}"'.format(newReporterInfo))
incident.reporter = newReporterInfo
if (subject is not None):
self.addUniqueArtifact(u'{0}'.format(subject), 'Email Subject', 'Suspicious email subject') | def addBasicInfoToIncident(self):
'A method to perform basic information extraction from the email message.\n The email message sender address, including personal name if present, is set as the reporter field\n in the incident. An artifact is created from the email message subject with the type "Email Subject".\n No return value.\n '
newReporterInfo = emailmessage.sender.address
if (hasattr(emailmessage.sender, 'name') and (emailmessage.sender.name is not None)):
newReporterInfo = u'{0} <{1}>'.format(emailmessage.sender.name, emailmessage.sender.address)
log.info(u'Adding reporter field "{0}"'.format(newReporterInfo))
incident.reporter = newReporterInfo
if (subject is not None):
self.addUniqueArtifact(u'{0}'.format(subject), 'Email Subject', 'Suspicious email subject')<|docstring|>A method to perform basic information extraction from the email message.
The email message sender address, including personal name if present, is set as the reporter field
in the incident. An artifact is created from the email message subject with the type "Email Subject".
No return value.<|endoftext|> |
5dca1c98219960e7633020cff9de28ee9f5ba9f1896300d92429d0312dedf5ce | def getPSD(psd_file):
'Return PSDImage object.'
return PSDImage.open(psd_file) | Return PSDImage object. | application/psd_utils.py | getPSD | Igorxp5/event-badge-generator | 2 | python | def getPSD(psd_file):
return PSDImage.open(psd_file) | def getPSD(psd_file):
return PSDImage.open(psd_file)<|docstring|>Return PSDImage object.<|endoftext|> |
84bc61be8026c13218edcc52893b199c84b9ea074ad9b98bcc5e18e8d4d77bf6 | def get_psd_fonts(psd):
'Get list of fonts from a PSD file.'
layers = get_all_psd_layers(psd)
fonts = []
for layer in layers:
if (PSDLayer(layer.kind) is PSDLayer.TYPE):
font = get_text_layer_properties(layer)[0]
fonts.append(font)
return fonts | Get list of fonts from a PSD file. | application/psd_utils.py | get_psd_fonts | Igorxp5/event-badge-generator | 2 | python | def get_psd_fonts(psd):
layers = get_all_psd_layers(psd)
fonts = []
for layer in layers:
if (PSDLayer(layer.kind) is PSDLayer.TYPE):
font = get_text_layer_properties(layer)[0]
fonts.append(font)
return fonts | def get_psd_fonts(psd):
layers = get_all_psd_layers(psd)
fonts = []
for layer in layers:
if (PSDLayer(layer.kind) is PSDLayer.TYPE):
font = get_text_layer_properties(layer)[0]
fonts.append(font)
return fonts<|docstring|>Get list of fonts from a PSD file.<|endoftext|> |
606fa97e3bda8d9f3e44052a7e4094dd71e58e431e22eb4b45b3b5ba91ffdc13 | def get_editable_psd_layers(psd_file_path):
'Return a list of the psd layers that can be edited.\n Each list item contains id, name, type and base64 image.\n '
psd = getPSD(psd_file_path)
all_psd_layers = get_all_psd_layers(psd)
editable_types = (PSDLayer.TYPE, PSDLayer.PIXEL, PSDLayer.SHAPE, PSDLayer.SMART_OBJECT, PSDLayer.PSD_IMAGE)
editable_psd_layers = []
for layer in all_psd_layers:
id_ = layer.layer_id
name = layer.name
type_ = PSDLayer(layer.kind)
if (type_ in editable_types):
image_buffer = BytesIO()
layer_image = render_layer(psd_file_path, id_, original_size=False)
image_data = get_base64_from_pil_image(layer_image)
editable_psd_layers.append({'id': id_, 'name': name, 'type': type_, 'image_data': image_data})
return editable_psd_layers | Return a list of the psd layers that can be edited.
Each list item contains id, name, type and base64 image. | application/psd_utils.py | get_editable_psd_layers | Igorxp5/event-badge-generator | 2 | python | def get_editable_psd_layers(psd_file_path):
'Return a list of the psd layers that can be edited.\n Each list item contains id, name, type and base64 image.\n '
psd = getPSD(psd_file_path)
all_psd_layers = get_all_psd_layers(psd)
editable_types = (PSDLayer.TYPE, PSDLayer.PIXEL, PSDLayer.SHAPE, PSDLayer.SMART_OBJECT, PSDLayer.PSD_IMAGE)
editable_psd_layers = []
for layer in all_psd_layers:
id_ = layer.layer_id
name = layer.name
type_ = PSDLayer(layer.kind)
if (type_ in editable_types):
image_buffer = BytesIO()
layer_image = render_layer(psd_file_path, id_, original_size=False)
image_data = get_base64_from_pil_image(layer_image)
editable_psd_layers.append({'id': id_, 'name': name, 'type': type_, 'image_data': image_data})
return editable_psd_layers | def get_editable_psd_layers(psd_file_path):
'Return a list of the psd layers that can be edited.\n Each list item contains id, name, type and base64 image.\n '
psd = getPSD(psd_file_path)
all_psd_layers = get_all_psd_layers(psd)
editable_types = (PSDLayer.TYPE, PSDLayer.PIXEL, PSDLayer.SHAPE, PSDLayer.SMART_OBJECT, PSDLayer.PSD_IMAGE)
editable_psd_layers = []
for layer in all_psd_layers:
id_ = layer.layer_id
name = layer.name
type_ = PSDLayer(layer.kind)
if (type_ in editable_types):
image_buffer = BytesIO()
layer_image = render_layer(psd_file_path, id_, original_size=False)
image_data = get_base64_from_pil_image(layer_image)
editable_psd_layers.append({'id': id_, 'name': name, 'type': type_, 'image_data': image_data})
return editable_psd_layers<|docstring|>Return a list of the psd layers that can be edited.
Each list item contains id, name, type and base64 image.<|endoftext|> |
9fdc2c8bcc3588febcd281528975838b322a935dbd264c885f8cdddea505c073 | def test_create_user_with_successful(self):
'Test creating a new user with an email in sucessful'
email = 'example@example.com'
password = 'beanalytic1234'
'calling the create_user function on the user manager on our user model'
user = get_user_model().objects.create_user(email=email, password=password)
self.assertEqual(user.email, email)
self.assertTrue(user.check_password(password)) | Test creating a new user with an email in sucessful | aapp/core/tests/test_models.py | test_create_user_with_successful | ghoshnilotpal8/reciepe-aapp-api | 0 | python | def test_create_user_with_successful(self):
email = 'example@example.com'
password = 'beanalytic1234'
'calling the create_user function on the user manager on our user model'
user = get_user_model().objects.create_user(email=email, password=password)
self.assertEqual(user.email, email)
self.assertTrue(user.check_password(password)) | def test_create_user_with_successful(self):
email = 'example@example.com'
password = 'beanalytic1234'
'calling the create_user function on the user manager on our user model'
user = get_user_model().objects.create_user(email=email, password=password)
self.assertEqual(user.email, email)
self.assertTrue(user.check_password(password))<|docstring|>Test creating a new user with an email in sucessful<|endoftext|> |
a1ce4b3a515def5a8503d0150b4e56c7a587c32d791e637c303297ad5f29870c | def test_new_user_email_normalized(self):
'Test the email for a new user is normalized'
email = 'example@example.com'
user = get_user_model().objects.create_user(email, 'test123')
self.assertEqual(user.email, email.lower()) | Test the email for a new user is normalized | aapp/core/tests/test_models.py | test_new_user_email_normalized | ghoshnilotpal8/reciepe-aapp-api | 0 | python | def test_new_user_email_normalized(self):
email = 'example@example.com'
user = get_user_model().objects.create_user(email, 'test123')
self.assertEqual(user.email, email.lower()) | def test_new_user_email_normalized(self):
email = 'example@example.com'
user = get_user_model().objects.create_user(email, 'test123')
self.assertEqual(user.email, email.lower())<|docstring|>Test the email for a new user is normalized<|endoftext|> |
7da94b3b00879ffd63c0f7770685fb90aa46e562e2c04cb515b4a962e39b4850 | def test_new_user_invalid_email(self):
'Test creating user with no email raises error'
with self.assertRaises(ValueError):
get_user_model().objects.create_user(None, 'test123') | Test creating user with no email raises error | aapp/core/tests/test_models.py | test_new_user_invalid_email | ghoshnilotpal8/reciepe-aapp-api | 0 | python | def test_new_user_invalid_email(self):
with self.assertRaises(ValueError):
get_user_model().objects.create_user(None, 'test123') | def test_new_user_invalid_email(self):
with self.assertRaises(ValueError):
get_user_model().objects.create_user(None, 'test123')<|docstring|>Test creating user with no email raises error<|endoftext|> |
eae8bd6f3e93717829924dc6ec79a73643ea616df73d82259e611c2cfbf43c57 | def test_create_new_superuser(self):
'Test creating a new superuser'
user = get_user_model().objects.create_superuser('example@example.com', 'test123')
self.assertTrue(user.is_superuser)
self.assertTrue(user.is_staff) | Test creating a new superuser | aapp/core/tests/test_models.py | test_create_new_superuser | ghoshnilotpal8/reciepe-aapp-api | 0 | python | def test_create_new_superuser(self):
user = get_user_model().objects.create_superuser('example@example.com', 'test123')
self.assertTrue(user.is_superuser)
self.assertTrue(user.is_staff) | def test_create_new_superuser(self):
user = get_user_model().objects.create_superuser('example@example.com', 'test123')
self.assertTrue(user.is_superuser)
self.assertTrue(user.is_staff)<|docstring|>Test creating a new superuser<|endoftext|> |
0cb5fdfd6b0665c6e42f05926bbe8a804da7c022b353013146c0eeff56ce7120 | @classmethod
def validate_dto(cls, data: dict) -> bool:
'Validate the data-transfer object.'
required_keys_l1 = {'hash', 'status', 'deadline', 'meta'}
required_keys_l2 = {'channelName', 'address'}
return (cls.validate_dto_required(data, required_keys_l1) and cls.validate_dto_all(data, required_keys_l1) and cls.validate_dto_required(data['meta'], required_keys_l2) and cls.validate_dto_all(data['meta'], required_keys_l2)) | Validate the data-transfer object. | xpxchain/models/transaction/transaction_status_error.py | validate_dto | Sharmelen/python-xpx-chain-sdk | 1 | python | @classmethod
def validate_dto(cls, data: dict) -> bool:
required_keys_l1 = {'hash', 'status', 'deadline', 'meta'}
required_keys_l2 = {'channelName', 'address'}
return (cls.validate_dto_required(data, required_keys_l1) and cls.validate_dto_all(data, required_keys_l1) and cls.validate_dto_required(data['meta'], required_keys_l2) and cls.validate_dto_all(data['meta'], required_keys_l2)) | @classmethod
def validate_dto(cls, data: dict) -> bool:
required_keys_l1 = {'hash', 'status', 'deadline', 'meta'}
required_keys_l2 = {'channelName', 'address'}
return (cls.validate_dto_required(data, required_keys_l1) and cls.validate_dto_all(data, required_keys_l1) and cls.validate_dto_required(data['meta'], required_keys_l2) and cls.validate_dto_all(data['meta'], required_keys_l2))<|docstring|>Validate the data-transfer object.<|endoftext|> |
711e04ac6d88dc67a80ab40f535140f455686ebdc4f3a4e758d5021480935736 | @scenario('parameters.feature', 'stage volume request with a specified nvme nr io queues')
def test_stage_volume_request_with_a_specified_nvme_nr_io_queues():
'stage volume request with a specified nvme nr io queues.' | stage volume request with a specified nvme nr io queues. | tests/bdd/features/csi/node/test_parameters.py | test_stage_volume_request_with_a_specified_nvme_nr_io_queues | Abhinandan-Purkait/mayastor-control-plane | 2 | python | @scenario('parameters.feature', 'stage volume request with a specified nvme nr io queues')
def test_stage_volume_request_with_a_specified_nvme_nr_io_queues():
| @scenario('parameters.feature', 'stage volume request with a specified nvme nr io queues')
def test_stage_volume_request_with_a_specified_nvme_nr_io_queues():
<|docstring|>stage volume request with a specified nvme nr io queues.<|endoftext|> |
ef5c77c82a8b05e4d59b85ea963225536b3c864f37b6f1bbbc1bfedc438f0b31 | @given(parsers.parse('a csi node plugin with {io:d} IO queues configured'))
def a_csi_node_plugin_with_io_queues_configured(io):
'a csi node plugin with <IO> queues configured.' | a csi node plugin with <IO> queues configured. | tests/bdd/features/csi/node/test_parameters.py | a_csi_node_plugin_with_io_queues_configured | Abhinandan-Purkait/mayastor-control-plane | 2 | python | @given(parsers.parse('a csi node plugin with {io:d} IO queues configured'))
def a_csi_node_plugin_with_io_queues_configured(io):
| @given(parsers.parse('a csi node plugin with {io:d} IO queues configured'))
def a_csi_node_plugin_with_io_queues_configured(io):
<|docstring|>a csi node plugin with <IO> queues configured.<|endoftext|> |
64a1c6f2374b0d03b14909f4f5cfdafc9fbdb1fc6aae554a0d4e8970f0ddae73 | @given('an io-engine cluster')
def an_io_engine_cluster(setup):
'an io-engine cluster.' | an io-engine cluster. | tests/bdd/features/csi/node/test_parameters.py | an_io_engine_cluster | Abhinandan-Purkait/mayastor-control-plane | 2 | python | @given('an io-engine cluster')
def an_io_engine_cluster(setup):
| @given('an io-engine cluster')
def an_io_engine_cluster(setup):
<|docstring|>an io-engine cluster.<|endoftext|> |
ed4bc7d6959005c6de4c0d390c81e6a4525c6d90eccd9ba045064b199453b85d | @when('staging a volume')
def staging_a_volume(staging_a_volume):
'staging a volume.' | staging a volume. | tests/bdd/features/csi/node/test_parameters.py | staging_a_volume | Abhinandan-Purkait/mayastor-control-plane | 2 | python | @when('staging a volume')
def staging_a_volume(staging_a_volume):
| @when('staging a volume')
def staging_a_volume(staging_a_volume):
<|docstring|>staging a volume.<|endoftext|> |
f66678537d9b2438f9bc87055faccd975751c77ea34efc4f88069b84fe9f659c | @then(parsers.parse('the nvme device should report {total:d} TOTAL queues'))
def the_nvme_device_should_report_total_queues(total, the_nvme_device_should_report_total_queues):
'the nvme device should report <TOTAL> queues.' | the nvme device should report <TOTAL> queues. | tests/bdd/features/csi/node/test_parameters.py | the_nvme_device_should_report_total_queues | Abhinandan-Purkait/mayastor-control-plane | 2 | python | @then(parsers.parse('the nvme device should report {total:d} TOTAL queues'))
def the_nvme_device_should_report_total_queues(total, the_nvme_device_should_report_total_queues):
| @then(parsers.parse('the nvme device should report {total:d} TOTAL queues'))
def the_nvme_device_should_report_total_queues(total, the_nvme_device_should_report_total_queues):
<|docstring|>the nvme device should report <TOTAL> queues.<|endoftext|> |
3be619674d151c432bfb54a9dbe7144b3803c07b3ebda623583d717b1c733277 | def quiz1(self):
'retrieve BTC, ETH, XRP, LTC historical price data from KRAKEN, plot the trend base on them and\n calculate their Pearson correlation coefficient'
(_, ax) = plt.subplots(len(_data_file_list), figsize=(12, 7))
datas = {}
for (i, file) in enumerate(_data_file_list):
pair_name = file.split('_')[1]
pair_data = pd.read_csv((_data_path / file), index_col='Date', parse_dates=True, skiprows=1)
pair_data.set_index(pd.to_datetime(pair_data.index, format='%Y-%m-%d %I-%p'), inplace=True, verify_integrity=True)
datas[pair_name] = pair_data.sort_index()
print('pair: {}, head data: \n{}'.format(pair_name, datas[pair_name].head(10)))
new_ylabel = (pair_name + '($)')
g = sns.relplot(x='Date', y=new_ylabel, kind='line', data=datas[pair_name].head(50).rename(columns={'Close': new_ylabel}).reset_index(), ax=ax[i])
g.set(ylabel=pair_name)
plt.close(g.fig)
plt.tight_layout()
plt.show()
for (c1, c2) in itertools.combinations(datas.keys(), 2):
print('Pearson correlation coefficient between {} and {}: {}'.format(c1, c2, scipy.stats.pearsonr(datas[c1].Close, datas[c2].Close)))
corr_data = pd.DataFrame({c: v.Close for (c, v) in datas.items()})
plt.figure(figsize=(12, 7))
plt.title('Pearson correlation coefficients between BTC, ETH, XRP, LTC')
sns.heatmap(corr_data.corr(), vmin=(- 1.0), vmax=1.0, square=True, annot=True)
plt.show() | retrieve BTC, ETH, XRP, LTC historical price data from KRAKEN, plot the trend base on them and
calculate their Pearson correlation coefficient | q1.py | quiz1 | LoveULin/BitCapitalCA | 0 | python | def quiz1(self):
'retrieve BTC, ETH, XRP, LTC historical price data from KRAKEN, plot the trend base on them and\n calculate their Pearson correlation coefficient'
(_, ax) = plt.subplots(len(_data_file_list), figsize=(12, 7))
datas = {}
for (i, file) in enumerate(_data_file_list):
pair_name = file.split('_')[1]
pair_data = pd.read_csv((_data_path / file), index_col='Date', parse_dates=True, skiprows=1)
pair_data.set_index(pd.to_datetime(pair_data.index, format='%Y-%m-%d %I-%p'), inplace=True, verify_integrity=True)
datas[pair_name] = pair_data.sort_index()
print('pair: {}, head data: \n{}'.format(pair_name, datas[pair_name].head(10)))
new_ylabel = (pair_name + '($)')
g = sns.relplot(x='Date', y=new_ylabel, kind='line', data=datas[pair_name].head(50).rename(columns={'Close': new_ylabel}).reset_index(), ax=ax[i])
g.set(ylabel=pair_name)
plt.close(g.fig)
plt.tight_layout()
plt.show()
for (c1, c2) in itertools.combinations(datas.keys(), 2):
print('Pearson correlation coefficient between {} and {}: {}'.format(c1, c2, scipy.stats.pearsonr(datas[c1].Close, datas[c2].Close)))
corr_data = pd.DataFrame({c: v.Close for (c, v) in datas.items()})
plt.figure(figsize=(12, 7))
plt.title('Pearson correlation coefficients between BTC, ETH, XRP, LTC')
sns.heatmap(corr_data.corr(), vmin=(- 1.0), vmax=1.0, square=True, annot=True)
plt.show() | def quiz1(self):
'retrieve BTC, ETH, XRP, LTC historical price data from KRAKEN, plot the trend base on them and\n calculate their Pearson correlation coefficient'
(_, ax) = plt.subplots(len(_data_file_list), figsize=(12, 7))
datas = {}
for (i, file) in enumerate(_data_file_list):
pair_name = file.split('_')[1]
pair_data = pd.read_csv((_data_path / file), index_col='Date', parse_dates=True, skiprows=1)
pair_data.set_index(pd.to_datetime(pair_data.index, format='%Y-%m-%d %I-%p'), inplace=True, verify_integrity=True)
datas[pair_name] = pair_data.sort_index()
print('pair: {}, head data: \n{}'.format(pair_name, datas[pair_name].head(10)))
new_ylabel = (pair_name + '($)')
g = sns.relplot(x='Date', y=new_ylabel, kind='line', data=datas[pair_name].head(50).rename(columns={'Close': new_ylabel}).reset_index(), ax=ax[i])
g.set(ylabel=pair_name)
plt.close(g.fig)
plt.tight_layout()
plt.show()
for (c1, c2) in itertools.combinations(datas.keys(), 2):
print('Pearson correlation coefficient between {} and {}: {}'.format(c1, c2, scipy.stats.pearsonr(datas[c1].Close, datas[c2].Close)))
corr_data = pd.DataFrame({c: v.Close for (c, v) in datas.items()})
plt.figure(figsize=(12, 7))
plt.title('Pearson correlation coefficients between BTC, ETH, XRP, LTC')
sns.heatmap(corr_data.corr(), vmin=(- 1.0), vmax=1.0, square=True, annot=True)
plt.show()<|docstring|>retrieve BTC, ETH, XRP, LTC historical price data from KRAKEN, plot the trend base on them and
calculate their Pearson correlation coefficient<|endoftext|> |
a13516ab6caaa09ac45c3141f68285931d75070e0101ab61115ad38a1ffefdd3 | @pytest.fixture
def fieldset(xdim=20, ydim=20):
' Standard unit mesh fieldset '
lon = np.linspace(0.0, 1.0, xdim, dtype=np.float32)
lat = np.linspace(0.0, 1.0, ydim, dtype=np.float32)
(U, V) = np.meshgrid(lat, lon)
data = {'U': np.array(U, dtype=np.float32), 'V': np.array(V, dtype=np.float32)}
dimensions = {'lat': lat, 'lon': lon}
return FieldSet.from_data(data, dimensions, mesh='flat', transpose=True) | Standard unit mesh fieldset | parcels/tests/test_kernel_language.py | fieldset | pdnooteboom/NA_forams | 1 | python | @pytest.fixture
def fieldset(xdim=20, ydim=20):
' '
lon = np.linspace(0.0, 1.0, xdim, dtype=np.float32)
lat = np.linspace(0.0, 1.0, ydim, dtype=np.float32)
(U, V) = np.meshgrid(lat, lon)
data = {'U': np.array(U, dtype=np.float32), 'V': np.array(V, dtype=np.float32)}
dimensions = {'lat': lat, 'lon': lon}
return FieldSet.from_data(data, dimensions, mesh='flat', transpose=True) | @pytest.fixture
def fieldset(xdim=20, ydim=20):
' '
lon = np.linspace(0.0, 1.0, xdim, dtype=np.float32)
lat = np.linspace(0.0, 1.0, ydim, dtype=np.float32)
(U, V) = np.meshgrid(lat, lon)
data = {'U': np.array(U, dtype=np.float32), 'V': np.array(V, dtype=np.float32)}
dimensions = {'lat': lat, 'lon': lon}
return FieldSet.from_data(data, dimensions, mesh='flat', transpose=True)<|docstring|>Standard unit mesh fieldset<|endoftext|> |
d3ff7e482a73ad3535b86d7542c78c17d7cc0e67c435847fec4bbc34932665b9 | @pytest.mark.parametrize('mode', ['scipy', 'jit'])
@pytest.mark.parametrize('name, expr, result', [('Add', '2 + 5', 7), ('Sub', '6 - 2', 4), ('Mul', '3 * 5', 15), ('Div', '24 / 4', 6)])
def test_expression_int(fieldset, mode, name, expr, result, npart=10):
' Test basic arithmetic expressions '
class TestParticle(ptype[mode]):
p = Variable('p', dtype=np.float32)
pset = ParticleSet(fieldset, pclass=TestParticle, lon=np.linspace(0.0, 1.0, npart), lat=(np.zeros(npart) + 0.5))
pset.execute(expr_kernel(('Test%s' % name), pset, expr), endtime=1.0, dt=1.0)
assert np.array([(result == particle.p) for particle in pset]).all() | Test basic arithmetic expressions | parcels/tests/test_kernel_language.py | test_expression_int | pdnooteboom/NA_forams | 1 | python | @pytest.mark.parametrize('mode', ['scipy', 'jit'])
@pytest.mark.parametrize('name, expr, result', [('Add', '2 + 5', 7), ('Sub', '6 - 2', 4), ('Mul', '3 * 5', 15), ('Div', '24 / 4', 6)])
def test_expression_int(fieldset, mode, name, expr, result, npart=10):
' '
class TestParticle(ptype[mode]):
p = Variable('p', dtype=np.float32)
pset = ParticleSet(fieldset, pclass=TestParticle, lon=np.linspace(0.0, 1.0, npart), lat=(np.zeros(npart) + 0.5))
pset.execute(expr_kernel(('Test%s' % name), pset, expr), endtime=1.0, dt=1.0)
assert np.array([(result == particle.p) for particle in pset]).all() | @pytest.mark.parametrize('mode', ['scipy', 'jit'])
@pytest.mark.parametrize('name, expr, result', [('Add', '2 + 5', 7), ('Sub', '6 - 2', 4), ('Mul', '3 * 5', 15), ('Div', '24 / 4', 6)])
def test_expression_int(fieldset, mode, name, expr, result, npart=10):
' '
class TestParticle(ptype[mode]):
p = Variable('p', dtype=np.float32)
pset = ParticleSet(fieldset, pclass=TestParticle, lon=np.linspace(0.0, 1.0, npart), lat=(np.zeros(npart) + 0.5))
pset.execute(expr_kernel(('Test%s' % name), pset, expr), endtime=1.0, dt=1.0)
assert np.array([(result == particle.p) for particle in pset]).all()<|docstring|>Test basic arithmetic expressions<|endoftext|> |
63f9857e06fcab93e40b32586cc2f0847b51e1297d19920527e3c05019ec03e0 | @pytest.mark.parametrize('mode', ['scipy', 'jit'])
@pytest.mark.parametrize('name, expr, result', [('Add', '2. + 5.', 7), ('Sub', '6. - 2.', 4), ('Mul', '3. * 5.', 15), ('Div', '24. / 4.', 6), ('Pow', '2 ** 3', 8)])
def test_expression_float(fieldset, mode, name, expr, result, npart=10):
' Test basic arithmetic expressions '
class TestParticle(ptype[mode]):
p = Variable('p', dtype=np.float32)
pset = ParticleSet(fieldset, pclass=TestParticle, lon=np.linspace(0.0, 1.0, npart), lat=(np.zeros(npart) + 0.5))
pset.execute(expr_kernel(('Test%s' % name), pset, expr), endtime=1.0, dt=1.0)
assert np.array([(result == particle.p) for particle in pset]).all() | Test basic arithmetic expressions | parcels/tests/test_kernel_language.py | test_expression_float | pdnooteboom/NA_forams | 1 | python | @pytest.mark.parametrize('mode', ['scipy', 'jit'])
@pytest.mark.parametrize('name, expr, result', [('Add', '2. + 5.', 7), ('Sub', '6. - 2.', 4), ('Mul', '3. * 5.', 15), ('Div', '24. / 4.', 6), ('Pow', '2 ** 3', 8)])
def test_expression_float(fieldset, mode, name, expr, result, npart=10):
' '
class TestParticle(ptype[mode]):
p = Variable('p', dtype=np.float32)
pset = ParticleSet(fieldset, pclass=TestParticle, lon=np.linspace(0.0, 1.0, npart), lat=(np.zeros(npart) + 0.5))
pset.execute(expr_kernel(('Test%s' % name), pset, expr), endtime=1.0, dt=1.0)
assert np.array([(result == particle.p) for particle in pset]).all() | @pytest.mark.parametrize('mode', ['scipy', 'jit'])
@pytest.mark.parametrize('name, expr, result', [('Add', '2. + 5.', 7), ('Sub', '6. - 2.', 4), ('Mul', '3. * 5.', 15), ('Div', '24. / 4.', 6), ('Pow', '2 ** 3', 8)])
def test_expression_float(fieldset, mode, name, expr, result, npart=10):
' '
class TestParticle(ptype[mode]):
p = Variable('p', dtype=np.float32)
pset = ParticleSet(fieldset, pclass=TestParticle, lon=np.linspace(0.0, 1.0, npart), lat=(np.zeros(npart) + 0.5))
pset.execute(expr_kernel(('Test%s' % name), pset, expr), endtime=1.0, dt=1.0)
assert np.array([(result == particle.p) for particle in pset]).all()<|docstring|>Test basic arithmetic expressions<|endoftext|> |
8b28dbe7f96085ee5ed4d37b9d1b142925e0fc203e6dc6991552b14d00d221eb | @pytest.mark.parametrize('mode', ['scipy', 'jit'])
@pytest.mark.parametrize('name, expr, result', [('True', 'True', True), ('False', 'False', False), ('And', 'True and False', False), ('Or', 'True or False', True), ('Equal', '5 == 5', True), ('Lesser', '5 < 3', False), ('LesserEq', '3 <= 5', True), ('Greater', '4 > 2', True), ('GreaterEq', '2 >= 4', False)])
def test_expression_bool(fieldset, mode, name, expr, result, npart=10):
' Test basic arithmetic expressions '
class TestParticle(ptype[mode]):
p = Variable('p', dtype=np.float32)
pset = ParticleSet(fieldset, pclass=TestParticle, lon=np.linspace(0.0, 1.0, npart), lat=(np.zeros(npart) + 0.5))
pset.execute(expr_kernel(('Test%s' % name), pset, expr), endtime=1.0, dt=1.0)
if (mode == 'jit'):
assert np.array([(result == (particle.p == 1)) for particle in pset]).all()
else:
assert np.array([(result == particle.p) for particle in pset]).all() | Test basic arithmetic expressions | parcels/tests/test_kernel_language.py | test_expression_bool | pdnooteboom/NA_forams | 1 | python | @pytest.mark.parametrize('mode', ['scipy', 'jit'])
@pytest.mark.parametrize('name, expr, result', [('True', 'True', True), ('False', 'False', False), ('And', 'True and False', False), ('Or', 'True or False', True), ('Equal', '5 == 5', True), ('Lesser', '5 < 3', False), ('LesserEq', '3 <= 5', True), ('Greater', '4 > 2', True), ('GreaterEq', '2 >= 4', False)])
def test_expression_bool(fieldset, mode, name, expr, result, npart=10):
' '
class TestParticle(ptype[mode]):
p = Variable('p', dtype=np.float32)
pset = ParticleSet(fieldset, pclass=TestParticle, lon=np.linspace(0.0, 1.0, npart), lat=(np.zeros(npart) + 0.5))
pset.execute(expr_kernel(('Test%s' % name), pset, expr), endtime=1.0, dt=1.0)
if (mode == 'jit'):
assert np.array([(result == (particle.p == 1)) for particle in pset]).all()
else:
assert np.array([(result == particle.p) for particle in pset]).all() | @pytest.mark.parametrize('mode', ['scipy', 'jit'])
@pytest.mark.parametrize('name, expr, result', [('True', 'True', True), ('False', 'False', False), ('And', 'True and False', False), ('Or', 'True or False', True), ('Equal', '5 == 5', True), ('Lesser', '5 < 3', False), ('LesserEq', '3 <= 5', True), ('Greater', '4 > 2', True), ('GreaterEq', '2 >= 4', False)])
def test_expression_bool(fieldset, mode, name, expr, result, npart=10):
' '
class TestParticle(ptype[mode]):
p = Variable('p', dtype=np.float32)
pset = ParticleSet(fieldset, pclass=TestParticle, lon=np.linspace(0.0, 1.0, npart), lat=(np.zeros(npart) + 0.5))
pset.execute(expr_kernel(('Test%s' % name), pset, expr), endtime=1.0, dt=1.0)
if (mode == 'jit'):
assert np.array([(result == (particle.p == 1)) for particle in pset]).all()
else:
assert np.array([(result == particle.p) for particle in pset]).all()<|docstring|>Test basic arithmetic expressions<|endoftext|> |
9861cb27286fe188ae4f0b8695bc7d0774d73e6258544417a84469e07961e2ed | @pytest.mark.parametrize('mode', ['scipy', 'jit'])
def test_while_if_break(fieldset, mode):
'Test while, if and break commands'
class TestParticle(ptype[mode]):
p = Variable('p', dtype=np.float32, initial=0.0)
pset = ParticleSet(fieldset, pclass=TestParticle, lon=[0], lat=[0])
def kernel(particle, fieldset, time):
while (particle.p < 30):
if (particle.p > 9):
break
particle.p += 1
if (particle.p > 5):
particle.p *= 2.0
pset.execute(kernel, endtime=1.0, dt=1.0)
assert np.allclose(np.array([p.p for p in pset]), 20.0, rtol=1e-12) | Test while, if and break commands | parcels/tests/test_kernel_language.py | test_while_if_break | pdnooteboom/NA_forams | 1 | python | @pytest.mark.parametrize('mode', ['scipy', 'jit'])
def test_while_if_break(fieldset, mode):
class TestParticle(ptype[mode]):
p = Variable('p', dtype=np.float32, initial=0.0)
pset = ParticleSet(fieldset, pclass=TestParticle, lon=[0], lat=[0])
def kernel(particle, fieldset, time):
while (particle.p < 30):
if (particle.p > 9):
break
particle.p += 1
if (particle.p > 5):
particle.p *= 2.0
pset.execute(kernel, endtime=1.0, dt=1.0)
assert np.allclose(np.array([p.p for p in pset]), 20.0, rtol=1e-12) | @pytest.mark.parametrize('mode', ['scipy', 'jit'])
def test_while_if_break(fieldset, mode):
class TestParticle(ptype[mode]):
p = Variable('p', dtype=np.float32, initial=0.0)
pset = ParticleSet(fieldset, pclass=TestParticle, lon=[0], lat=[0])
def kernel(particle, fieldset, time):
while (particle.p < 30):
if (particle.p > 9):
break
particle.p += 1
if (particle.p > 5):
particle.p *= 2.0
pset.execute(kernel, endtime=1.0, dt=1.0)
assert np.allclose(np.array([p.p for p in pset]), 20.0, rtol=1e-12)<|docstring|>Test while, if and break commands<|endoftext|> |
c49548e3a89b7fa39bce7bd90317bc69d049f8ea18efcc2c03c26b9b89f162c5 | @pytest.mark.parametrize('mode', ['scipy', 'jit'])
def test_nested_if(fieldset, mode):
'Test nested if commands'
class TestParticle(ptype[mode]):
p0 = Variable('p0', dtype=np.int32, initial=0)
p1 = Variable('p1', dtype=np.int32, initial=1)
pset = ParticleSet(fieldset, pclass=TestParticle, lon=0, lat=0)
def kernel(particle, fieldset, time):
if (particle.p1 >= particle.p0):
var = particle.p0
if ((var + 1) < particle.p1):
particle.p1 = (- 1)
pset.execute(kernel, endtime=10, dt=1.0)
assert np.allclose([pset[0].p0, pset[0].p1], [0, 1]) | Test nested if commands | parcels/tests/test_kernel_language.py | test_nested_if | pdnooteboom/NA_forams | 1 | python | @pytest.mark.parametrize('mode', ['scipy', 'jit'])
def test_nested_if(fieldset, mode):
class TestParticle(ptype[mode]):
p0 = Variable('p0', dtype=np.int32, initial=0)
p1 = Variable('p1', dtype=np.int32, initial=1)
pset = ParticleSet(fieldset, pclass=TestParticle, lon=0, lat=0)
def kernel(particle, fieldset, time):
if (particle.p1 >= particle.p0):
var = particle.p0
if ((var + 1) < particle.p1):
particle.p1 = (- 1)
pset.execute(kernel, endtime=10, dt=1.0)
assert np.allclose([pset[0].p0, pset[0].p1], [0, 1]) | @pytest.mark.parametrize('mode', ['scipy', 'jit'])
def test_nested_if(fieldset, mode):
class TestParticle(ptype[mode]):
p0 = Variable('p0', dtype=np.int32, initial=0)
p1 = Variable('p1', dtype=np.int32, initial=1)
pset = ParticleSet(fieldset, pclass=TestParticle, lon=0, lat=0)
def kernel(particle, fieldset, time):
if (particle.p1 >= particle.p0):
var = particle.p0
if ((var + 1) < particle.p1):
particle.p1 = (- 1)
pset.execute(kernel, endtime=10, dt=1.0)
assert np.allclose([pset[0].p0, pset[0].p1], [0, 1])<|docstring|>Test nested if commands<|endoftext|> |
2ada239fa1209cd24563a3f0d97fafd0128c11c472bae960c7ba53efd896ac19 | def test_parcels_tmpvar_in_kernel(fieldset):
"Tests for error thrown if vartiable with 'tmp' defined in custom kernel"
error_thrown = False
pset = ParticleSet(fieldset, pclass=JITParticle, lon=0, lat=0)
def kernel_tmpvar(particle, fieldset, time):
parcels_tmpvar0 = 0
try:
pset.execute(kernel_tmpvar, endtime=1, dt=1.0)
except NotImplementedError:
error_thrown = True
assert error_thrown | Tests for error thrown if vartiable with 'tmp' defined in custom kernel | parcels/tests/test_kernel_language.py | test_parcels_tmpvar_in_kernel | pdnooteboom/NA_forams | 1 | python | def test_parcels_tmpvar_in_kernel(fieldset):
error_thrown = False
pset = ParticleSet(fieldset, pclass=JITParticle, lon=0, lat=0)
def kernel_tmpvar(particle, fieldset, time):
parcels_tmpvar0 = 0
try:
pset.execute(kernel_tmpvar, endtime=1, dt=1.0)
except NotImplementedError:
error_thrown = True
assert error_thrown | def test_parcels_tmpvar_in_kernel(fieldset):
error_thrown = False
pset = ParticleSet(fieldset, pclass=JITParticle, lon=0, lat=0)
def kernel_tmpvar(particle, fieldset, time):
parcels_tmpvar0 = 0
try:
pset.execute(kernel_tmpvar, endtime=1, dt=1.0)
except NotImplementedError:
error_thrown = True
assert error_thrown<|docstring|>Tests for error thrown if vartiable with 'tmp' defined in custom kernel<|endoftext|> |
5f82f890e4182c0e92a3ceea6101ac1152b050338731f94c132b4b1a0209c5a6 | @pytest.mark.parametrize('mode', ['scipy', 'jit'])
def test_if_withfield(fieldset, mode):
'Test combination of if and Field sampling commands'
class TestParticle(ptype[mode]):
p = Variable('p', dtype=np.float32, initial=0.0)
pset = ParticleSet(fieldset, pclass=TestParticle, lon=[0], lat=[0])
def kernel(particle, fieldset, time):
u = fieldset.U[(time, 0, 0, 1.0)]
particle.p = 0
if (fieldset.U[(time, 0, 0, 1.0)] == u):
particle.p += 1
if (fieldset.U[(time, 0, 0, 1.0)] == fieldset.U[(time, 0, 0, 1.0)]):
particle.p += 1
if True:
particle.p += 1
if ((fieldset.U[(time, 0, 0, 1.0)] == u) and (1 == 1)):
particle.p += 1
if ((fieldset.U[(time, 0, 0, 1.0)] == fieldset.U[(time, 0, 0, 1.0)]) and (fieldset.U[(time, 0, 0, 1.0)] == fieldset.U[(time, 0, 0, 1.0)])):
particle.p += 1
if (fieldset.U[(time, 0, 0, 1.0)] == u):
particle.p += 1
else:
particle.p += 1000
if (fieldset.U[(time, 0, 0, 1.0)] == 3):
particle.p += 1000
else:
particle.p += 1
pset.execute(kernel, endtime=1.0, dt=1.0)
assert np.allclose(np.array([p.p for p in pset]), 7.0, rtol=1e-12) | Test combination of if and Field sampling commands | parcels/tests/test_kernel_language.py | test_if_withfield | pdnooteboom/NA_forams | 1 | python | @pytest.mark.parametrize('mode', ['scipy', 'jit'])
def test_if_withfield(fieldset, mode):
class TestParticle(ptype[mode]):
p = Variable('p', dtype=np.float32, initial=0.0)
pset = ParticleSet(fieldset, pclass=TestParticle, lon=[0], lat=[0])
def kernel(particle, fieldset, time):
u = fieldset.U[(time, 0, 0, 1.0)]
particle.p = 0
if (fieldset.U[(time, 0, 0, 1.0)] == u):
particle.p += 1
if (fieldset.U[(time, 0, 0, 1.0)] == fieldset.U[(time, 0, 0, 1.0)]):
particle.p += 1
if True:
particle.p += 1
if ((fieldset.U[(time, 0, 0, 1.0)] == u) and (1 == 1)):
particle.p += 1
if ((fieldset.U[(time, 0, 0, 1.0)] == fieldset.U[(time, 0, 0, 1.0)]) and (fieldset.U[(time, 0, 0, 1.0)] == fieldset.U[(time, 0, 0, 1.0)])):
particle.p += 1
if (fieldset.U[(time, 0, 0, 1.0)] == u):
particle.p += 1
else:
particle.p += 1000
if (fieldset.U[(time, 0, 0, 1.0)] == 3):
particle.p += 1000
else:
particle.p += 1
pset.execute(kernel, endtime=1.0, dt=1.0)
assert np.allclose(np.array([p.p for p in pset]), 7.0, rtol=1e-12) | @pytest.mark.parametrize('mode', ['scipy', 'jit'])
def test_if_withfield(fieldset, mode):
class TestParticle(ptype[mode]):
p = Variable('p', dtype=np.float32, initial=0.0)
pset = ParticleSet(fieldset, pclass=TestParticle, lon=[0], lat=[0])
def kernel(particle, fieldset, time):
u = fieldset.U[(time, 0, 0, 1.0)]
particle.p = 0
if (fieldset.U[(time, 0, 0, 1.0)] == u):
particle.p += 1
if (fieldset.U[(time, 0, 0, 1.0)] == fieldset.U[(time, 0, 0, 1.0)]):
particle.p += 1
if True:
particle.p += 1
if ((fieldset.U[(time, 0, 0, 1.0)] == u) and (1 == 1)):
particle.p += 1
if ((fieldset.U[(time, 0, 0, 1.0)] == fieldset.U[(time, 0, 0, 1.0)]) and (fieldset.U[(time, 0, 0, 1.0)] == fieldset.U[(time, 0, 0, 1.0)])):
particle.p += 1
if (fieldset.U[(time, 0, 0, 1.0)] == u):
particle.p += 1
else:
particle.p += 1000
if (fieldset.U[(time, 0, 0, 1.0)] == 3):
particle.p += 1000
else:
particle.p += 1
pset.execute(kernel, endtime=1.0, dt=1.0)
assert np.allclose(np.array([p.p for p in pset]), 7.0, rtol=1e-12)<|docstring|>Test combination of if and Field sampling commands<|endoftext|> |
f3285366ad648095b1a0bb19a56886e961324c9860cf0ab4cfaadf869622fe76 | @pytest.mark.parametrize('mode', ['scipy', pytest.param('jit', marks=pytest.mark.xfail(((sys.version_info >= (3, 0)) or (sys.platform == 'win32')), reason='py.test FD capturing does not work for jit on python3 or Win'))])
def test_print(fieldset, mode, capfd):
'Test print statements'
class TestParticle(ptype[mode]):
p = Variable('p', dtype=np.float32, initial=0.0)
pset = ParticleSet(fieldset, pclass=TestParticle, lon=[0.5], lat=[0.5])
def kernel(particle, fieldset, time):
particle.p = fieldset.U[(time, particle.depth, particle.lat, particle.lon)]
tmp = 5
print(('%d %f %f' % (particle.id, particle.p, tmp)))
pset.execute(kernel, endtime=1.0, dt=1.0)
(out, err) = capfd.readouterr()
lst = out.split(' ')
tol = 1e-08
assert ((abs((float(lst[0]) - pset[0].id)) < tol) and (abs((float(lst[1]) - pset[0].p)) < tol) and (abs((float(lst[2]) - 5)) < tol)) | Test print statements | parcels/tests/test_kernel_language.py | test_print | pdnooteboom/NA_forams | 1 | python | @pytest.mark.parametrize('mode', ['scipy', pytest.param('jit', marks=pytest.mark.xfail(((sys.version_info >= (3, 0)) or (sys.platform == 'win32')), reason='py.test FD capturing does not work for jit on python3 or Win'))])
def test_print(fieldset, mode, capfd):
class TestParticle(ptype[mode]):
p = Variable('p', dtype=np.float32, initial=0.0)
pset = ParticleSet(fieldset, pclass=TestParticle, lon=[0.5], lat=[0.5])
def kernel(particle, fieldset, time):
particle.p = fieldset.U[(time, particle.depth, particle.lat, particle.lon)]
tmp = 5
print(('%d %f %f' % (particle.id, particle.p, tmp)))
pset.execute(kernel, endtime=1.0, dt=1.0)
(out, err) = capfd.readouterr()
lst = out.split(' ')
tol = 1e-08
assert ((abs((float(lst[0]) - pset[0].id)) < tol) and (abs((float(lst[1]) - pset[0].p)) < tol) and (abs((float(lst[2]) - 5)) < tol)) | @pytest.mark.parametrize('mode', ['scipy', pytest.param('jit', marks=pytest.mark.xfail(((sys.version_info >= (3, 0)) or (sys.platform == 'win32')), reason='py.test FD capturing does not work for jit on python3 or Win'))])
def test_print(fieldset, mode, capfd):
class TestParticle(ptype[mode]):
p = Variable('p', dtype=np.float32, initial=0.0)
pset = ParticleSet(fieldset, pclass=TestParticle, lon=[0.5], lat=[0.5])
def kernel(particle, fieldset, time):
particle.p = fieldset.U[(time, particle.depth, particle.lat, particle.lon)]
tmp = 5
print(('%d %f %f' % (particle.id, particle.p, tmp)))
pset.execute(kernel, endtime=1.0, dt=1.0)
(out, err) = capfd.readouterr()
lst = out.split(' ')
tol = 1e-08
assert ((abs((float(lst[0]) - pset[0].id)) < tol) and (abs((float(lst[1]) - pset[0].p)) < tol) and (abs((float(lst[2]) - 5)) < tol))<|docstring|>Test print statements<|endoftext|> |
8d8e43ca053aefcc952816a71d6574e67e836b8204428fd36a36068417551662 | @pytest.mark.parametrize('mode', ['scipy', 'jit'])
@pytest.mark.parametrize('rngfunc, rngargs', [('random', []), ('uniform', [0.0, 20.0]), ('randint', [0, 20])])
def test_random_float(fieldset, mode, rngfunc, rngargs, npart=10):
' Test basic random number generation '
class TestParticle(ptype[mode]):
p = Variable('p', dtype=(np.float32 if (rngfunc == 'randint') else np.float32))
pset = ParticleSet(fieldset, pclass=TestParticle, lon=np.linspace(0.0, 1.0, npart), lat=(np.zeros(npart) + 0.5))
series = random_series(npart, rngfunc, rngargs, mode)
kernel = expr_kernel(('TestRandom_%s' % rngfunc), pset, ('random.%s(%s)' % (rngfunc, ', '.join([str(a) for a in rngargs]))))
pset.execute(kernel, endtime=1.0, dt=1.0)
assert np.allclose(np.array([p.p for p in pset]), series, rtol=1e-12) | Test basic random number generation | parcels/tests/test_kernel_language.py | test_random_float | pdnooteboom/NA_forams | 1 | python | @pytest.mark.parametrize('mode', ['scipy', 'jit'])
@pytest.mark.parametrize('rngfunc, rngargs', [('random', []), ('uniform', [0.0, 20.0]), ('randint', [0, 20])])
def test_random_float(fieldset, mode, rngfunc, rngargs, npart=10):
' '
class TestParticle(ptype[mode]):
p = Variable('p', dtype=(np.float32 if (rngfunc == 'randint') else np.float32))
pset = ParticleSet(fieldset, pclass=TestParticle, lon=np.linspace(0.0, 1.0, npart), lat=(np.zeros(npart) + 0.5))
series = random_series(npart, rngfunc, rngargs, mode)
kernel = expr_kernel(('TestRandom_%s' % rngfunc), pset, ('random.%s(%s)' % (rngfunc, ', '.join([str(a) for a in rngargs]))))
pset.execute(kernel, endtime=1.0, dt=1.0)
assert np.allclose(np.array([p.p for p in pset]), series, rtol=1e-12) | @pytest.mark.parametrize('mode', ['scipy', 'jit'])
@pytest.mark.parametrize('rngfunc, rngargs', [('random', []), ('uniform', [0.0, 20.0]), ('randint', [0, 20])])
def test_random_float(fieldset, mode, rngfunc, rngargs, npart=10):
' '
class TestParticle(ptype[mode]):
p = Variable('p', dtype=(np.float32 if (rngfunc == 'randint') else np.float32))
pset = ParticleSet(fieldset, pclass=TestParticle, lon=np.linspace(0.0, 1.0, npart), lat=(np.zeros(npart) + 0.5))
series = random_series(npart, rngfunc, rngargs, mode)
kernel = expr_kernel(('TestRandom_%s' % rngfunc), pset, ('random.%s(%s)' % (rngfunc, ', '.join([str(a) for a in rngargs]))))
pset.execute(kernel, endtime=1.0, dt=1.0)
assert np.allclose(np.array([p.p for p in pset]), series, rtol=1e-12)<|docstring|>Test basic random number generation<|endoftext|> |
ec72a6e8b91612a8a8d121658a041a2601bc305660b12dd2135751d9753dc624 | def describe_download(self):
"\n Returns structured representation of component capabilities\n Component capabilities include a list of `acid_domains` which indicate for which\n domain of resources the service provides licensing for (i.e., 'Jamendo' domain means all\n resources identified by Jamendo:xxx)\n :return: tuple with (component name, dictionary with component capabilities)\n "
return (DOWNLOAD_COMPONENT, {ACID_DOMAINS_DESCRIPTION_KEYWORD: self.DOWNLOAD_ACID_DOMAINS}) | Returns structured representation of component capabilities
Component capabilities include a list of `acid_domains` which indicate for which
domain of resources the service provides licensing for (i.e., 'Jamendo' domain means all
resources identified by Jamendo:xxx)
:return: tuple with (component name, dictionary with component capabilities) | services/acservice/download.py | describe_download | FabianLauer/ac-mediator | 9 | python | def describe_download(self):
"\n Returns structured representation of component capabilities\n Component capabilities include a list of `acid_domains` which indicate for which\n domain of resources the service provides licensing for (i.e., 'Jamendo' domain means all\n resources identified by Jamendo:xxx)\n :return: tuple with (component name, dictionary with component capabilities)\n "
return (DOWNLOAD_COMPONENT, {ACID_DOMAINS_DESCRIPTION_KEYWORD: self.DOWNLOAD_ACID_DOMAINS}) | def describe_download(self):
"\n Returns structured representation of component capabilities\n Component capabilities include a list of `acid_domains` which indicate for which\n domain of resources the service provides licensing for (i.e., 'Jamendo' domain means all\n resources identified by Jamendo:xxx)\n :return: tuple with (component name, dictionary with component capabilities)\n "
return (DOWNLOAD_COMPONENT, {ACID_DOMAINS_DESCRIPTION_KEYWORD: self.DOWNLOAD_ACID_DOMAINS})<|docstring|>Returns structured representation of component capabilities
Component capabilities include a list of `acid_domains` which indicate for which
domain of resources the service provides licensing for (i.e., 'Jamendo' domain means all
resources identified by Jamendo:xxx)
:return: tuple with (component name, dictionary with component capabilities)<|endoftext|> |
402e8900b777f5cbba0560b42d71667d72bb5eaca2f71bfbacc7f80c513c503b | def get_download_url(self, context, acid, *args, **kwargs):
"\n Given an Audio Commons unique resource identifier (acid), this function returns a url\n where the resource can be downloaded by the client without the need of extra authentication.\n If the 3rd party service can't provide a link to download that resource or some other errors\n occur during the collection of the url, an AC exceptions should be raised.\n Individual services can extend this method with extra parameters to make it more suitable to their\n needs (e.g., to call the method given an already retrieved resource and avoid in this way an\n extra request).\n :param context: Dict with context information for the request (see api.views.get_request_context)\n :param acid: Audio Commons unique resource identifier\n :return: url to download the input resource (string)\n "
raise NotImplementedError('Service must implement method ACLicensingMixin.get_download_url') | Given an Audio Commons unique resource identifier (acid), this function returns a url
where the resource can be downloaded by the client without the need of extra authentication.
If the 3rd party service can't provide a link to download that resource or some other errors
occur during the collection of the url, an AC exceptions should be raised.
Individual services can extend this method with extra parameters to make it more suitable to their
needs (e.g., to call the method given an already retrieved resource and avoid in this way an
extra request).
:param context: Dict with context information for the request (see api.views.get_request_context)
:param acid: Audio Commons unique resource identifier
:return: url to download the input resource (string) | services/acservice/download.py | get_download_url | FabianLauer/ac-mediator | 9 | python | def get_download_url(self, context, acid, *args, **kwargs):
"\n Given an Audio Commons unique resource identifier (acid), this function returns a url\n where the resource can be downloaded by the client without the need of extra authentication.\n If the 3rd party service can't provide a link to download that resource or some other errors\n occur during the collection of the url, an AC exceptions should be raised.\n Individual services can extend this method with extra parameters to make it more suitable to their\n needs (e.g., to call the method given an already retrieved resource and avoid in this way an\n extra request).\n :param context: Dict with context information for the request (see api.views.get_request_context)\n :param acid: Audio Commons unique resource identifier\n :return: url to download the input resource (string)\n "
raise NotImplementedError('Service must implement method ACLicensingMixin.get_download_url') | def get_download_url(self, context, acid, *args, **kwargs):
"\n Given an Audio Commons unique resource identifier (acid), this function returns a url\n where the resource can be downloaded by the client without the need of extra authentication.\n If the 3rd party service can't provide a link to download that resource or some other errors\n occur during the collection of the url, an AC exceptions should be raised.\n Individual services can extend this method with extra parameters to make it more suitable to their\n needs (e.g., to call the method given an already retrieved resource and avoid in this way an\n extra request).\n :param context: Dict with context information for the request (see api.views.get_request_context)\n :param acid: Audio Commons unique resource identifier\n :return: url to download the input resource (string)\n "
raise NotImplementedError('Service must implement method ACLicensingMixin.get_download_url')<|docstring|>Given an Audio Commons unique resource identifier (acid), this function returns a url
where the resource can be downloaded by the client without the need of extra authentication.
If the 3rd party service can't provide a link to download that resource or some other errors
occur during the collection of the url, an AC exceptions should be raised.
Individual services can extend this method with extra parameters to make it more suitable to their
needs (e.g., to call the method given an already retrieved resource and avoid in this way an
extra request).
:param context: Dict with context information for the request (see api.views.get_request_context)
:param acid: Audio Commons unique resource identifier
:return: url to download the input resource (string)<|endoftext|> |
067f1541dd3717f6a2c47d5d235102bd5e2b049c7c322c9a471fc36e3b15d060 | def download(self, context, acid, *args, **kwargs):
"\n This endpoint returns a download url and raises warnings that might contain relevant\n information for the application. To get the URL, it uses 'get_download_url' method, therefore\n 'get_download_url' is the main method that should be overwritten by third party services.\n Raise warnings using the BaseACService.add_response_warning method.\n :param context: Dict with context information for the request (see api.views.get_request_context)\n :param acid: Audio Commons unique resource identifier\n :return: url where to download the resource\n "
return {'download_url': self.get_download_url(context, acid, *args, **kwargs)} | This endpoint returns a download url and raises warnings that might contain relevant
information for the application. To get the URL, it uses 'get_download_url' method, therefore
'get_download_url' is the main method that should be overwritten by third party services.
Raise warnings using the BaseACService.add_response_warning method.
:param context: Dict with context information for the request (see api.views.get_request_context)
:param acid: Audio Commons unique resource identifier
:return: url where to download the resource | services/acservice/download.py | download | FabianLauer/ac-mediator | 9 | python | def download(self, context, acid, *args, **kwargs):
"\n This endpoint returns a download url and raises warnings that might contain relevant\n information for the application. To get the URL, it uses 'get_download_url' method, therefore\n 'get_download_url' is the main method that should be overwritten by third party services.\n Raise warnings using the BaseACService.add_response_warning method.\n :param context: Dict with context information for the request (see api.views.get_request_context)\n :param acid: Audio Commons unique resource identifier\n :return: url where to download the resource\n "
return {'download_url': self.get_download_url(context, acid, *args, **kwargs)} | def download(self, context, acid, *args, **kwargs):
"\n This endpoint returns a download url and raises warnings that might contain relevant\n information for the application. To get the URL, it uses 'get_download_url' method, therefore\n 'get_download_url' is the main method that should be overwritten by third party services.\n Raise warnings using the BaseACService.add_response_warning method.\n :param context: Dict with context information for the request (see api.views.get_request_context)\n :param acid: Audio Commons unique resource identifier\n :return: url where to download the resource\n "
return {'download_url': self.get_download_url(context, acid, *args, **kwargs)}<|docstring|>This endpoint returns a download url and raises warnings that might contain relevant
information for the application. To get the URL, it uses 'get_download_url' method, therefore
'get_download_url' is the main method that should be overwritten by third party services.
Raise warnings using the BaseACService.add_response_warning method.
:param context: Dict with context information for the request (see api.views.get_request_context)
:param acid: Audio Commons unique resource identifier
:return: url where to download the resource<|endoftext|> |
3b0e9860d195ec8082554446d8c0668a722f635a83414c0d7b0358757c0152f8 | def next_page_token(self, response: requests.Response) -> Optional[Mapping[(str, Any)]]:
"\n TODO: Override this method to define a pagination strategy. If you will not be using pagination, no action is required - just return None.\n\n This method should return a Mapping (e.g: dict) containing whatever information required to make paginated requests. This dict is passed\n to most other methods in this class to help you form headers, request bodies, query params, etc..\n\n For example, if the API accepts a 'page' parameter to determine which page of the result to return, and a response from the API contains a\n 'page' number, then this method should probably return a dict {'page': response.json()['page'] + 1} to increment the page count by 1.\n The request_params method should then read the input next_page_token and set the 'page' param to next_page_token['page'].\n\n :param response: the most recent response from the API\n :return If there is another page in the result, a mapping (e.g: dict) containing information needed to query the next page in the response.\n If there are no more pages in the result, return None.\n "
return None | TODO: Override this method to define a pagination strategy. If you will not be using pagination, no action is required - just return None.
This method should return a Mapping (e.g: dict) containing whatever information required to make paginated requests. This dict is passed
to most other methods in this class to help you form headers, request bodies, query params, etc..
For example, if the API accepts a 'page' parameter to determine which page of the result to return, and a response from the API contains a
'page' number, then this method should probably return a dict {'page': response.json()['page'] + 1} to increment the page count by 1.
The request_params method should then read the input next_page_token and set the 'page' param to next_page_token['page'].
:param response: the most recent response from the API
:return If there is another page in the result, a mapping (e.g: dict) containing information needed to query the next page in the response.
If there are no more pages in the result, return None. | airbyte-integrations/connectors/source-ose-realtime/source_ose_realtime/source.py | next_page_token | NMWDI/airbyte | 1 | python | def next_page_token(self, response: requests.Response) -> Optional[Mapping[(str, Any)]]:
"\n TODO: Override this method to define a pagination strategy. If you will not be using pagination, no action is required - just return None.\n\n This method should return a Mapping (e.g: dict) containing whatever information required to make paginated requests. This dict is passed\n to most other methods in this class to help you form headers, request bodies, query params, etc..\n\n For example, if the API accepts a 'page' parameter to determine which page of the result to return, and a response from the API contains a\n 'page' number, then this method should probably return a dict {'page': response.json()['page'] + 1} to increment the page count by 1.\n The request_params method should then read the input next_page_token and set the 'page' param to next_page_token['page'].\n\n :param response: the most recent response from the API\n :return If there is another page in the result, a mapping (e.g: dict) containing information needed to query the next page in the response.\n If there are no more pages in the result, return None.\n "
return None | def next_page_token(self, response: requests.Response) -> Optional[Mapping[(str, Any)]]:
"\n TODO: Override this method to define a pagination strategy. If you will not be using pagination, no action is required - just return None.\n\n This method should return a Mapping (e.g: dict) containing whatever information required to make paginated requests. This dict is passed\n to most other methods in this class to help you form headers, request bodies, query params, etc..\n\n For example, if the API accepts a 'page' parameter to determine which page of the result to return, and a response from the API contains a\n 'page' number, then this method should probably return a dict {'page': response.json()['page'] + 1} to increment the page count by 1.\n The request_params method should then read the input next_page_token and set the 'page' param to next_page_token['page'].\n\n :param response: the most recent response from the API\n :return If there is another page in the result, a mapping (e.g: dict) containing information needed to query the next page in the response.\n If there are no more pages in the result, return None.\n "
return None<|docstring|>TODO: Override this method to define a pagination strategy. If you will not be using pagination, no action is required - just return None.
This method should return a Mapping (e.g: dict) containing whatever information required to make paginated requests. This dict is passed
to most other methods in this class to help you form headers, request bodies, query params, etc..
For example, if the API accepts a 'page' parameter to determine which page of the result to return, and a response from the API contains a
'page' number, then this method should probably return a dict {'page': response.json()['page'] + 1} to increment the page count by 1.
The request_params method should then read the input next_page_token and set the 'page' param to next_page_token['page'].
:param response: the most recent response from the API
:return If there is another page in the result, a mapping (e.g: dict) containing information needed to query the next page in the response.
If there are no more pages in the result, return None.<|endoftext|> |
f19b3cbc72caa64811359a89185c430c217581d6fa33e5112eb166214be5f34b | def request_params(self, stream_state: Mapping[(str, Any)], stream_slice: Mapping[(str, any)]=None, next_page_token: Mapping[(str, Any)]=None) -> MutableMapping[(str, Any)]:
"\n TODO: Override this method to define any query parameters to be set. Remove this method if you don't need to define request params.\n Usually contains common params e.g. pagination size etc.\n "
return {} | TODO: Override this method to define any query parameters to be set. Remove this method if you don't need to define request params.
Usually contains common params e.g. pagination size etc. | airbyte-integrations/connectors/source-ose-realtime/source_ose_realtime/source.py | request_params | NMWDI/airbyte | 1 | python | def request_params(self, stream_state: Mapping[(str, Any)], stream_slice: Mapping[(str, any)]=None, next_page_token: Mapping[(str, Any)]=None) -> MutableMapping[(str, Any)]:
"\n TODO: Override this method to define any query parameters to be set. Remove this method if you don't need to define request params.\n Usually contains common params e.g. pagination size etc.\n "
return {} | def request_params(self, stream_state: Mapping[(str, Any)], stream_slice: Mapping[(str, any)]=None, next_page_token: Mapping[(str, Any)]=None) -> MutableMapping[(str, Any)]:
"\n TODO: Override this method to define any query parameters to be set. Remove this method if you don't need to define request params.\n Usually contains common params e.g. pagination size etc.\n "
return {}<|docstring|>TODO: Override this method to define any query parameters to be set. Remove this method if you don't need to define request params.
Usually contains common params e.g. pagination size etc.<|endoftext|> |
36bbe6bb0ec6dc887d5f8cbdaeacde7e9c2914125a7b51b15faaebd50d00bd79 | def parse_response(self, response: requests.Response, **kwargs) -> Iterable[Mapping]:
'\n TODO: Override this method to define how a response is parsed.\n :return an iterable containing each record in the response\n '
(yield from response.json()) | TODO: Override this method to define how a response is parsed.
:return an iterable containing each record in the response | airbyte-integrations/connectors/source-ose-realtime/source_ose_realtime/source.py | parse_response | NMWDI/airbyte | 1 | python | def parse_response(self, response: requests.Response, **kwargs) -> Iterable[Mapping]:
'\n TODO: Override this method to define how a response is parsed.\n :return an iterable containing each record in the response\n '
(yield from response.json()) | def parse_response(self, response: requests.Response, **kwargs) -> Iterable[Mapping]:
'\n TODO: Override this method to define how a response is parsed.\n :return an iterable containing each record in the response\n '
(yield from response.json())<|docstring|>TODO: Override this method to define how a response is parsed.
:return an iterable containing each record in the response<|endoftext|> |
06f6a890d78b023065d2fbd8041e6d33e730228307941b1535cf325e75b35a04 | @property
def cursor_field(self) -> str:
"\n TODO\n Override to return the cursor field used by this stream e.g: an API entity might always use created_at as the cursor field. This is\n usually id or date based. This field's presence tells the framework this in an incremental stream. Required for incremental.\n\n :return str: The name of the cursor field.\n "
return [] | TODO
Override to return the cursor field used by this stream e.g: an API entity might always use created_at as the cursor field. This is
usually id or date based. This field's presence tells the framework this in an incremental stream. Required for incremental.
:return str: The name of the cursor field. | airbyte-integrations/connectors/source-ose-realtime/source_ose_realtime/source.py | cursor_field | NMWDI/airbyte | 1 | python | @property
def cursor_field(self) -> str:
"\n TODO\n Override to return the cursor field used by this stream e.g: an API entity might always use created_at as the cursor field. This is\n usually id or date based. This field's presence tells the framework this in an incremental stream. Required for incremental.\n\n :return str: The name of the cursor field.\n "
return [] | @property
def cursor_field(self) -> str:
"\n TODO\n Override to return the cursor field used by this stream e.g: an API entity might always use created_at as the cursor field. This is\n usually id or date based. This field's presence tells the framework this in an incremental stream. Required for incremental.\n\n :return str: The name of the cursor field.\n "
return []<|docstring|>TODO
Override to return the cursor field used by this stream e.g: an API entity might always use created_at as the cursor field. This is
usually id or date based. This field's presence tells the framework this in an incremental stream. Required for incremental.
:return str: The name of the cursor field.<|endoftext|> |
32a605a84f260d643336018f745ed2bdec45e05d8dbdb9d06a221c28c1f98163 | def get_updated_state(self, current_stream_state: MutableMapping[(str, Any)], latest_record: Mapping[(str, Any)]) -> Mapping[(str, Any)]:
"\n Override to determine the latest state after reading the latest record. This typically compared the cursor_field from the latest record and\n the current state and picks the 'most' recent cursor. This is how a stream's state is determined. Required for incremental.\n "
return {'Station_ID': latest_record['Station_ID'], 'timestamp': latest_record[self.cursor_field]} | Override to determine the latest state after reading the latest record. This typically compared the cursor_field from the latest record and
the current state and picks the 'most' recent cursor. This is how a stream's state is determined. Required for incremental. | airbyte-integrations/connectors/source-ose-realtime/source_ose_realtime/source.py | get_updated_state | NMWDI/airbyte | 1 | python | def get_updated_state(self, current_stream_state: MutableMapping[(str, Any)], latest_record: Mapping[(str, Any)]) -> Mapping[(str, Any)]:
"\n Override to determine the latest state after reading the latest record. This typically compared the cursor_field from the latest record and\n the current state and picks the 'most' recent cursor. This is how a stream's state is determined. Required for incremental.\n "
return {'Station_ID': latest_record['Station_ID'], 'timestamp': latest_record[self.cursor_field]} | def get_updated_state(self, current_stream_state: MutableMapping[(str, Any)], latest_record: Mapping[(str, Any)]) -> Mapping[(str, Any)]:
"\n Override to determine the latest state after reading the latest record. This typically compared the cursor_field from the latest record and\n the current state and picks the 'most' recent cursor. This is how a stream's state is determined. Required for incremental.\n "
return {'Station_ID': latest_record['Station_ID'], 'timestamp': latest_record[self.cursor_field]}<|docstring|>Override to determine the latest state after reading the latest record. This typically compared the cursor_field from the latest record and
the current state and picks the 'most' recent cursor. This is how a stream's state is determined. Required for incremental.<|endoftext|> |
1f6c56ee945d425796f6062f0daefc6ea0bf1d354a29c9ccb51f361f88da5f93 | def path(self, **kwargs) -> str:
'\n TODO: Override this method to define the path this stream corresponds to. E.g. if the url is https://example-api.com/v1/employees then this should\n return "single". Required.\n '
station_id = self.get_station_id()
dt = (datetime.datetime.now() - datetime.timedelta(days=30)).strftime('%Y-%m-%d')
return f'meas_readings/{station_id}/{dt}' | TODO: Override this method to define the path this stream corresponds to. E.g. if the url is https://example-api.com/v1/employees then this should
return "single". Required. | airbyte-integrations/connectors/source-ose-realtime/source_ose_realtime/source.py | path | NMWDI/airbyte | 1 | python | def path(self, **kwargs) -> str:
'\n TODO: Override this method to define the path this stream corresponds to. E.g. if the url is https://example-api.com/v1/employees then this should\n return "single". Required.\n '
station_id = self.get_station_id()
dt = (datetime.datetime.now() - datetime.timedelta(days=30)).strftime('%Y-%m-%d')
return f'meas_readings/{station_id}/{dt}' | def path(self, **kwargs) -> str:
'\n TODO: Override this method to define the path this stream corresponds to. E.g. if the url is https://example-api.com/v1/employees then this should\n return "single". Required.\n '
station_id = self.get_station_id()
dt = (datetime.datetime.now() - datetime.timedelta(days=30)).strftime('%Y-%m-%d')
return f'meas_readings/{station_id}/{dt}'<|docstring|>TODO: Override this method to define the path this stream corresponds to. E.g. if the url is https://example-api.com/v1/employees then this should
return "single". Required.<|endoftext|> |
81a4986de2c6ae8b68bc8a48e01baed187b01a5b7f14119a4b447ef8f545723e | def check_connection(self, logger, config) -> Tuple[(bool, any)]:
"\n TODO: Implement a connection check to validate that the user-provided config can be used to connect to the underlying API\n\n See https://github.com/airbytehq/airbyte/blob/master/airbyte-integrations/connectors/source-stripe/source_stripe/source.py#L232\n for an example.\n\n :param config: the user-input config object conforming to the connector's spec.json\n :param logger: logger object\n :return Tuple[bool, any]: (True, None) if the input config can be used to connect to the API successfully, (False, error) otherwise.\n "
return (True, None) | TODO: Implement a connection check to validate that the user-provided config can be used to connect to the underlying API
See https://github.com/airbytehq/airbyte/blob/master/airbyte-integrations/connectors/source-stripe/source_stripe/source.py#L232
for an example.
:param config: the user-input config object conforming to the connector's spec.json
:param logger: logger object
:return Tuple[bool, any]: (True, None) if the input config can be used to connect to the API successfully, (False, error) otherwise. | airbyte-integrations/connectors/source-ose-realtime/source_ose_realtime/source.py | check_connection | NMWDI/airbyte | 1 | python | def check_connection(self, logger, config) -> Tuple[(bool, any)]:
"\n TODO: Implement a connection check to validate that the user-provided config can be used to connect to the underlying API\n\n See https://github.com/airbytehq/airbyte/blob/master/airbyte-integrations/connectors/source-stripe/source_stripe/source.py#L232\n for an example.\n\n :param config: the user-input config object conforming to the connector's spec.json\n :param logger: logger object\n :return Tuple[bool, any]: (True, None) if the input config can be used to connect to the API successfully, (False, error) otherwise.\n "
return (True, None) | def check_connection(self, logger, config) -> Tuple[(bool, any)]:
"\n TODO: Implement a connection check to validate that the user-provided config can be used to connect to the underlying API\n\n See https://github.com/airbytehq/airbyte/blob/master/airbyte-integrations/connectors/source-stripe/source_stripe/source.py#L232\n for an example.\n\n :param config: the user-input config object conforming to the connector's spec.json\n :param logger: logger object\n :return Tuple[bool, any]: (True, None) if the input config can be used to connect to the API successfully, (False, error) otherwise.\n "
return (True, None)<|docstring|>TODO: Implement a connection check to validate that the user-provided config can be used to connect to the underlying API
See https://github.com/airbytehq/airbyte/blob/master/airbyte-integrations/connectors/source-stripe/source_stripe/source.py#L232
for an example.
:param config: the user-input config object conforming to the connector's spec.json
:param logger: logger object
:return Tuple[bool, any]: (True, None) if the input config can be used to connect to the API successfully, (False, error) otherwise.<|endoftext|> |
9210225f3380745d03598f1db10ce5636933368d6ce41af8be8654217febfaac | def streams(self, config: Mapping[(str, Any)]) -> List[Stream]:
'\n TODO: Replace the streams below with your own streams.\n\n :param config: A Mapping of the user input configuration as defined in the connector spec.\n '
auth = TokenAuthenticator(token='api_key')
return [MeasReadings(authenticator=auth)] | TODO: Replace the streams below with your own streams.
:param config: A Mapping of the user input configuration as defined in the connector spec. | airbyte-integrations/connectors/source-ose-realtime/source_ose_realtime/source.py | streams | NMWDI/airbyte | 1 | python | def streams(self, config: Mapping[(str, Any)]) -> List[Stream]:
'\n TODO: Replace the streams below with your own streams.\n\n :param config: A Mapping of the user input configuration as defined in the connector spec.\n '
auth = TokenAuthenticator(token='api_key')
return [MeasReadings(authenticator=auth)] | def streams(self, config: Mapping[(str, Any)]) -> List[Stream]:
'\n TODO: Replace the streams below with your own streams.\n\n :param config: A Mapping of the user input configuration as defined in the connector spec.\n '
auth = TokenAuthenticator(token='api_key')
return [MeasReadings(authenticator=auth)]<|docstring|>TODO: Replace the streams below with your own streams.
:param config: A Mapping of the user input configuration as defined in the connector spec.<|endoftext|> |
dcb144d6036a7713e2d1389ee317a4e5eca46a01bb3bf1bafccc276f227797a1 | def check_event(event_list: list[str]):
'\n 检查事件\n '
async def _check_event(bot: 'Bot', event: 'Event', state: T_State) -> bool:
return (event.get_event_name() in event_list)
return Rule(_check_event) | 检查事件 | src/managers/admin_manager/data_source.py | check_event | 490720818/jx3_bot | 22 | python | def check_event(event_list: list[str]):
'\n \n '
async def _check_event(bot: 'Bot', event: 'Event', state: T_State) -> bool:
return (event.get_event_name() in event_list)
return Rule(_check_event) | def check_event(event_list: list[str]):
'\n \n '
async def _check_event(bot: 'Bot', event: 'Event', state: T_State) -> bool:
return (event.get_event_name() in event_list)
return Rule(_check_event)<|docstring|>检查事件<|endoftext|> |
28472214c6cb3895e11ab12f6695dda01bcc7e303fff1bc6236725ec42a050fc | async def get_bot_owner(bot_id: int) -> Optional[int]:
'获取机器人管理员账号'
owner = (await BotInfo.get_owner(bot_id))
return owner | 获取机器人管理员账号 | src/managers/admin_manager/data_source.py | get_bot_owner | 490720818/jx3_bot | 22 | python | async def get_bot_owner(bot_id: int) -> Optional[int]:
owner = (await BotInfo.get_owner(bot_id))
return owner | async def get_bot_owner(bot_id: int) -> Optional[int]:
owner = (await BotInfo.get_owner(bot_id))
return owner<|docstring|>获取机器人管理员账号<|endoftext|> |
05b1f4d8814bc2c2a70482365281751987665093b5e2168c10f750ab772963e8 | async def set_robot_status(bot_id: int, group_id: int, status: bool) -> bool:
'设置机器人开关'
return (await GroupInfo.set_robot_status(bot_id, group_id, status)) | 设置机器人开关 | src/managers/admin_manager/data_source.py | set_robot_status | 490720818/jx3_bot | 22 | python | async def set_robot_status(bot_id: int, group_id: int, status: bool) -> bool:
return (await GroupInfo.set_robot_status(bot_id, group_id, status)) | async def set_robot_status(bot_id: int, group_id: int, status: bool) -> bool:
return (await GroupInfo.set_robot_status(bot_id, group_id, status))<|docstring|>设置机器人开关<|endoftext|> |
af4b4b7f5166a382addff3543a04bf8011e577f062e97115aa590b30b31d2cf1 | async def get_all_data(bot_id: int) -> list[dict]:
'\n :返回所有数据,dict字段:\n * group_id:群号\n * group_name:群名\n * sign_nums:签到数\n * server:服务器名\n * robot_status:运行状态\n * active:活跃值\n '
return (await GroupInfo.get_all_data(bot_id)) | :返回所有数据,dict字段:
* group_id:群号
* group_name:群名
* sign_nums:签到数
* server:服务器名
* robot_status:运行状态
* active:活跃值 | src/managers/admin_manager/data_source.py | get_all_data | 490720818/jx3_bot | 22 | python | async def get_all_data(bot_id: int) -> list[dict]:
'\n :返回所有数据,dict字段:\n * group_id:群号\n * group_name:群名\n * sign_nums:签到数\n * server:服务器名\n * robot_status:运行状态\n * active:活跃值\n '
return (await GroupInfo.get_all_data(bot_id)) | async def get_all_data(bot_id: int) -> list[dict]:
'\n :返回所有数据,dict字段:\n * group_id:群号\n * group_name:群名\n * sign_nums:签到数\n * server:服务器名\n * robot_status:运行状态\n * active:活跃值\n '
return (await GroupInfo.get_all_data(bot_id))<|docstring|>:返回所有数据,dict字段:
* group_id:群号
* group_name:群名
* sign_nums:签到数
* server:服务器名
* robot_status:运行状态
* active:活跃值<|endoftext|> |
0f923f0f53965c9b6ea3ce9ccac6028060b0f8aa6ca7faf7881592bbf0f34734 | def get_text_num(text: str) -> Tuple[(bool, int)]:
'从信息中获取开关,群号'
_status = text.split(' ')[0]
_group_id = text.split(' ')[1]
status = (_status == '打开')
group_id = int(_group_id)
return (status, group_id) | 从信息中获取开关,群号 | src/managers/admin_manager/data_source.py | get_text_num | 490720818/jx3_bot | 22 | python | def get_text_num(text: str) -> Tuple[(bool, int)]:
_status = text.split(' ')[0]
_group_id = text.split(' ')[1]
status = (_status == '打开')
group_id = int(_group_id)
return (status, group_id) | def get_text_num(text: str) -> Tuple[(bool, int)]:
_status = text.split(' ')[0]
_group_id = text.split(' ')[1]
status = (_status == '打开')
group_id = int(_group_id)
return (status, group_id)<|docstring|>从信息中获取开关,群号<|endoftext|> |
5aca6291e13ccaa6cce0fce75d6857bb4b7a5dbc61a5ebdbd1c2f06fccd572bc | async def change_status_all(bot_id: int, status: bool) -> None:
'设置所有状态'
(await GroupInfo.change_status_all(bot_id, status)) | 设置所有状态 | src/managers/admin_manager/data_source.py | change_status_all | 490720818/jx3_bot | 22 | python | async def change_status_all(bot_id: int, status: bool) -> None:
(await GroupInfo.change_status_all(bot_id, status)) | async def change_status_all(bot_id: int, status: bool) -> None:
(await GroupInfo.change_status_all(bot_id, status))<|docstring|>设置所有状态<|endoftext|> |
d7c4f480177f3ad8485a3b975772bff2716c20aa4f038d3ca63f48a291c72690 | async def leave_group(bot_id: int, group_id: int) -> Tuple[(bool, str)]:
'退群,返回[成功flag,群名]'
group_name = (await GroupInfo.get_group_name(bot_id, group_id))
if (group_name is None):
group_name = ''
return (False, group_name)
(await GroupInfo.delete_one(bot_id=bot_id, group_id=group_id))
(await UserInfo.delete_group(bot_id=bot_id, group_id=group_id))
return (True, group_name) | 退群,返回[成功flag,群名] | src/managers/admin_manager/data_source.py | leave_group | 490720818/jx3_bot | 22 | python | async def leave_group(bot_id: int, group_id: int) -> Tuple[(bool, str)]:
group_name = (await GroupInfo.get_group_name(bot_id, group_id))
if (group_name is None):
group_name =
return (False, group_name)
(await GroupInfo.delete_one(bot_id=bot_id, group_id=group_id))
(await UserInfo.delete_group(bot_id=bot_id, group_id=group_id))
return (True, group_name) | async def leave_group(bot_id: int, group_id: int) -> Tuple[(bool, str)]:
group_name = (await GroupInfo.get_group_name(bot_id, group_id))
if (group_name is None):
group_name =
return (False, group_name)
(await GroupInfo.delete_one(bot_id=bot_id, group_id=group_id))
(await UserInfo.delete_group(bot_id=bot_id, group_id=group_id))
return (True, group_name)<|docstring|>退群,返回[成功flag,群名]<|endoftext|> |
d29067e9a79be1b87d0db693d5e794a6be94d1be3cf2efd35eb4303f7091339d | async def get_reply_jx3(question: str, nickname: str) -> Optional[str]:
'\n 使用jx3_api获取回复\n '
chat_nlp = config.get('chat_nlp')
if ((chat_nlp['secretId'] is None) or (chat_nlp['secretKey'] is None)):
log = 'jx3_api接口参数不足,无法请求。'
logger.debug(log)
return None
jx3_url: str = config.get('jx3-api').get('jx3-url')
url = f'{jx3_url}/share/nlpchat'
params = chat_nlp.copy()
params['name'] = nickname
params['question'] = question
async with httpx.AsyncClient(headers=get_user_agent()) as client:
try:
req_url = (await client.get(url=url, params=params))
req = req_url.json()
if (req['code'] == 200):
log = 'jx3API请求成功。'
logger.debug(log)
data = req['data']
return data['answer']
else:
log = f"jx3API请求失败:{req['msg']}"
logger.debug(log)
return None
except Exception as e:
log = f'API访问失败:{str(e)}'
logger.error(log)
return None | 使用jx3_api获取回复 | src/managers/admin_manager/data_source.py | get_reply_jx3 | 490720818/jx3_bot | 22 | python | async def get_reply_jx3(question: str, nickname: str) -> Optional[str]:
'\n \n '
chat_nlp = config.get('chat_nlp')
if ((chat_nlp['secretId'] is None) or (chat_nlp['secretKey'] is None)):
log = 'jx3_api接口参数不足,无法请求。'
logger.debug(log)
return None
jx3_url: str = config.get('jx3-api').get('jx3-url')
url = f'{jx3_url}/share/nlpchat'
params = chat_nlp.copy()
params['name'] = nickname
params['question'] = question
async with httpx.AsyncClient(headers=get_user_agent()) as client:
try:
req_url = (await client.get(url=url, params=params))
req = req_url.json()
if (req['code'] == 200):
log = 'jx3API请求成功。'
logger.debug(log)
data = req['data']
return data['answer']
else:
log = f"jx3API请求失败:{req['msg']}"
logger.debug(log)
return None
except Exception as e:
log = f'API访问失败:{str(e)}'
logger.error(log)
return None | async def get_reply_jx3(question: str, nickname: str) -> Optional[str]:
'\n \n '
chat_nlp = config.get('chat_nlp')
if ((chat_nlp['secretId'] is None) or (chat_nlp['secretKey'] is None)):
log = 'jx3_api接口参数不足,无法请求。'
logger.debug(log)
return None
jx3_url: str = config.get('jx3-api').get('jx3-url')
url = f'{jx3_url}/share/nlpchat'
params = chat_nlp.copy()
params['name'] = nickname
params['question'] = question
async with httpx.AsyncClient(headers=get_user_agent()) as client:
try:
req_url = (await client.get(url=url, params=params))
req = req_url.json()
if (req['code'] == 200):
log = 'jx3API请求成功。'
logger.debug(log)
data = req['data']
return data['answer']
else:
log = f"jx3API请求失败:{req['msg']}"
logger.debug(log)
return None
except Exception as e:
log = f'API访问失败:{str(e)}'
logger.error(log)
return None<|docstring|>使用jx3_api获取回复<|endoftext|> |
9db0e32ecf495424735308a37971d8badec1fe81011118d796af743ad3711fd3 | async def get_reply_qingyunke(text: str, nickname: str) -> Optional[str]:
'\n :说明\n 获取聊天结果,使用青云客的API,备胎\n\n :参数\n * text:聊天内容\n\n :返回\n * str:聊天结果\n\n :异常\n * NetworkError, Exception\n '
params = {'key': 'free', 'appid': 0, 'msg': text}
url = 'http://api.qingyunke.com/api.php'
async with httpx.AsyncClient(headers=get_user_agent()) as client:
try:
req_url = (await client.get(url, params=params))
req = req_url.json()
if (req['result'] == 0):
msg = str(req['content'])
msg = msg.replace('{br}', '\n')
msg = msg.replace('菲菲', nickname)
log = '请求青云客API成功。'
logger.debug(log)
return msg
else:
e = req['content']
log = f'青云客API请求失败:{e}'
logger.error(log)
return None
except Exception as e:
log = f'青云客API访问失败:{str(e)}'
logger.error(log)
return None | :说明
获取聊天结果,使用青云客的API,备胎
:参数
* text:聊天内容
:返回
* str:聊天结果
:异常
* NetworkError, Exception | src/managers/admin_manager/data_source.py | get_reply_qingyunke | 490720818/jx3_bot | 22 | python | async def get_reply_qingyunke(text: str, nickname: str) -> Optional[str]:
'\n :说明\n 获取聊天结果,使用青云客的API,备胎\n\n :参数\n * text:聊天内容\n\n :返回\n * str:聊天结果\n\n :异常\n * NetworkError, Exception\n '
params = {'key': 'free', 'appid': 0, 'msg': text}
url = 'http://api.qingyunke.com/api.php'
async with httpx.AsyncClient(headers=get_user_agent()) as client:
try:
req_url = (await client.get(url, params=params))
req = req_url.json()
if (req['result'] == 0):
msg = str(req['content'])
msg = msg.replace('{br}', '\n')
msg = msg.replace('菲菲', nickname)
log = '请求青云客API成功。'
logger.debug(log)
return msg
else:
e = req['content']
log = f'青云客API请求失败:{e}'
logger.error(log)
return None
except Exception as e:
log = f'青云客API访问失败:{str(e)}'
logger.error(log)
return None | async def get_reply_qingyunke(text: str, nickname: str) -> Optional[str]:
'\n :说明\n 获取聊天结果,使用青云客的API,备胎\n\n :参数\n * text:聊天内容\n\n :返回\n * str:聊天结果\n\n :异常\n * NetworkError, Exception\n '
params = {'key': 'free', 'appid': 0, 'msg': text}
url = 'http://api.qingyunke.com/api.php'
async with httpx.AsyncClient(headers=get_user_agent()) as client:
try:
req_url = (await client.get(url, params=params))
req = req_url.json()
if (req['result'] == 0):
msg = str(req['content'])
msg = msg.replace('{br}', '\n')
msg = msg.replace('菲菲', nickname)
log = '请求青云客API成功。'
logger.debug(log)
return msg
else:
e = req['content']
log = f'青云客API请求失败:{e}'
logger.error(log)
return None
except Exception as e:
log = f'青云客API访问失败:{str(e)}'
logger.error(log)
return None<|docstring|>:说明
获取聊天结果,使用青云客的API,备胎
:参数
* text:聊天内容
:返回
* str:聊天结果
:异常
* NetworkError, Exception<|endoftext|> |
bbf99ed721d7298e68da782ea8943a94ae03aff3c81409c573aaec78c45b185e | async def get_robot_status(bot_id: int, group_id: int) -> Optional[bool]:
'获取机器人开关'
robot_status = (await GroupInfo.get_robot_status(bot_id=bot_id, group_id=group_id))
return robot_status | 获取机器人开关 | src/managers/admin_manager/data_source.py | get_robot_status | 490720818/jx3_bot | 22 | python | async def get_robot_status(bot_id: int, group_id: int) -> Optional[bool]:
robot_status = (await GroupInfo.get_robot_status(bot_id=bot_id, group_id=group_id))
return robot_status | async def get_robot_status(bot_id: int, group_id: int) -> Optional[bool]:
robot_status = (await GroupInfo.get_robot_status(bot_id=bot_id, group_id=group_id))
return robot_status<|docstring|>获取机器人开关<|endoftext|> |
73d41557bbcc1a08adb92ed711b6d70f19e74c4ddd847bdbc7e52d64fe21bd40 | def handle_borad_message(all: bool, one_message: MessageSegment) -> Tuple[(MessageSegment, Optional[int])]:
'\n 处理广播消息第一条参数问题,非全体广播会返回group_id\n '
text: str = one_message.data['text']
if all:
req_text = text[5:]
req_msg = MessageSegment.text(req_text)
req_group_id = None
else:
text_list = text.split(' ')
req_group_id = int(text_list[1])
if (len(text_list) > 2):
req_text = ' '
req_text = req_text.join(text_list[2:])
else:
req_text = ''
req_msg = MessageSegment.text(req_text)
return (req_msg, req_group_id) | 处理广播消息第一条参数问题,非全体广播会返回group_id | src/managers/admin_manager/data_source.py | handle_borad_message | 490720818/jx3_bot | 22 | python | def handle_borad_message(all: bool, one_message: MessageSegment) -> Tuple[(MessageSegment, Optional[int])]:
'\n \n '
text: str = one_message.data['text']
if all:
req_text = text[5:]
req_msg = MessageSegment.text(req_text)
req_group_id = None
else:
text_list = text.split(' ')
req_group_id = int(text_list[1])
if (len(text_list) > 2):
req_text = ' '
req_text = req_text.join(text_list[2:])
else:
req_text =
req_msg = MessageSegment.text(req_text)
return (req_msg, req_group_id) | def handle_borad_message(all: bool, one_message: MessageSegment) -> Tuple[(MessageSegment, Optional[int])]:
'\n \n '
text: str = one_message.data['text']
if all:
req_text = text[5:]
req_msg = MessageSegment.text(req_text)
req_group_id = None
else:
text_list = text.split(' ')
req_group_id = int(text_list[1])
if (len(text_list) > 2):
req_text = ' '
req_text = req_text.join(text_list[2:])
else:
req_text =
req_msg = MessageSegment.text(req_text)
return (req_msg, req_group_id)<|docstring|>处理广播消息第一条参数问题,非全体广播会返回group_id<|endoftext|> |
f895e82cba1c42b5810b76cde561190fefdda8b3eb75d129163a74518dcbcfda | async def set_bot_nickname(bot_id: str, nickname: str):
'设置昵称'
(await BotInfo.set_nickname(int(bot_id), nickname))
bot = get_bot(self_id=bot_id)
bot.config.nickname = [nickname] | 设置昵称 | src/managers/admin_manager/data_source.py | set_bot_nickname | 490720818/jx3_bot | 22 | python | async def set_bot_nickname(bot_id: str, nickname: str):
(await BotInfo.set_nickname(int(bot_id), nickname))
bot = get_bot(self_id=bot_id)
bot.config.nickname = [nickname] | async def set_bot_nickname(bot_id: str, nickname: str):
(await BotInfo.set_nickname(int(bot_id), nickname))
bot = get_bot(self_id=bot_id)
bot.config.nickname = [nickname]<|docstring|>设置昵称<|endoftext|> |
bcbd182ef7336cf175a73a9f0b93c281a0a908112f9ac8cc4a556c0eb1e6c7c9 | async def add_token(bot_id: int, token: str) -> bool:
'增加一条token'
return (await TokenInfo.append_token(bot_id, token)) | 增加一条token | src/managers/admin_manager/data_source.py | add_token | 490720818/jx3_bot | 22 | python | async def add_token(bot_id: int, token: str) -> bool:
return (await TokenInfo.append_token(bot_id, token)) | async def add_token(bot_id: int, token: str) -> bool:
return (await TokenInfo.append_token(bot_id, token))<|docstring|>增加一条token<|endoftext|> |
c062c011e89c58533ffbbe49226063fb43a7ddf5da675c93949aaf750412a767 | async def get_token(bot_id: int) -> list[dict]:
'获取token'
return (await TokenInfo.get_token(bot_id)) | 获取token | src/managers/admin_manager/data_source.py | get_token | 490720818/jx3_bot | 22 | python | async def get_token(bot_id: int) -> list[dict]:
return (await TokenInfo.get_token(bot_id)) | async def get_token(bot_id: int) -> list[dict]:
return (await TokenInfo.get_token(bot_id))<|docstring|>获取token<|endoftext|> |
4b1c2ed5e4d0323bbd74ce618fffe9ed3aee40fdf0327fcdeb6ed331ed90d5f0 | async def remove_token(bot_id: int, token: str) -> bool:
'删除一条token'
return (await TokenInfo.remove_token(bot_id, token)) | 删除一条token | src/managers/admin_manager/data_source.py | remove_token | 490720818/jx3_bot | 22 | python | async def remove_token(bot_id: int, token: str) -> bool:
return (await TokenInfo.remove_token(bot_id, token)) | async def remove_token(bot_id: int, token: str) -> bool:
return (await TokenInfo.remove_token(bot_id, token))<|docstring|>删除一条token<|endoftext|> |
7b9d142557790d4efb24dbe57ca1ab538b56eb57fc460e176615f38a10015acc | async def get_bot_group_list(bot_id: int) -> list[int]:
'获取机器人开启群组名单'
group_list = (await GroupInfo.get_group_list(bot_id))
return group_list | 获取机器人开启群组名单 | src/managers/admin_manager/data_source.py | get_bot_group_list | 490720818/jx3_bot | 22 | python | async def get_bot_group_list(bot_id: int) -> list[int]:
group_list = (await GroupInfo.get_group_list(bot_id))
return group_list | async def get_bot_group_list(bot_id: int) -> list[int]:
group_list = (await GroupInfo.get_group_list(bot_id))
return group_list<|docstring|>获取机器人开启群组名单<|endoftext|> |
b7c4d10aa79c9e79a4d26fff34a3a60a0e884e1e32bdd3845b99d7fbb5d86a3a | async def check_token(ticket: str) -> Tuple[(bool, str)]:
'检查token有效性'
url = (config.get('jx3-api').get('jx3-url') + '/token/validity')
token = config.get('jx3-api').get('jx3-token')
params = {'token': token, 'ticket': ticket}
async with httpx.AsyncClient(headers=get_user_agent()) as client:
try:
req_url = (await client.get(url=url, params=params))
req = req_url.json()
code = req['code']
msg = req['msg']
return ((code == 200), msg)
except Exception as e:
return (False, str(e)) | 检查token有效性 | src/managers/admin_manager/data_source.py | check_token | 490720818/jx3_bot | 22 | python | async def check_token(ticket: str) -> Tuple[(bool, str)]:
url = (config.get('jx3-api').get('jx3-url') + '/token/validity')
token = config.get('jx3-api').get('jx3-token')
params = {'token': token, 'ticket': ticket}
async with httpx.AsyncClient(headers=get_user_agent()) as client:
try:
req_url = (await client.get(url=url, params=params))
req = req_url.json()
code = req['code']
msg = req['msg']
return ((code == 200), msg)
except Exception as e:
return (False, str(e)) | async def check_token(ticket: str) -> Tuple[(bool, str)]:
url = (config.get('jx3-api').get('jx3-url') + '/token/validity')
token = config.get('jx3-api').get('jx3-token')
params = {'token': token, 'ticket': ticket}
async with httpx.AsyncClient(headers=get_user_agent()) as client:
try:
req_url = (await client.get(url=url, params=params))
req = req_url.json()
code = req['code']
msg = req['msg']
return ((code == 200), msg)
except Exception as e:
return (False, str(e))<|docstring|>检查token有效性<|endoftext|> |
cfb06ca66680527bff252e6dfc010d0261b4fcef6c964f1652b5091de8d55714 | def reset(self):
'\n reset the models hidden layer when starting a new rollout\n '
if hasattr(self, 'short_term_memory'):
self.short_term_memory = deque()
self.state = torch.zeros(1, self.model.state_size).to(self.device) | reset the models hidden layer when starting a new rollout | 3dcdrl/dummy_agent.py | reset | NicholasSperryGrandhomme/Improving-RL-Navigation-using-TTA | 28 | python | def reset(self):
'\n \n '
if hasattr(self, 'short_term_memory'):
self.short_term_memory = deque()
self.state = torch.zeros(1, self.model.state_size).to(self.device) | def reset(self):
'\n \n '
if hasattr(self, 'short_term_memory'):
self.short_term_memory = deque()
self.state = torch.zeros(1, self.model.state_size).to(self.device)<|docstring|>reset the models hidden layer when starting a new rollout<|endoftext|> |
14aec0da0c9d992d9d05cb89d06a95201f2f916d564848344dafad42f2fa7aa3 | def _prepare_observation(self, observation):
'\n As the network expects an input of n frames, we must store a small\n short term memory of frames. At input this is completely empty so \n I pad with the firt observations 4 times, generally this is only used when the network\n is not recurrent\n '
if (len(self.short_term_memory) == 0):
for _ in range(self.exp_size):
self.short_term_memory.append(observation)
self.short_term_memory.popleft()
self.short_term_memory.append(observation)
return np.vstack(self.short_term_memory) | As the network expects an input of n frames, we must store a small
short term memory of frames. At input this is completely empty so
I pad with the firt observations 4 times, generally this is only used when the network
is not recurrent | 3dcdrl/dummy_agent.py | _prepare_observation | NicholasSperryGrandhomme/Improving-RL-Navigation-using-TTA | 28 | python | def _prepare_observation(self, observation):
'\n As the network expects an input of n frames, we must store a small\n short term memory of frames. At input this is completely empty so \n I pad with the firt observations 4 times, generally this is only used when the network\n is not recurrent\n '
if (len(self.short_term_memory) == 0):
for _ in range(self.exp_size):
self.short_term_memory.append(observation)
self.short_term_memory.popleft()
self.short_term_memory.append(observation)
return np.vstack(self.short_term_memory) | def _prepare_observation(self, observation):
'\n As the network expects an input of n frames, we must store a small\n short term memory of frames. At input this is completely empty so \n I pad with the firt observations 4 times, generally this is only used when the network\n is not recurrent\n '
if (len(self.short_term_memory) == 0):
for _ in range(self.exp_size):
self.short_term_memory.append(observation)
self.short_term_memory.popleft()
self.short_term_memory.append(observation)
return np.vstack(self.short_term_memory)<|docstring|>As the network expects an input of n frames, we must store a small
short term memory of frames. At input this is completely empty so
I pad with the firt observations 4 times, generally this is only used when the network
is not recurrent<|endoftext|> |
5fe336f86a021340f36ba8a0ab435ad93313fc76a300e043fc829320c1cde947 | @staticmethod
def reverse_list(head):
'Fantasic code!'
new_head = None
while head:
(head.next, head, new_head) = (new_head, head.next, head)
return new_head | Fantasic code! | misc/linked_list_class.py | reverse_list | alexyvassili/code-challenges | 0 | python | @staticmethod
def reverse_list(head):
new_head = None
while head:
(head.next, head, new_head) = (new_head, head.next, head)
return new_head | @staticmethod
def reverse_list(head):
new_head = None
while head:
(head.next, head, new_head) = (new_head, head.next, head)
return new_head<|docstring|>Fantasic code!<|endoftext|> |
3f542cd6568687c99737f531a096983627f44a7fd3b94f7100c54146e0be0694 | def delete_port_flows_log(self, port, log_id):
'Delete all flows log for given port and log_id'
event = port.event
if (event == log_const.ACCEPT_EVENT):
self._delete_accept_flows_log(port, log_id)
elif (event == log_const.DROP_EVENT):
self._delete_drop_flows_log(port, log_id)
else:
self._delete_accept_flows_log(port, log_id)
self._delete_drop_flows_log(port, log_id) | Delete all flows log for given port and log_id | neutron/services/logapi/drivers/openvswitch/ovs_firewall_log.py | delete_port_flows_log | urimeba/neutron | 1,080 | python | def delete_port_flows_log(self, port, log_id):
event = port.event
if (event == log_const.ACCEPT_EVENT):
self._delete_accept_flows_log(port, log_id)
elif (event == log_const.DROP_EVENT):
self._delete_drop_flows_log(port, log_id)
else:
self._delete_accept_flows_log(port, log_id)
self._delete_drop_flows_log(port, log_id) | def delete_port_flows_log(self, port, log_id):
event = port.event
if (event == log_const.ACCEPT_EVENT):
self._delete_accept_flows_log(port, log_id)
elif (event == log_const.DROP_EVENT):
self._delete_drop_flows_log(port, log_id)
else:
self._delete_accept_flows_log(port, log_id)
self._delete_drop_flows_log(port, log_id)<|docstring|>Delete all flows log for given port and log_id<|endoftext|> |
2d0f557135046e3139603f516b58cd7485b44a5a165e6ce89425e58aa36d773e | def set_order_separators(dict_levels):
"\n Order of parsing: [',', ' ', '+', '-']\n Function returns the parsing separators in the right order according to levels\n ??? not optimal ???\n "
keys = sorted(list(dict_levels.keys()))
if (keys != []):
(min_level, max_level) = [int(keys[0].split('_')[0]), (int(keys[(len(keys) - 1)].split('_')[0]) + 1)]
orders = [((str(j) + '_') + i) for j in range(min_level, max_level) for i in [',', ' ', '+', '-']]
new_order = [element for element in orders if (element in keys)]
else:
new_order = []
return new_order | Order of parsing: [',', ' ', '+', '-']
Function returns the parsing separators in the right order according to levels
??? not optimal ??? | tools/Assembly/KEGG_analysis/make_graphs.py | set_order_separators | EBI-Metagenomics/pipeline-v5 | 10 | python | def set_order_separators(dict_levels):
"\n Order of parsing: [',', ' ', '+', '-']\n Function returns the parsing separators in the right order according to levels\n ??? not optimal ???\n "
keys = sorted(list(dict_levels.keys()))
if (keys != []):
(min_level, max_level) = [int(keys[0].split('_')[0]), (int(keys[(len(keys) - 1)].split('_')[0]) + 1)]
orders = [((str(j) + '_') + i) for j in range(min_level, max_level) for i in [',', ' ', '+', '-']]
new_order = [element for element in orders if (element in keys)]
else:
new_order = []
return new_order | def set_order_separators(dict_levels):
"\n Order of parsing: [',', ' ', '+', '-']\n Function returns the parsing separators in the right order according to levels\n ??? not optimal ???\n "
keys = sorted(list(dict_levels.keys()))
if (keys != []):
(min_level, max_level) = [int(keys[0].split('_')[0]), (int(keys[(len(keys) - 1)].split('_')[0]) + 1)]
orders = [((str(j) + '_') + i) for j in range(min_level, max_level) for i in [',', ' ', '+', '-']]
new_order = [element for element in orders if (element in keys)]
else:
new_order = []
return new_order<|docstring|>Order of parsing: [',', ' ', '+', '-']
Function returns the parsing separators in the right order according to levels
??? not optimal ???<|endoftext|> |
e1d0d3c5fd415d9f96c307327ed7f0c9ca409513e3b7afdc2333f91237fc7740 | def add_to_dict_of_levels(dict_levels, c, cur_level, index):
"\n Function returns the dict of positions according to the level of space or comma\n Example: {'1_,': [14], '2_,': [9], '0_ ': [3], '1_ ': [12]}\n comma of level 1: position 14\n comma of level 2: position 9\n space of level 0: position 3\n space of level 1: position 12\n "
symbol = ((str(cur_level) + '_') + c)
if (symbol not in dict_levels):
dict_levels[symbol] = []
dict_levels[symbol].append(index)
return dict_levels | Function returns the dict of positions according to the level of space or comma
Example: {'1_,': [14], '2_,': [9], '0_ ': [3], '1_ ': [12]}
comma of level 1: position 14
comma of level 2: position 9
space of level 0: position 3
space of level 1: position 12 | tools/Assembly/KEGG_analysis/make_graphs.py | add_to_dict_of_levels | EBI-Metagenomics/pipeline-v5 | 10 | python | def add_to_dict_of_levels(dict_levels, c, cur_level, index):
"\n Function returns the dict of positions according to the level of space or comma\n Example: {'1_,': [14], '2_,': [9], '0_ ': [3], '1_ ': [12]}\n comma of level 1: position 14\n comma of level 2: position 9\n space of level 0: position 3\n space of level 1: position 12\n "
symbol = ((str(cur_level) + '_') + c)
if (symbol not in dict_levels):
dict_levels[symbol] = []
dict_levels[symbol].append(index)
return dict_levels | def add_to_dict_of_levels(dict_levels, c, cur_level, index):
"\n Function returns the dict of positions according to the level of space or comma\n Example: {'1_,': [14], '2_,': [9], '0_ ': [3], '1_ ': [12]}\n comma of level 1: position 14\n comma of level 2: position 9\n space of level 0: position 3\n space of level 1: position 12\n "
symbol = ((str(cur_level) + '_') + c)
if (symbol not in dict_levels):
dict_levels[symbol] = []
dict_levels[symbol].append(index)
return dict_levels<|docstring|>Function returns the dict of positions according to the level of space or comma
Example: {'1_,': [14], '2_,': [9], '0_ ': [3], '1_ ': [12]}
comma of level 1: position 14
comma of level 2: position 9
space of level 0: position 3
space of level 1: position 12<|endoftext|> |
932ce730e07f0dbf5a68bea54b5d672bee743c4fde208b12ac641c8960f04d8d | def set_brackets(pathway):
'\n Function defines levels of all brackets in expression. The output will be used by function <check_brackets>\n Example 1:\n expression: A B (C,D)\n levels: -1,-1,-1,-1,0,-1,-1,-1,0\n Example 2:\n expression: (A B (C,D))\n levels: 0,-1,-1,-1,-1,1,-1,-1,-1,1,0\n :param pathway: string expression\n :return: levels of brackets\n '
levels_brackets = []
cur_open = []
num = (- 1)
for c in pathway:
if (c == '('):
num += 1
cur_open.append(num)
levels_brackets.append(num)
elif (c == ')'):
levels_brackets.append(cur_open[(len(cur_open) - 1)])
cur_open.pop()
else:
levels_brackets.append((- 1))
return levels_brackets | Function defines levels of all brackets in expression. The output will be used by function <check_brackets>
Example 1:
expression: A B (C,D)
levels: -1,-1,-1,-1,0,-1,-1,-1,0
Example 2:
expression: (A B (C,D))
levels: 0,-1,-1,-1,-1,1,-1,-1,-1,1,0
:param pathway: string expression
:return: levels of brackets | tools/Assembly/KEGG_analysis/make_graphs.py | set_brackets | EBI-Metagenomics/pipeline-v5 | 10 | python | def set_brackets(pathway):
'\n Function defines levels of all brackets in expression. The output will be used by function <check_brackets>\n Example 1:\n expression: A B (C,D)\n levels: -1,-1,-1,-1,0,-1,-1,-1,0\n Example 2:\n expression: (A B (C,D))\n levels: 0,-1,-1,-1,-1,1,-1,-1,-1,1,0\n :param pathway: string expression\n :return: levels of brackets\n '
levels_brackets = []
cur_open = []
num = (- 1)
for c in pathway:
if (c == '('):
num += 1
cur_open.append(num)
levels_brackets.append(num)
elif (c == ')'):
levels_brackets.append(cur_open[(len(cur_open) - 1)])
cur_open.pop()
else:
levels_brackets.append((- 1))
return levels_brackets | def set_brackets(pathway):
'\n Function defines levels of all brackets in expression. The output will be used by function <check_brackets>\n Example 1:\n expression: A B (C,D)\n levels: -1,-1,-1,-1,0,-1,-1,-1,0\n Example 2:\n expression: (A B (C,D))\n levels: 0,-1,-1,-1,-1,1,-1,-1,-1,1,0\n :param pathway: string expression\n :return: levels of brackets\n '
levels_brackets = []
cur_open = []
num = (- 1)
for c in pathway:
if (c == '('):
num += 1
cur_open.append(num)
levels_brackets.append(num)
elif (c == ')'):
levels_brackets.append(cur_open[(len(cur_open) - 1)])
cur_open.pop()
else:
levels_brackets.append((- 1))
return levels_brackets<|docstring|>Function defines levels of all brackets in expression. The output will be used by function <check_brackets>
Example 1:
expression: A B (C,D)
levels: -1,-1,-1,-1,0,-1,-1,-1,0
Example 2:
expression: (A B (C,D))
levels: 0,-1,-1,-1,-1,1,-1,-1,-1,1,0
:param pathway: string expression
:return: levels of brackets<|endoftext|> |
8271dd5a6d921b839476c7cc3186bdb27012d010468bd7a0f6ad4e6125785bcf | def set_levels(pathway):
"\n Function creates a dictionary of separators in pathway.\n Keys format: level_separator (ex. '1_,' or '0_ ')\n Values: list of positions in expression\n Example:\n expression: D (A+B) -> levels: 0011111 -> dict_levels: {'0_ ':[1], '1+':[4] }\n\n :param pathway: string expression\n :return: dict. of separators with their positions\n "
dict_levels = {}
L = len(pathway)
(cur_level, index) = [0 for _ in range(2)]
while (index < L):
c = pathway[index]
if ((c == ' ') or (c == ',') or (c == '-') or (c == '+')):
dict_levels = add_to_dict_of_levels(dict_levels, c, cur_level, index)
elif (c == '('):
cur_level += 1
elif (c == ')'):
cur_level -= 1
else:
index += 1
if (index < L):
while (pathway[index] not in [' ', ',', '(', ')', '-', '+']):
index += 1
if (index >= L):
break
index -= 1
index += 1
return dict_levels | Function creates a dictionary of separators in pathway.
Keys format: level_separator (ex. '1_,' or '0_ ')
Values: list of positions in expression
Example:
expression: D (A+B) -> levels: 0011111 -> dict_levels: {'0_ ':[1], '1+':[4] }
:param pathway: string expression
:return: dict. of separators with their positions | tools/Assembly/KEGG_analysis/make_graphs.py | set_levels | EBI-Metagenomics/pipeline-v5 | 10 | python | def set_levels(pathway):
"\n Function creates a dictionary of separators in pathway.\n Keys format: level_separator (ex. '1_,' or '0_ ')\n Values: list of positions in expression\n Example:\n expression: D (A+B) -> levels: 0011111 -> dict_levels: {'0_ ':[1], '1+':[4] }\n\n :param pathway: string expression\n :return: dict. of separators with their positions\n "
dict_levels = {}
L = len(pathway)
(cur_level, index) = [0 for _ in range(2)]
while (index < L):
c = pathway[index]
if ((c == ' ') or (c == ',') or (c == '-') or (c == '+')):
dict_levels = add_to_dict_of_levels(dict_levels, c, cur_level, index)
elif (c == '('):
cur_level += 1
elif (c == ')'):
cur_level -= 1
else:
index += 1
if (index < L):
while (pathway[index] not in [' ', ',', '(', ')', '-', '+']):
index += 1
if (index >= L):
break
index -= 1
index += 1
return dict_levels | def set_levels(pathway):
"\n Function creates a dictionary of separators in pathway.\n Keys format: level_separator (ex. '1_,' or '0_ ')\n Values: list of positions in expression\n Example:\n expression: D (A+B) -> levels: 0011111 -> dict_levels: {'0_ ':[1], '1+':[4] }\n\n :param pathway: string expression\n :return: dict. of separators with their positions\n "
dict_levels = {}
L = len(pathway)
(cur_level, index) = [0 for _ in range(2)]
while (index < L):
c = pathway[index]
if ((c == ' ') or (c == ',') or (c == '-') or (c == '+')):
dict_levels = add_to_dict_of_levels(dict_levels, c, cur_level, index)
elif (c == '('):
cur_level += 1
elif (c == ')'):
cur_level -= 1
else:
index += 1
if (index < L):
while (pathway[index] not in [' ', ',', '(', ')', '-', '+']):
index += 1
if (index >= L):
break
index -= 1
index += 1
return dict_levels<|docstring|>Function creates a dictionary of separators in pathway.
Keys format: level_separator (ex. '1_,' or '0_ ')
Values: list of positions in expression
Example:
expression: D (A+B) -> levels: 0011111 -> dict_levels: {'0_ ':[1], '1+':[4] }
:param pathway: string expression
:return: dict. of separators with their positions<|endoftext|> |
4beea9315c241171f0cf4cf84c1f881018a81b86a0742e2c9a8a3766202a1dd0 | def check_brackets(pathway, levels_brackets):
'\n Function checks is this expression in brackets. Returns without if true\n Example: input (A B C)\n return: A B C\n :param pathway: input string expression\n :return: output string expression\n '
L = len(pathway)
if ((pathway[0] == '(') and (pathway[(L - 1)] == ')') and (levels_brackets[0] == levels_brackets[(L - 1)])):
return pathway[1:(L - 1)]
else:
return pathway | Function checks is this expression in brackets. Returns without if true
Example: input (A B C)
return: A B C
:param pathway: input string expression
:return: output string expression | tools/Assembly/KEGG_analysis/make_graphs.py | check_brackets | EBI-Metagenomics/pipeline-v5 | 10 | python | def check_brackets(pathway, levels_brackets):
'\n Function checks is this expression in brackets. Returns without if true\n Example: input (A B C)\n return: A B C\n :param pathway: input string expression\n :return: output string expression\n '
L = len(pathway)
if ((pathway[0] == '(') and (pathway[(L - 1)] == ')') and (levels_brackets[0] == levels_brackets[(L - 1)])):
return pathway[1:(L - 1)]
else:
return pathway | def check_brackets(pathway, levels_brackets):
'\n Function checks is this expression in brackets. Returns without if true\n Example: input (A B C)\n return: A B C\n :param pathway: input string expression\n :return: output string expression\n '
L = len(pathway)
if ((pathway[0] == '(') and (pathway[(L - 1)] == ')') and (levels_brackets[0] == levels_brackets[(L - 1)])):
return pathway[1:(L - 1)]
else:
return pathway<|docstring|>Function checks is this expression in brackets. Returns without if true
Example: input (A B C)
return: A B C
:param pathway: input string expression
:return: output string expression<|endoftext|> |
3baa93cea841a62192a2a981969d1d5e455fef15cfdaece2a889569db9798226 | def recursive_parsing(G, dict_edges, unnecessary_nodes, expression, start_node, end_node, weight):
'\n Main parser:\n - adds edges and nodes to global graph G\n - adds names of edges to global dictionary of edges\n\n :param expression: current string expression to parse\n :param start_node: num of node from which expression sequence would be started\n :param end_node: num of node to which expression sequence would be finished\n :param weight: weight of edge (0 for unnecessary edges, 1 - for necessary, float - for parts of complex)\n :return: graph, dict of edges\n '
if (expression == '--'):
name_missing = 'K00000'
G.add_edge(start_node, end_node, label=name_missing, weight=0, weight_new=0, name='-')
unnecessary_nodes.append(name_missing)
if (name_missing not in dict_edges):
dict_edges[name_missing] = []
dict_edges[name_missing].append([start_node, end_node])
return (G, dict_edges, unnecessary_nodes)
expression = check_brackets(expression, set_brackets(expression))
cur_dict_levels = set_levels(expression)
separators_order = set_order_separators(cur_dict_levels)
cur_weight = weight
if (len(separators_order) == 1):
if ((separators_order[0] == '0_-') and (expression[0] == '-')):
G.add_edge(start_node, end_node, label=expression[1:], weight=0, weight_new=0, name='-')
unnecessary_nodes.append(expression[1:])
if (expression[1:] not in dict_edges):
dict_edges[expression[1:]] = []
dict_edges[expression[1:]].append([start_node, end_node])
return (G, dict_edges, unnecessary_nodes)
if (separators_order != []):
field = separators_order[0]
symbol = field.split('_')[1]
if ((symbol == '+') or (symbol == ' ')):
cur_weight = (cur_weight / (len(cur_dict_levels[field]) + 1))
separators = list(np.array(sorted(cur_dict_levels[field])))
cur_sep = 0
cur_start_node = start_node
cur_end_node = end_node
for (separator, num) in zip(separators, range(len(separators))):
if ((symbol == ' ') or (symbol == '+') or (symbol == '-')):
cur_end_node = len(list(G.nodes()))
G.add_node(cur_end_node)
if ((symbol == '-') and (num > 0)):
cur_weight = 0
subexpression = expression[cur_sep:separator]
(G, dict_edges, unnecessary_nodes) = recursive_parsing(G=G, dict_edges=dict_edges, unnecessary_nodes=unnecessary_nodes, expression=subexpression, start_node=cur_start_node, end_node=cur_end_node, weight=cur_weight)
cur_sep = (separator + 1)
if ((symbol == ' ') or (symbol == '+') or (symbol == '-')):
cur_start_node = cur_end_node
num += 1
if ((symbol == ' ') or (symbol == '+') or (symbol == '-')):
cur_start_node = cur_end_node
cur_end_node = end_node
if ((symbol == '-') and (num > 0)):
cur_weight = 0
(G, dict_edges, unnecessary_nodes) = recursive_parsing(G=G, dict_edges=dict_edges, unnecessary_nodes=unnecessary_nodes, expression=expression[cur_sep:len(expression)], start_node=cur_start_node, end_node=cur_end_node, weight=cur_weight)
return (G, dict_edges, unnecessary_nodes)
else:
if (cur_weight == 0):
G.add_edge(start_node, end_node, label=expression, weight=cur_weight, weight_new=cur_weight, name='-')
unnecessary_nodes.append(expression)
else:
G.add_edge(start_node, end_node, label=expression, weight=cur_weight, weight_new=cur_weight, name='node')
if (expression not in dict_edges):
dict_edges[expression] = []
dict_edges[expression].append([start_node, end_node])
return (G, dict_edges, unnecessary_nodes) | Main parser:
- adds edges and nodes to global graph G
- adds names of edges to global dictionary of edges
:param expression: current string expression to parse
:param start_node: num of node from which expression sequence would be started
:param end_node: num of node to which expression sequence would be finished
:param weight: weight of edge (0 for unnecessary edges, 1 - for necessary, float - for parts of complex)
:return: graph, dict of edges | tools/Assembly/KEGG_analysis/make_graphs.py | recursive_parsing | EBI-Metagenomics/pipeline-v5 | 10 | python | def recursive_parsing(G, dict_edges, unnecessary_nodes, expression, start_node, end_node, weight):
'\n Main parser:\n - adds edges and nodes to global graph G\n - adds names of edges to global dictionary of edges\n\n :param expression: current string expression to parse\n :param start_node: num of node from which expression sequence would be started\n :param end_node: num of node to which expression sequence would be finished\n :param weight: weight of edge (0 for unnecessary edges, 1 - for necessary, float - for parts of complex)\n :return: graph, dict of edges\n '
if (expression == '--'):
name_missing = 'K00000'
G.add_edge(start_node, end_node, label=name_missing, weight=0, weight_new=0, name='-')
unnecessary_nodes.append(name_missing)
if (name_missing not in dict_edges):
dict_edges[name_missing] = []
dict_edges[name_missing].append([start_node, end_node])
return (G, dict_edges, unnecessary_nodes)
expression = check_brackets(expression, set_brackets(expression))
cur_dict_levels = set_levels(expression)
separators_order = set_order_separators(cur_dict_levels)
cur_weight = weight
if (len(separators_order) == 1):
if ((separators_order[0] == '0_-') and (expression[0] == '-')):
G.add_edge(start_node, end_node, label=expression[1:], weight=0, weight_new=0, name='-')
unnecessary_nodes.append(expression[1:])
if (expression[1:] not in dict_edges):
dict_edges[expression[1:]] = []
dict_edges[expression[1:]].append([start_node, end_node])
return (G, dict_edges, unnecessary_nodes)
if (separators_order != []):
field = separators_order[0]
symbol = field.split('_')[1]
if ((symbol == '+') or (symbol == ' ')):
cur_weight = (cur_weight / (len(cur_dict_levels[field]) + 1))
separators = list(np.array(sorted(cur_dict_levels[field])))
cur_sep = 0
cur_start_node = start_node
cur_end_node = end_node
for (separator, num) in zip(separators, range(len(separators))):
if ((symbol == ' ') or (symbol == '+') or (symbol == '-')):
cur_end_node = len(list(G.nodes()))
G.add_node(cur_end_node)
if ((symbol == '-') and (num > 0)):
cur_weight = 0
subexpression = expression[cur_sep:separator]
(G, dict_edges, unnecessary_nodes) = recursive_parsing(G=G, dict_edges=dict_edges, unnecessary_nodes=unnecessary_nodes, expression=subexpression, start_node=cur_start_node, end_node=cur_end_node, weight=cur_weight)
cur_sep = (separator + 1)
if ((symbol == ' ') or (symbol == '+') or (symbol == '-')):
cur_start_node = cur_end_node
num += 1
if ((symbol == ' ') or (symbol == '+') or (symbol == '-')):
cur_start_node = cur_end_node
cur_end_node = end_node
if ((symbol == '-') and (num > 0)):
cur_weight = 0
(G, dict_edges, unnecessary_nodes) = recursive_parsing(G=G, dict_edges=dict_edges, unnecessary_nodes=unnecessary_nodes, expression=expression[cur_sep:len(expression)], start_node=cur_start_node, end_node=cur_end_node, weight=cur_weight)
return (G, dict_edges, unnecessary_nodes)
else:
if (cur_weight == 0):
G.add_edge(start_node, end_node, label=expression, weight=cur_weight, weight_new=cur_weight, name='-')
unnecessary_nodes.append(expression)
else:
G.add_edge(start_node, end_node, label=expression, weight=cur_weight, weight_new=cur_weight, name='node')
if (expression not in dict_edges):
dict_edges[expression] = []
dict_edges[expression].append([start_node, end_node])
return (G, dict_edges, unnecessary_nodes) | def recursive_parsing(G, dict_edges, unnecessary_nodes, expression, start_node, end_node, weight):
'\n Main parser:\n - adds edges and nodes to global graph G\n - adds names of edges to global dictionary of edges\n\n :param expression: current string expression to parse\n :param start_node: num of node from which expression sequence would be started\n :param end_node: num of node to which expression sequence would be finished\n :param weight: weight of edge (0 for unnecessary edges, 1 - for necessary, float - for parts of complex)\n :return: graph, dict of edges\n '
if (expression == '--'):
name_missing = 'K00000'
G.add_edge(start_node, end_node, label=name_missing, weight=0, weight_new=0, name='-')
unnecessary_nodes.append(name_missing)
if (name_missing not in dict_edges):
dict_edges[name_missing] = []
dict_edges[name_missing].append([start_node, end_node])
return (G, dict_edges, unnecessary_nodes)
expression = check_brackets(expression, set_brackets(expression))
cur_dict_levels = set_levels(expression)
separators_order = set_order_separators(cur_dict_levels)
cur_weight = weight
if (len(separators_order) == 1):
if ((separators_order[0] == '0_-') and (expression[0] == '-')):
G.add_edge(start_node, end_node, label=expression[1:], weight=0, weight_new=0, name='-')
unnecessary_nodes.append(expression[1:])
if (expression[1:] not in dict_edges):
dict_edges[expression[1:]] = []
dict_edges[expression[1:]].append([start_node, end_node])
return (G, dict_edges, unnecessary_nodes)
if (separators_order != []):
field = separators_order[0]
symbol = field.split('_')[1]
if ((symbol == '+') or (symbol == ' ')):
cur_weight = (cur_weight / (len(cur_dict_levels[field]) + 1))
separators = list(np.array(sorted(cur_dict_levels[field])))
cur_sep = 0
cur_start_node = start_node
cur_end_node = end_node
for (separator, num) in zip(separators, range(len(separators))):
if ((symbol == ' ') or (symbol == '+') or (symbol == '-')):
cur_end_node = len(list(G.nodes()))
G.add_node(cur_end_node)
if ((symbol == '-') and (num > 0)):
cur_weight = 0
subexpression = expression[cur_sep:separator]
(G, dict_edges, unnecessary_nodes) = recursive_parsing(G=G, dict_edges=dict_edges, unnecessary_nodes=unnecessary_nodes, expression=subexpression, start_node=cur_start_node, end_node=cur_end_node, weight=cur_weight)
cur_sep = (separator + 1)
if ((symbol == ' ') or (symbol == '+') or (symbol == '-')):
cur_start_node = cur_end_node
num += 1
if ((symbol == ' ') or (symbol == '+') or (symbol == '-')):
cur_start_node = cur_end_node
cur_end_node = end_node
if ((symbol == '-') and (num > 0)):
cur_weight = 0
(G, dict_edges, unnecessary_nodes) = recursive_parsing(G=G, dict_edges=dict_edges, unnecessary_nodes=unnecessary_nodes, expression=expression[cur_sep:len(expression)], start_node=cur_start_node, end_node=cur_end_node, weight=cur_weight)
return (G, dict_edges, unnecessary_nodes)
else:
if (cur_weight == 0):
G.add_edge(start_node, end_node, label=expression, weight=cur_weight, weight_new=cur_weight, name='-')
unnecessary_nodes.append(expression)
else:
G.add_edge(start_node, end_node, label=expression, weight=cur_weight, weight_new=cur_weight, name='node')
if (expression not in dict_edges):
dict_edges[expression] = []
dict_edges[expression].append([start_node, end_node])
return (G, dict_edges, unnecessary_nodes)<|docstring|>Main parser:
- adds edges and nodes to global graph G
- adds names of edges to global dictionary of edges
:param expression: current string expression to parse
:param start_node: num of node from which expression sequence would be started
:param end_node: num of node to which expression sequence would be finished
:param weight: weight of edge (0 for unnecessary edges, 1 - for necessary, float - for parts of complex)
:return: graph, dict of edges<|endoftext|> |
d240485e1c33fe9e6fb4576a05683bf66451eb523a1b1c4c8caf3bde66950668 | def pathways_processing(input_file, outdir):
'\n Main function for processing each pathway.\n All pathways were written in one file by lines in format: <name>:<pathway>.\n Function creates dictionary key: name; value: (graph, dict_edges)\n :param input_file: input file with pathways\n :return:\n '
graphs = {}
with open(input_file, 'r') as file_in:
for line in file_in:
line = line.strip().split(':')
pathway = line[1]
name = line[0]
print(name)
Graph = nx.MultiDiGraph()
Graph.add_node(0, color='green')
Graph.add_node(1, color='red')
(Graph, dict_edges, unnecessary_nodes) = recursive_parsing(G=Graph, dict_edges={}, unnecessary_nodes=[], expression=pathway, start_node=0, end_node=1, weight=1)
graphs[name] = tuple([Graph, dict_edges, unnecessary_nodes])
print('done')
path_output = os.path.join(outdir, 'graphs.pkl')
f = open(path_output, 'wb')
pickle.dump(graphs, f)
f.close() | Main function for processing each pathway.
All pathways were written in one file by lines in format: <name>:<pathway>.
Function creates dictionary key: name; value: (graph, dict_edges)
:param input_file: input file with pathways
:return: | tools/Assembly/KEGG_analysis/make_graphs.py | pathways_processing | EBI-Metagenomics/pipeline-v5 | 10 | python | def pathways_processing(input_file, outdir):
'\n Main function for processing each pathway.\n All pathways were written in one file by lines in format: <name>:<pathway>.\n Function creates dictionary key: name; value: (graph, dict_edges)\n :param input_file: input file with pathways\n :return:\n '
graphs = {}
with open(input_file, 'r') as file_in:
for line in file_in:
line = line.strip().split(':')
pathway = line[1]
name = line[0]
print(name)
Graph = nx.MultiDiGraph()
Graph.add_node(0, color='green')
Graph.add_node(1, color='red')
(Graph, dict_edges, unnecessary_nodes) = recursive_parsing(G=Graph, dict_edges={}, unnecessary_nodes=[], expression=pathway, start_node=0, end_node=1, weight=1)
graphs[name] = tuple([Graph, dict_edges, unnecessary_nodes])
print('done')
path_output = os.path.join(outdir, 'graphs.pkl')
f = open(path_output, 'wb')
pickle.dump(graphs, f)
f.close() | def pathways_processing(input_file, outdir):
'\n Main function for processing each pathway.\n All pathways were written in one file by lines in format: <name>:<pathway>.\n Function creates dictionary key: name; value: (graph, dict_edges)\n :param input_file: input file with pathways\n :return:\n '
graphs = {}
with open(input_file, 'r') as file_in:
for line in file_in:
line = line.strip().split(':')
pathway = line[1]
name = line[0]
print(name)
Graph = nx.MultiDiGraph()
Graph.add_node(0, color='green')
Graph.add_node(1, color='red')
(Graph, dict_edges, unnecessary_nodes) = recursive_parsing(G=Graph, dict_edges={}, unnecessary_nodes=[], expression=pathway, start_node=0, end_node=1, weight=1)
graphs[name] = tuple([Graph, dict_edges, unnecessary_nodes])
print('done')
path_output = os.path.join(outdir, 'graphs.pkl')
f = open(path_output, 'wb')
pickle.dump(graphs, f)
f.close()<|docstring|>Main function for processing each pathway.
All pathways were written in one file by lines in format: <name>:<pathway>.
Function creates dictionary key: name; value: (graph, dict_edges)
:param input_file: input file with pathways
:return:<|endoftext|> |
776d9471747659f1cb28cfe8e0be3527a34bd4faaa3394a37ff1a9c66197cf1b | def test_init_with_notebook_task_named_parameters(self):
'\n Test the initializer with the named parameters.\n '
op = DatabricksSubmitRunOperator(task_id=TASK_ID, new_cluster=NEW_CLUSTER, notebook_task=NOTEBOOK_TASK)
expected = databricks_operator._deep_string_coerce({'new_cluster': NEW_CLUSTER, 'notebook_task': NOTEBOOK_TASK, 'run_name': TASK_ID})
assert (expected == op.json) | Test the initializer with the named parameters. | tests/providers/databricks/operators/test_databricks.py | test_init_with_notebook_task_named_parameters | ansokchea/airflow | 15,947 | python | def test_init_with_notebook_task_named_parameters(self):
'\n \n '
op = DatabricksSubmitRunOperator(task_id=TASK_ID, new_cluster=NEW_CLUSTER, notebook_task=NOTEBOOK_TASK)
expected = databricks_operator._deep_string_coerce({'new_cluster': NEW_CLUSTER, 'notebook_task': NOTEBOOK_TASK, 'run_name': TASK_ID})
assert (expected == op.json) | def test_init_with_notebook_task_named_parameters(self):
'\n \n '
op = DatabricksSubmitRunOperator(task_id=TASK_ID, new_cluster=NEW_CLUSTER, notebook_task=NOTEBOOK_TASK)
expected = databricks_operator._deep_string_coerce({'new_cluster': NEW_CLUSTER, 'notebook_task': NOTEBOOK_TASK, 'run_name': TASK_ID})
assert (expected == op.json)<|docstring|>Test the initializer with the named parameters.<|endoftext|> |
f2e8d9832e95d4d47f7cce4e2461cfec7e06a0a5107177d4b3544b59855780df | def test_init_with_spark_python_task_named_parameters(self):
'\n Test the initializer with the named parameters.\n '
op = DatabricksSubmitRunOperator(task_id=TASK_ID, new_cluster=NEW_CLUSTER, spark_python_task=SPARK_PYTHON_TASK)
expected = databricks_operator._deep_string_coerce({'new_cluster': NEW_CLUSTER, 'spark_python_task': SPARK_PYTHON_TASK, 'run_name': TASK_ID})
assert (expected == op.json) | Test the initializer with the named parameters. | tests/providers/databricks/operators/test_databricks.py | test_init_with_spark_python_task_named_parameters | ansokchea/airflow | 15,947 | python | def test_init_with_spark_python_task_named_parameters(self):
'\n \n '
op = DatabricksSubmitRunOperator(task_id=TASK_ID, new_cluster=NEW_CLUSTER, spark_python_task=SPARK_PYTHON_TASK)
expected = databricks_operator._deep_string_coerce({'new_cluster': NEW_CLUSTER, 'spark_python_task': SPARK_PYTHON_TASK, 'run_name': TASK_ID})
assert (expected == op.json) | def test_init_with_spark_python_task_named_parameters(self):
'\n \n '
op = DatabricksSubmitRunOperator(task_id=TASK_ID, new_cluster=NEW_CLUSTER, spark_python_task=SPARK_PYTHON_TASK)
expected = databricks_operator._deep_string_coerce({'new_cluster': NEW_CLUSTER, 'spark_python_task': SPARK_PYTHON_TASK, 'run_name': TASK_ID})
assert (expected == op.json)<|docstring|>Test the initializer with the named parameters.<|endoftext|> |
4644f7c00d3d8a4b55bd8220c8c00be23dd9fdb543e8d46c6feb9e38dda27cef | def test_init_with_spark_submit_task_named_parameters(self):
'\n Test the initializer with the named parameters.\n '
op = DatabricksSubmitRunOperator(task_id=TASK_ID, new_cluster=NEW_CLUSTER, spark_submit_task=SPARK_SUBMIT_TASK)
expected = databricks_operator._deep_string_coerce({'new_cluster': NEW_CLUSTER, 'spark_submit_task': SPARK_SUBMIT_TASK, 'run_name': TASK_ID})
assert (expected == op.json) | Test the initializer with the named parameters. | tests/providers/databricks/operators/test_databricks.py | test_init_with_spark_submit_task_named_parameters | ansokchea/airflow | 15,947 | python | def test_init_with_spark_submit_task_named_parameters(self):
'\n \n '
op = DatabricksSubmitRunOperator(task_id=TASK_ID, new_cluster=NEW_CLUSTER, spark_submit_task=SPARK_SUBMIT_TASK)
expected = databricks_operator._deep_string_coerce({'new_cluster': NEW_CLUSTER, 'spark_submit_task': SPARK_SUBMIT_TASK, 'run_name': TASK_ID})
assert (expected == op.json) | def test_init_with_spark_submit_task_named_parameters(self):
'\n \n '
op = DatabricksSubmitRunOperator(task_id=TASK_ID, new_cluster=NEW_CLUSTER, spark_submit_task=SPARK_SUBMIT_TASK)
expected = databricks_operator._deep_string_coerce({'new_cluster': NEW_CLUSTER, 'spark_submit_task': SPARK_SUBMIT_TASK, 'run_name': TASK_ID})
assert (expected == op.json)<|docstring|>Test the initializer with the named parameters.<|endoftext|> |
dfbb696bd549c086312fc1bd34d5828f33fe9be5729e84952a10780d7c2cc73b | def test_init_with_json(self):
'\n Test the initializer with json data.\n '
json = {'new_cluster': NEW_CLUSTER, 'notebook_task': NOTEBOOK_TASK}
op = DatabricksSubmitRunOperator(task_id=TASK_ID, json=json)
expected = databricks_operator._deep_string_coerce({'new_cluster': NEW_CLUSTER, 'notebook_task': NOTEBOOK_TASK, 'run_name': TASK_ID})
assert (expected == op.json) | Test the initializer with json data. | tests/providers/databricks/operators/test_databricks.py | test_init_with_json | ansokchea/airflow | 15,947 | python | def test_init_with_json(self):
'\n \n '
json = {'new_cluster': NEW_CLUSTER, 'notebook_task': NOTEBOOK_TASK}
op = DatabricksSubmitRunOperator(task_id=TASK_ID, json=json)
expected = databricks_operator._deep_string_coerce({'new_cluster': NEW_CLUSTER, 'notebook_task': NOTEBOOK_TASK, 'run_name': TASK_ID})
assert (expected == op.json) | def test_init_with_json(self):
'\n \n '
json = {'new_cluster': NEW_CLUSTER, 'notebook_task': NOTEBOOK_TASK}
op = DatabricksSubmitRunOperator(task_id=TASK_ID, json=json)
expected = databricks_operator._deep_string_coerce({'new_cluster': NEW_CLUSTER, 'notebook_task': NOTEBOOK_TASK, 'run_name': TASK_ID})
assert (expected == op.json)<|docstring|>Test the initializer with json data.<|endoftext|> |
4f3cee4422e5e552f61f5b345590d139dbae025ff397800ef3235a48ee1b062b | def test_init_with_specified_run_name(self):
'\n Test the initializer with a specified run_name.\n '
json = {'new_cluster': NEW_CLUSTER, 'notebook_task': NOTEBOOK_TASK, 'run_name': RUN_NAME}
op = DatabricksSubmitRunOperator(task_id=TASK_ID, json=json)
expected = databricks_operator._deep_string_coerce({'new_cluster': NEW_CLUSTER, 'notebook_task': NOTEBOOK_TASK, 'run_name': RUN_NAME})
assert (expected == op.json) | Test the initializer with a specified run_name. | tests/providers/databricks/operators/test_databricks.py | test_init_with_specified_run_name | ansokchea/airflow | 15,947 | python | def test_init_with_specified_run_name(self):
'\n \n '
json = {'new_cluster': NEW_CLUSTER, 'notebook_task': NOTEBOOK_TASK, 'run_name': RUN_NAME}
op = DatabricksSubmitRunOperator(task_id=TASK_ID, json=json)
expected = databricks_operator._deep_string_coerce({'new_cluster': NEW_CLUSTER, 'notebook_task': NOTEBOOK_TASK, 'run_name': RUN_NAME})
assert (expected == op.json) | def test_init_with_specified_run_name(self):
'\n \n '
json = {'new_cluster': NEW_CLUSTER, 'notebook_task': NOTEBOOK_TASK, 'run_name': RUN_NAME}
op = DatabricksSubmitRunOperator(task_id=TASK_ID, json=json)
expected = databricks_operator._deep_string_coerce({'new_cluster': NEW_CLUSTER, 'notebook_task': NOTEBOOK_TASK, 'run_name': RUN_NAME})
assert (expected == op.json)<|docstring|>Test the initializer with a specified run_name.<|endoftext|> |
e2d3239311f233bf2727e3dc402eb90f318524b9951aece3e469b24bae85c8b4 | def test_init_with_merging(self):
'\n Test the initializer when json and other named parameters are both\n provided. The named parameters should override top level keys in the\n json dict.\n '
override_new_cluster = {'workers': 999}
json = {'new_cluster': NEW_CLUSTER, 'notebook_task': NOTEBOOK_TASK}
op = DatabricksSubmitRunOperator(task_id=TASK_ID, json=json, new_cluster=override_new_cluster)
expected = databricks_operator._deep_string_coerce({'new_cluster': override_new_cluster, 'notebook_task': NOTEBOOK_TASK, 'run_name': TASK_ID})
assert (expected == op.json) | Test the initializer when json and other named parameters are both
provided. The named parameters should override top level keys in the
json dict. | tests/providers/databricks/operators/test_databricks.py | test_init_with_merging | ansokchea/airflow | 15,947 | python | def test_init_with_merging(self):
'\n Test the initializer when json and other named parameters are both\n provided. The named parameters should override top level keys in the\n json dict.\n '
override_new_cluster = {'workers': 999}
json = {'new_cluster': NEW_CLUSTER, 'notebook_task': NOTEBOOK_TASK}
op = DatabricksSubmitRunOperator(task_id=TASK_ID, json=json, new_cluster=override_new_cluster)
expected = databricks_operator._deep_string_coerce({'new_cluster': override_new_cluster, 'notebook_task': NOTEBOOK_TASK, 'run_name': TASK_ID})
assert (expected == op.json) | def test_init_with_merging(self):
'\n Test the initializer when json and other named parameters are both\n provided. The named parameters should override top level keys in the\n json dict.\n '
override_new_cluster = {'workers': 999}
json = {'new_cluster': NEW_CLUSTER, 'notebook_task': NOTEBOOK_TASK}
op = DatabricksSubmitRunOperator(task_id=TASK_ID, json=json, new_cluster=override_new_cluster)
expected = databricks_operator._deep_string_coerce({'new_cluster': override_new_cluster, 'notebook_task': NOTEBOOK_TASK, 'run_name': TASK_ID})
assert (expected == op.json)<|docstring|>Test the initializer when json and other named parameters are both
provided. The named parameters should override top level keys in the
json dict.<|endoftext|> |
f500bfc98f1e9f84ad2d72bbf9d029659478f03bf55cf035d51ab9a598e03c50 | @mock.patch('airflow.providers.databricks.operators.databricks.DatabricksHook')
def test_exec_success(self, db_mock_class):
'\n Test the execute function in case where the run is successful.\n '
run = {'new_cluster': NEW_CLUSTER, 'notebook_task': NOTEBOOK_TASK}
op = DatabricksSubmitRunOperator(task_id=TASK_ID, json=run)
db_mock = db_mock_class.return_value
db_mock.submit_run.return_value = 1
db_mock.get_run_state.return_value = RunState('TERMINATED', 'SUCCESS', '')
op.execute(None)
expected = databricks_operator._deep_string_coerce({'new_cluster': NEW_CLUSTER, 'notebook_task': NOTEBOOK_TASK, 'run_name': TASK_ID})
db_mock_class.assert_called_once_with(DEFAULT_CONN_ID, retry_limit=op.databricks_retry_limit, retry_delay=op.databricks_retry_delay)
db_mock.submit_run.assert_called_once_with(expected)
db_mock.get_run_page_url.assert_called_once_with(RUN_ID)
db_mock.get_run_state.assert_called_once_with(RUN_ID)
assert (RUN_ID == op.run_id) | Test the execute function in case where the run is successful. | tests/providers/databricks/operators/test_databricks.py | test_exec_success | ansokchea/airflow | 15,947 | python | @mock.patch('airflow.providers.databricks.operators.databricks.DatabricksHook')
def test_exec_success(self, db_mock_class):
'\n \n '
run = {'new_cluster': NEW_CLUSTER, 'notebook_task': NOTEBOOK_TASK}
op = DatabricksSubmitRunOperator(task_id=TASK_ID, json=run)
db_mock = db_mock_class.return_value
db_mock.submit_run.return_value = 1
db_mock.get_run_state.return_value = RunState('TERMINATED', 'SUCCESS', )
op.execute(None)
expected = databricks_operator._deep_string_coerce({'new_cluster': NEW_CLUSTER, 'notebook_task': NOTEBOOK_TASK, 'run_name': TASK_ID})
db_mock_class.assert_called_once_with(DEFAULT_CONN_ID, retry_limit=op.databricks_retry_limit, retry_delay=op.databricks_retry_delay)
db_mock.submit_run.assert_called_once_with(expected)
db_mock.get_run_page_url.assert_called_once_with(RUN_ID)
db_mock.get_run_state.assert_called_once_with(RUN_ID)
assert (RUN_ID == op.run_id) | @mock.patch('airflow.providers.databricks.operators.databricks.DatabricksHook')
def test_exec_success(self, db_mock_class):
'\n \n '
run = {'new_cluster': NEW_CLUSTER, 'notebook_task': NOTEBOOK_TASK}
op = DatabricksSubmitRunOperator(task_id=TASK_ID, json=run)
db_mock = db_mock_class.return_value
db_mock.submit_run.return_value = 1
db_mock.get_run_state.return_value = RunState('TERMINATED', 'SUCCESS', )
op.execute(None)
expected = databricks_operator._deep_string_coerce({'new_cluster': NEW_CLUSTER, 'notebook_task': NOTEBOOK_TASK, 'run_name': TASK_ID})
db_mock_class.assert_called_once_with(DEFAULT_CONN_ID, retry_limit=op.databricks_retry_limit, retry_delay=op.databricks_retry_delay)
db_mock.submit_run.assert_called_once_with(expected)
db_mock.get_run_page_url.assert_called_once_with(RUN_ID)
db_mock.get_run_state.assert_called_once_with(RUN_ID)
assert (RUN_ID == op.run_id)<|docstring|>Test the execute function in case where the run is successful.<|endoftext|> |
1efb03330bde9b83a19aaf2a9f367d066af397b5fe675721ff99218742d49e2c | @mock.patch('airflow.providers.databricks.operators.databricks.DatabricksHook')
def test_exec_failure(self, db_mock_class):
'\n Test the execute function in case where the run failed.\n '
run = {'new_cluster': NEW_CLUSTER, 'notebook_task': NOTEBOOK_TASK}
op = DatabricksSubmitRunOperator(task_id=TASK_ID, json=run)
db_mock = db_mock_class.return_value
db_mock.submit_run.return_value = 1
db_mock.get_run_state.return_value = RunState('TERMINATED', 'FAILED', '')
with pytest.raises(AirflowException):
op.execute(None)
expected = databricks_operator._deep_string_coerce({'new_cluster': NEW_CLUSTER, 'notebook_task': NOTEBOOK_TASK, 'run_name': TASK_ID})
db_mock_class.assert_called_once_with(DEFAULT_CONN_ID, retry_limit=op.databricks_retry_limit, retry_delay=op.databricks_retry_delay)
db_mock.submit_run.assert_called_once_with(expected)
db_mock.get_run_page_url.assert_called_once_with(RUN_ID)
db_mock.get_run_state.assert_called_once_with(RUN_ID)
assert (RUN_ID == op.run_id) | Test the execute function in case where the run failed. | tests/providers/databricks/operators/test_databricks.py | test_exec_failure | ansokchea/airflow | 15,947 | python | @mock.patch('airflow.providers.databricks.operators.databricks.DatabricksHook')
def test_exec_failure(self, db_mock_class):
'\n \n '
run = {'new_cluster': NEW_CLUSTER, 'notebook_task': NOTEBOOK_TASK}
op = DatabricksSubmitRunOperator(task_id=TASK_ID, json=run)
db_mock = db_mock_class.return_value
db_mock.submit_run.return_value = 1
db_mock.get_run_state.return_value = RunState('TERMINATED', 'FAILED', )
with pytest.raises(AirflowException):
op.execute(None)
expected = databricks_operator._deep_string_coerce({'new_cluster': NEW_CLUSTER, 'notebook_task': NOTEBOOK_TASK, 'run_name': TASK_ID})
db_mock_class.assert_called_once_with(DEFAULT_CONN_ID, retry_limit=op.databricks_retry_limit, retry_delay=op.databricks_retry_delay)
db_mock.submit_run.assert_called_once_with(expected)
db_mock.get_run_page_url.assert_called_once_with(RUN_ID)
db_mock.get_run_state.assert_called_once_with(RUN_ID)
assert (RUN_ID == op.run_id) | @mock.patch('airflow.providers.databricks.operators.databricks.DatabricksHook')
def test_exec_failure(self, db_mock_class):
'\n \n '
run = {'new_cluster': NEW_CLUSTER, 'notebook_task': NOTEBOOK_TASK}
op = DatabricksSubmitRunOperator(task_id=TASK_ID, json=run)
db_mock = db_mock_class.return_value
db_mock.submit_run.return_value = 1
db_mock.get_run_state.return_value = RunState('TERMINATED', 'FAILED', )
with pytest.raises(AirflowException):
op.execute(None)
expected = databricks_operator._deep_string_coerce({'new_cluster': NEW_CLUSTER, 'notebook_task': NOTEBOOK_TASK, 'run_name': TASK_ID})
db_mock_class.assert_called_once_with(DEFAULT_CONN_ID, retry_limit=op.databricks_retry_limit, retry_delay=op.databricks_retry_delay)
db_mock.submit_run.assert_called_once_with(expected)
db_mock.get_run_page_url.assert_called_once_with(RUN_ID)
db_mock.get_run_state.assert_called_once_with(RUN_ID)
assert (RUN_ID == op.run_id)<|docstring|>Test the execute function in case where the run failed.<|endoftext|> |
a0d66ef26d374a032b0cba6682eb9639f8e3eaf92c2ce4dd1a0cb1b162bf3d83 | def test_init_with_named_parameters(self):
'\n Test the initializer with the named parameters.\n '
op = DatabricksRunNowOperator(job_id=JOB_ID, task_id=TASK_ID)
expected = databricks_operator._deep_string_coerce({'job_id': 42})
assert (expected == op.json) | Test the initializer with the named parameters. | tests/providers/databricks/operators/test_databricks.py | test_init_with_named_parameters | ansokchea/airflow | 15,947 | python | def test_init_with_named_parameters(self):
'\n \n '
op = DatabricksRunNowOperator(job_id=JOB_ID, task_id=TASK_ID)
expected = databricks_operator._deep_string_coerce({'job_id': 42})
assert (expected == op.json) | def test_init_with_named_parameters(self):
'\n \n '
op = DatabricksRunNowOperator(job_id=JOB_ID, task_id=TASK_ID)
expected = databricks_operator._deep_string_coerce({'job_id': 42})
assert (expected == op.json)<|docstring|>Test the initializer with the named parameters.<|endoftext|> |
500681d931a8e6e0fba8c2db3f7764d36f700c9196a28c7a5964a6b5d07d05c9 | def test_init_with_json(self):
'\n Test the initializer with json data.\n '
json = {'notebook_params': NOTEBOOK_PARAMS, 'jar_params': JAR_PARAMS, 'python_params': PYTHON_PARAMS, 'spark_submit_params': SPARK_SUBMIT_PARAMS, 'job_id': JOB_ID}
op = DatabricksRunNowOperator(task_id=TASK_ID, json=json)
expected = databricks_operator._deep_string_coerce({'notebook_params': NOTEBOOK_PARAMS, 'jar_params': JAR_PARAMS, 'python_params': PYTHON_PARAMS, 'spark_submit_params': SPARK_SUBMIT_PARAMS, 'job_id': JOB_ID})
assert (expected == op.json) | Test the initializer with json data. | tests/providers/databricks/operators/test_databricks.py | test_init_with_json | ansokchea/airflow | 15,947 | python | def test_init_with_json(self):
'\n \n '
json = {'notebook_params': NOTEBOOK_PARAMS, 'jar_params': JAR_PARAMS, 'python_params': PYTHON_PARAMS, 'spark_submit_params': SPARK_SUBMIT_PARAMS, 'job_id': JOB_ID}
op = DatabricksRunNowOperator(task_id=TASK_ID, json=json)
expected = databricks_operator._deep_string_coerce({'notebook_params': NOTEBOOK_PARAMS, 'jar_params': JAR_PARAMS, 'python_params': PYTHON_PARAMS, 'spark_submit_params': SPARK_SUBMIT_PARAMS, 'job_id': JOB_ID})
assert (expected == op.json) | def test_init_with_json(self):
'\n \n '
json = {'notebook_params': NOTEBOOK_PARAMS, 'jar_params': JAR_PARAMS, 'python_params': PYTHON_PARAMS, 'spark_submit_params': SPARK_SUBMIT_PARAMS, 'job_id': JOB_ID}
op = DatabricksRunNowOperator(task_id=TASK_ID, json=json)
expected = databricks_operator._deep_string_coerce({'notebook_params': NOTEBOOK_PARAMS, 'jar_params': JAR_PARAMS, 'python_params': PYTHON_PARAMS, 'spark_submit_params': SPARK_SUBMIT_PARAMS, 'job_id': JOB_ID})
assert (expected == op.json)<|docstring|>Test the initializer with json data.<|endoftext|> |
68e5d8de00024e72a9407018411c33d58001158ffcdda0a1a9e327b66e89065d | def test_init_with_merging(self):
'\n Test the initializer when json and other named parameters are both\n provided. The named parameters should override top level keys in the\n json dict.\n '
override_notebook_params = {'workers': 999}
json = {'notebook_params': NOTEBOOK_PARAMS, 'jar_params': JAR_PARAMS}
op = DatabricksRunNowOperator(task_id=TASK_ID, json=json, job_id=JOB_ID, notebook_params=override_notebook_params, python_params=PYTHON_PARAMS, spark_submit_params=SPARK_SUBMIT_PARAMS)
expected = databricks_operator._deep_string_coerce({'notebook_params': override_notebook_params, 'jar_params': JAR_PARAMS, 'python_params': PYTHON_PARAMS, 'spark_submit_params': SPARK_SUBMIT_PARAMS, 'job_id': JOB_ID})
assert (expected == op.json) | Test the initializer when json and other named parameters are both
provided. The named parameters should override top level keys in the
json dict. | tests/providers/databricks/operators/test_databricks.py | test_init_with_merging | ansokchea/airflow | 15,947 | python | def test_init_with_merging(self):
'\n Test the initializer when json and other named parameters are both\n provided. The named parameters should override top level keys in the\n json dict.\n '
override_notebook_params = {'workers': 999}
json = {'notebook_params': NOTEBOOK_PARAMS, 'jar_params': JAR_PARAMS}
op = DatabricksRunNowOperator(task_id=TASK_ID, json=json, job_id=JOB_ID, notebook_params=override_notebook_params, python_params=PYTHON_PARAMS, spark_submit_params=SPARK_SUBMIT_PARAMS)
expected = databricks_operator._deep_string_coerce({'notebook_params': override_notebook_params, 'jar_params': JAR_PARAMS, 'python_params': PYTHON_PARAMS, 'spark_submit_params': SPARK_SUBMIT_PARAMS, 'job_id': JOB_ID})
assert (expected == op.json) | def test_init_with_merging(self):
'\n Test the initializer when json and other named parameters are both\n provided. The named parameters should override top level keys in the\n json dict.\n '
override_notebook_params = {'workers': 999}
json = {'notebook_params': NOTEBOOK_PARAMS, 'jar_params': JAR_PARAMS}
op = DatabricksRunNowOperator(task_id=TASK_ID, json=json, job_id=JOB_ID, notebook_params=override_notebook_params, python_params=PYTHON_PARAMS, spark_submit_params=SPARK_SUBMIT_PARAMS)
expected = databricks_operator._deep_string_coerce({'notebook_params': override_notebook_params, 'jar_params': JAR_PARAMS, 'python_params': PYTHON_PARAMS, 'spark_submit_params': SPARK_SUBMIT_PARAMS, 'job_id': JOB_ID})
assert (expected == op.json)<|docstring|>Test the initializer when json and other named parameters are both
provided. The named parameters should override top level keys in the
json dict.<|endoftext|> |
55d53a06d3521d36f2affe110f02380f0485d670b6a20cd90ea6592d4d389b50 | @mock.patch('airflow.providers.databricks.operators.databricks.DatabricksHook')
def test_exec_success(self, db_mock_class):
'\n Test the execute function in case where the run is successful.\n '
run = {'notebook_params': NOTEBOOK_PARAMS, 'notebook_task': NOTEBOOK_TASK, 'jar_params': JAR_PARAMS}
op = DatabricksRunNowOperator(task_id=TASK_ID, job_id=JOB_ID, json=run)
db_mock = db_mock_class.return_value
db_mock.run_now.return_value = 1
db_mock.get_run_state.return_value = RunState('TERMINATED', 'SUCCESS', '')
op.execute(None)
expected = databricks_operator._deep_string_coerce({'notebook_params': NOTEBOOK_PARAMS, 'notebook_task': NOTEBOOK_TASK, 'jar_params': JAR_PARAMS, 'job_id': JOB_ID})
db_mock_class.assert_called_once_with(DEFAULT_CONN_ID, retry_limit=op.databricks_retry_limit, retry_delay=op.databricks_retry_delay)
db_mock.run_now.assert_called_once_with(expected)
db_mock.get_run_page_url.assert_called_once_with(RUN_ID)
db_mock.get_run_state.assert_called_once_with(RUN_ID)
assert (RUN_ID == op.run_id) | Test the execute function in case where the run is successful. | tests/providers/databricks/operators/test_databricks.py | test_exec_success | ansokchea/airflow | 15,947 | python | @mock.patch('airflow.providers.databricks.operators.databricks.DatabricksHook')
def test_exec_success(self, db_mock_class):
'\n \n '
run = {'notebook_params': NOTEBOOK_PARAMS, 'notebook_task': NOTEBOOK_TASK, 'jar_params': JAR_PARAMS}
op = DatabricksRunNowOperator(task_id=TASK_ID, job_id=JOB_ID, json=run)
db_mock = db_mock_class.return_value
db_mock.run_now.return_value = 1
db_mock.get_run_state.return_value = RunState('TERMINATED', 'SUCCESS', )
op.execute(None)
expected = databricks_operator._deep_string_coerce({'notebook_params': NOTEBOOK_PARAMS, 'notebook_task': NOTEBOOK_TASK, 'jar_params': JAR_PARAMS, 'job_id': JOB_ID})
db_mock_class.assert_called_once_with(DEFAULT_CONN_ID, retry_limit=op.databricks_retry_limit, retry_delay=op.databricks_retry_delay)
db_mock.run_now.assert_called_once_with(expected)
db_mock.get_run_page_url.assert_called_once_with(RUN_ID)
db_mock.get_run_state.assert_called_once_with(RUN_ID)
assert (RUN_ID == op.run_id) | @mock.patch('airflow.providers.databricks.operators.databricks.DatabricksHook')
def test_exec_success(self, db_mock_class):
'\n \n '
run = {'notebook_params': NOTEBOOK_PARAMS, 'notebook_task': NOTEBOOK_TASK, 'jar_params': JAR_PARAMS}
op = DatabricksRunNowOperator(task_id=TASK_ID, job_id=JOB_ID, json=run)
db_mock = db_mock_class.return_value
db_mock.run_now.return_value = 1
db_mock.get_run_state.return_value = RunState('TERMINATED', 'SUCCESS', )
op.execute(None)
expected = databricks_operator._deep_string_coerce({'notebook_params': NOTEBOOK_PARAMS, 'notebook_task': NOTEBOOK_TASK, 'jar_params': JAR_PARAMS, 'job_id': JOB_ID})
db_mock_class.assert_called_once_with(DEFAULT_CONN_ID, retry_limit=op.databricks_retry_limit, retry_delay=op.databricks_retry_delay)
db_mock.run_now.assert_called_once_with(expected)
db_mock.get_run_page_url.assert_called_once_with(RUN_ID)
db_mock.get_run_state.assert_called_once_with(RUN_ID)
assert (RUN_ID == op.run_id)<|docstring|>Test the execute function in case where the run is successful.<|endoftext|> |
c0bcb7a67c748dd88efd58ee1524364ebfd8c7e7ec155a2cf1e1d5a00e118352 | @mock.patch('airflow.providers.databricks.operators.databricks.DatabricksHook')
def test_exec_failure(self, db_mock_class):
'\n Test the execute function in case where the run failed.\n '
run = {'notebook_params': NOTEBOOK_PARAMS, 'notebook_task': NOTEBOOK_TASK, 'jar_params': JAR_PARAMS}
op = DatabricksRunNowOperator(task_id=TASK_ID, job_id=JOB_ID, json=run)
db_mock = db_mock_class.return_value
db_mock.run_now.return_value = 1
db_mock.get_run_state.return_value = RunState('TERMINATED', 'FAILED', '')
with pytest.raises(AirflowException):
op.execute(None)
expected = databricks_operator._deep_string_coerce({'notebook_params': NOTEBOOK_PARAMS, 'notebook_task': NOTEBOOK_TASK, 'jar_params': JAR_PARAMS, 'job_id': JOB_ID})
db_mock_class.assert_called_once_with(DEFAULT_CONN_ID, retry_limit=op.databricks_retry_limit, retry_delay=op.databricks_retry_delay)
db_mock.run_now.assert_called_once_with(expected)
db_mock.get_run_page_url.assert_called_once_with(RUN_ID)
db_mock.get_run_state.assert_called_once_with(RUN_ID)
assert (RUN_ID == op.run_id) | Test the execute function in case where the run failed. | tests/providers/databricks/operators/test_databricks.py | test_exec_failure | ansokchea/airflow | 15,947 | python | @mock.patch('airflow.providers.databricks.operators.databricks.DatabricksHook')
def test_exec_failure(self, db_mock_class):
'\n \n '
run = {'notebook_params': NOTEBOOK_PARAMS, 'notebook_task': NOTEBOOK_TASK, 'jar_params': JAR_PARAMS}
op = DatabricksRunNowOperator(task_id=TASK_ID, job_id=JOB_ID, json=run)
db_mock = db_mock_class.return_value
db_mock.run_now.return_value = 1
db_mock.get_run_state.return_value = RunState('TERMINATED', 'FAILED', )
with pytest.raises(AirflowException):
op.execute(None)
expected = databricks_operator._deep_string_coerce({'notebook_params': NOTEBOOK_PARAMS, 'notebook_task': NOTEBOOK_TASK, 'jar_params': JAR_PARAMS, 'job_id': JOB_ID})
db_mock_class.assert_called_once_with(DEFAULT_CONN_ID, retry_limit=op.databricks_retry_limit, retry_delay=op.databricks_retry_delay)
db_mock.run_now.assert_called_once_with(expected)
db_mock.get_run_page_url.assert_called_once_with(RUN_ID)
db_mock.get_run_state.assert_called_once_with(RUN_ID)
assert (RUN_ID == op.run_id) | @mock.patch('airflow.providers.databricks.operators.databricks.DatabricksHook')
def test_exec_failure(self, db_mock_class):
'\n \n '
run = {'notebook_params': NOTEBOOK_PARAMS, 'notebook_task': NOTEBOOK_TASK, 'jar_params': JAR_PARAMS}
op = DatabricksRunNowOperator(task_id=TASK_ID, job_id=JOB_ID, json=run)
db_mock = db_mock_class.return_value
db_mock.run_now.return_value = 1
db_mock.get_run_state.return_value = RunState('TERMINATED', 'FAILED', )
with pytest.raises(AirflowException):
op.execute(None)
expected = databricks_operator._deep_string_coerce({'notebook_params': NOTEBOOK_PARAMS, 'notebook_task': NOTEBOOK_TASK, 'jar_params': JAR_PARAMS, 'job_id': JOB_ID})
db_mock_class.assert_called_once_with(DEFAULT_CONN_ID, retry_limit=op.databricks_retry_limit, retry_delay=op.databricks_retry_delay)
db_mock.run_now.assert_called_once_with(expected)
db_mock.get_run_page_url.assert_called_once_with(RUN_ID)
db_mock.get_run_state.assert_called_once_with(RUN_ID)
assert (RUN_ID == op.run_id)<|docstring|>Test the execute function in case where the run failed.<|endoftext|> |
43267d8d716cf0ea305674291a9323fbe993e95d9fbeddb84cc9d5e4020b9c60 | def generate_feature_vectors(network_r_path, inH, threshold, featIdx, verbose=True):
' Generate individual market sheds for each feature in the input dataset\n \n INPUTS\n network_r [string] - path to raster from which to grab index for calculations in MCP\n mcp [skimage.graph.MCP_Geometric] - input graph\n inH [geopandas data frame] - geopandas data frame from which to calculate features\n threshold [list of int] - travel treshold from which to calculate vectors in units of graph\n featIdx [string] - column name in inH to append to output marketshed dataset\n \n RETURNS\n [geopandas dataframe]\n '
n = inH.shape[0]
feat_count = 0
complete_shapes = []
network_r = rasterio.open(network_r_path)
traversal_time = network_r.read()[(0, :, :)]
mcp = graph.MCP_Geometric(traversal_time)
thread_id = multiprocessing.current_process().name
for (idx, row) in inH.iterrows():
feat_count = (feat_count + 1)
if verbose:
tPrint(f'{thread_id}: {feat_count} of {n}')
cur_idx = network_r.index(row['geometry'].x, row['geometry'].y)
if ((cur_idx[0] > 0) and (cur_idx[1] > 0) and (cur_idx[0] < network_r.shape[0]) and (cur_idx[1] < network_r.shape[1])):
(costs, traceback) = mcp.find_costs([cur_idx])
for thresh in threshold:
within_time = ((costs < thresh) * 1).astype('int16')
all_shapes = []
for (cShape, value) in features.shapes(within_time, transform=network_r.transform):
if (value == 1.0):
all_shapes.append([shape(cShape)])
complete_shape = cascaded_union([x[0] for x in all_shapes])
complete_shapes.append([complete_shape, thresh, row[featIdx]])
final = gpd.GeoDataFrame(complete_shapes, columns=['geometry', 'threshold', 'IDX'], crs=network_r.crs)
return final | Generate individual market sheds for each feature in the input dataset
INPUTS
network_r [string] - path to raster from which to grab index for calculations in MCP
mcp [skimage.graph.MCP_Geometric] - input graph
inH [geopandas data frame] - geopandas data frame from which to calculate features
threshold [list of int] - travel treshold from which to calculate vectors in units of graph
featIdx [string] - column name in inH to append to output marketshed dataset
RETURNS
[geopandas dataframe] | src/INFRA_SAP/Notebooks/MP_HTH_INFRA_KEN_B__HospitalAssessments.py | generate_feature_vectors | worldbank/Khyber-Pakhtunkhwa-Accessibility-Analysis | 2 | python | def generate_feature_vectors(network_r_path, inH, threshold, featIdx, verbose=True):
' Generate individual market sheds for each feature in the input dataset\n \n INPUTS\n network_r [string] - path to raster from which to grab index for calculations in MCP\n mcp [skimage.graph.MCP_Geometric] - input graph\n inH [geopandas data frame] - geopandas data frame from which to calculate features\n threshold [list of int] - travel treshold from which to calculate vectors in units of graph\n featIdx [string] - column name in inH to append to output marketshed dataset\n \n RETURNS\n [geopandas dataframe]\n '
n = inH.shape[0]
feat_count = 0
complete_shapes = []
network_r = rasterio.open(network_r_path)
traversal_time = network_r.read()[(0, :, :)]
mcp = graph.MCP_Geometric(traversal_time)
thread_id = multiprocessing.current_process().name
for (idx, row) in inH.iterrows():
feat_count = (feat_count + 1)
if verbose:
tPrint(f'{thread_id}: {feat_count} of {n}')
cur_idx = network_r.index(row['geometry'].x, row['geometry'].y)
if ((cur_idx[0] > 0) and (cur_idx[1] > 0) and (cur_idx[0] < network_r.shape[0]) and (cur_idx[1] < network_r.shape[1])):
(costs, traceback) = mcp.find_costs([cur_idx])
for thresh in threshold:
within_time = ((costs < thresh) * 1).astype('int16')
all_shapes = []
for (cShape, value) in features.shapes(within_time, transform=network_r.transform):
if (value == 1.0):
all_shapes.append([shape(cShape)])
complete_shape = cascaded_union([x[0] for x in all_shapes])
complete_shapes.append([complete_shape, thresh, row[featIdx]])
final = gpd.GeoDataFrame(complete_shapes, columns=['geometry', 'threshold', 'IDX'], crs=network_r.crs)
return final | def generate_feature_vectors(network_r_path, inH, threshold, featIdx, verbose=True):
' Generate individual market sheds for each feature in the input dataset\n \n INPUTS\n network_r [string] - path to raster from which to grab index for calculations in MCP\n mcp [skimage.graph.MCP_Geometric] - input graph\n inH [geopandas data frame] - geopandas data frame from which to calculate features\n threshold [list of int] - travel treshold from which to calculate vectors in units of graph\n featIdx [string] - column name in inH to append to output marketshed dataset\n \n RETURNS\n [geopandas dataframe]\n '
n = inH.shape[0]
feat_count = 0
complete_shapes = []
network_r = rasterio.open(network_r_path)
traversal_time = network_r.read()[(0, :, :)]
mcp = graph.MCP_Geometric(traversal_time)
thread_id = multiprocessing.current_process().name
for (idx, row) in inH.iterrows():
feat_count = (feat_count + 1)
if verbose:
tPrint(f'{thread_id}: {feat_count} of {n}')
cur_idx = network_r.index(row['geometry'].x, row['geometry'].y)
if ((cur_idx[0] > 0) and (cur_idx[1] > 0) and (cur_idx[0] < network_r.shape[0]) and (cur_idx[1] < network_r.shape[1])):
(costs, traceback) = mcp.find_costs([cur_idx])
for thresh in threshold:
within_time = ((costs < thresh) * 1).astype('int16')
all_shapes = []
for (cShape, value) in features.shapes(within_time, transform=network_r.transform):
if (value == 1.0):
all_shapes.append([shape(cShape)])
complete_shape = cascaded_union([x[0] for x in all_shapes])
complete_shapes.append([complete_shape, thresh, row[featIdx]])
final = gpd.GeoDataFrame(complete_shapes, columns=['geometry', 'threshold', 'IDX'], crs=network_r.crs)
return final<|docstring|>Generate individual market sheds for each feature in the input dataset
INPUTS
network_r [string] - path to raster from which to grab index for calculations in MCP
mcp [skimage.graph.MCP_Geometric] - input graph
inH [geopandas data frame] - geopandas data frame from which to calculate features
threshold [list of int] - travel treshold from which to calculate vectors in units of graph
featIdx [string] - column name in inH to append to output marketshed dataset
RETURNS
[geopandas dataframe]<|endoftext|> |
275e10b97f670f048f7c84664d14873da15fbb6a61630e35e83141f94b039b2b | @property
def G_f(self):
'\n Synonym to `Gf`\n '
return self.Gf | Synonym to `Gf` | dynsys/freq_response_results.py | G_f | MEMAndersen/DynSys | 0 | python | @property
def G_f(self):
'\n \n '
return self.Gf | @property
def G_f(self):
'\n \n '
return self.Gf<|docstring|>Synonym to `Gf`<|endoftext|> |
054b179a3a6dc4771d563354723b96152a9dd72624a0bb87b53810e62da251a3 | def plot(self, i=None, j=None, positive_f_only: bool=True, subplot_kwargs={}, logy=False, axarr=None, **kwargs):
'\n Function to plot frequency response matrix (f,G_f)\n '
default_subplot_kwargs = {'sharex': 'col', 'sharey': 'row'}
subplot_kwargs = {**default_subplot_kwargs, **subplot_kwargs}
if (i is None):
i = range(self.G_f.shape[1])
if (j is None):
j = range(self.G_f.shape[2])
if (axarr is None):
(fig, axarr) = plt.subplots(len(i), len(j), **subplot_kwargs)
axarr = npy.matrix(axarr)
else:
fig = axarr[(0, 0)].get_figure()
for (row, _i) in enumerate(i):
for (col, _j) in enumerate(j):
try:
ax = axarr[(row, col)]
except IndexError:
break
self.plot_component(_i, _j, ax_magnitude=ax, plotPhase=False, **kwargs)
if logy:
ax.set_yscale('log')
if (ax.get_yaxis().get_scale() == 'log'):
logy = True
if (not logy):
ax.ticklabel_format(axis='y', style='sci', scilimits=(0, 0))
if (col == 0):
ax.set_ylabel(self.output_names[_i])
else:
ax.set_ylabel('')
if (row == 0):
ax.set_title(self.input_names[_j])
if (row != (len(i) - 1)):
ax.set_xlabel('')
ax.set_ylabel(ax.get_ylabel(), fontsize='x-small', rotation=0, horizontalAlignment='right', verticalAlignment='center', wrap=True)
ax.set_title(ax.get_title(), fontsize='x-small', horizontalAlignment='center', wrap=True)
fig.set_size_inches((14, 8))
fig.subplots_adjust(hspace=0.0, wspace=0.4)
fig.suptitle('Plot of G(f) frequency response matrix')
fig.subplots_adjust(left=0.2)
fig.align_ylabels()
if (not logy):
[ax.set_ylim(bottom=0.0) for ax in fig.get_axes()]
return (fig, axarr) | Function to plot frequency response matrix (f,G_f) | dynsys/freq_response_results.py | plot | MEMAndersen/DynSys | 0 | python | def plot(self, i=None, j=None, positive_f_only: bool=True, subplot_kwargs={}, logy=False, axarr=None, **kwargs):
'\n \n '
default_subplot_kwargs = {'sharex': 'col', 'sharey': 'row'}
subplot_kwargs = {**default_subplot_kwargs, **subplot_kwargs}
if (i is None):
i = range(self.G_f.shape[1])
if (j is None):
j = range(self.G_f.shape[2])
if (axarr is None):
(fig, axarr) = plt.subplots(len(i), len(j), **subplot_kwargs)
axarr = npy.matrix(axarr)
else:
fig = axarr[(0, 0)].get_figure()
for (row, _i) in enumerate(i):
for (col, _j) in enumerate(j):
try:
ax = axarr[(row, col)]
except IndexError:
break
self.plot_component(_i, _j, ax_magnitude=ax, plotPhase=False, **kwargs)
if logy:
ax.set_yscale('log')
if (ax.get_yaxis().get_scale() == 'log'):
logy = True
if (not logy):
ax.ticklabel_format(axis='y', style='sci', scilimits=(0, 0))
if (col == 0):
ax.set_ylabel(self.output_names[_i])
else:
ax.set_ylabel()
if (row == 0):
ax.set_title(self.input_names[_j])
if (row != (len(i) - 1)):
ax.set_xlabel()
ax.set_ylabel(ax.get_ylabel(), fontsize='x-small', rotation=0, horizontalAlignment='right', verticalAlignment='center', wrap=True)
ax.set_title(ax.get_title(), fontsize='x-small', horizontalAlignment='center', wrap=True)
fig.set_size_inches((14, 8))
fig.subplots_adjust(hspace=0.0, wspace=0.4)
fig.suptitle('Plot of G(f) frequency response matrix')
fig.subplots_adjust(left=0.2)
fig.align_ylabels()
if (not logy):
[ax.set_ylim(bottom=0.0) for ax in fig.get_axes()]
return (fig, axarr) | def plot(self, i=None, j=None, positive_f_only: bool=True, subplot_kwargs={}, logy=False, axarr=None, **kwargs):
'\n \n '
default_subplot_kwargs = {'sharex': 'col', 'sharey': 'row'}
subplot_kwargs = {**default_subplot_kwargs, **subplot_kwargs}
if (i is None):
i = range(self.G_f.shape[1])
if (j is None):
j = range(self.G_f.shape[2])
if (axarr is None):
(fig, axarr) = plt.subplots(len(i), len(j), **subplot_kwargs)
axarr = npy.matrix(axarr)
else:
fig = axarr[(0, 0)].get_figure()
for (row, _i) in enumerate(i):
for (col, _j) in enumerate(j):
try:
ax = axarr[(row, col)]
except IndexError:
break
self.plot_component(_i, _j, ax_magnitude=ax, plotPhase=False, **kwargs)
if logy:
ax.set_yscale('log')
if (ax.get_yaxis().get_scale() == 'log'):
logy = True
if (not logy):
ax.ticklabel_format(axis='y', style='sci', scilimits=(0, 0))
if (col == 0):
ax.set_ylabel(self.output_names[_i])
else:
ax.set_ylabel()
if (row == 0):
ax.set_title(self.input_names[_j])
if (row != (len(i) - 1)):
ax.set_xlabel()
ax.set_ylabel(ax.get_ylabel(), fontsize='x-small', rotation=0, horizontalAlignment='right', verticalAlignment='center', wrap=True)
ax.set_title(ax.get_title(), fontsize='x-small', horizontalAlignment='center', wrap=True)
fig.set_size_inches((14, 8))
fig.subplots_adjust(hspace=0.0, wspace=0.4)
fig.suptitle('Plot of G(f) frequency response matrix')
fig.subplots_adjust(left=0.2)
fig.align_ylabels()
if (not logy):
[ax.set_ylim(bottom=0.0) for ax in fig.get_axes()]
return (fig, axarr)<|docstring|>Function to plot frequency response matrix (f,G_f)<|endoftext|> |
64a104f2ea5926c693722d355f9cb9dd18f453a2bfa00e3ee208878b9b281913 | def plot_component(self, i: int, j: int, positive_f_only: bool=True, label_str: str=None, plotMagnitude: bool=True, ax_magnitude=None, plotPhase: bool=True, ax_phase=None, f_d: list=None) -> dict:
'\n Function to plot frequency response matrix (f,G_f)\n \n ***\n Required:\n \n * `i`, `j`; indices to denote component of frequency response matrix\n to be plotted\n \n ***\n Optional:\n \n Variables:\n \n * `label_str`, used to label series in plot legend. If provided, legend \n will be produced.\n \n * `f_d`, damped natural frequencies, used as vertical lines overlay\n \n Boolean options:\n \n * `plotMagnitude`, _boolean_, indicates whether magnitude plot required\n \n * `plotPhase`, _boolean_, indicates whether phase plot required\n \n Axes objects:\n \n * `ax_magnitude`, axes to which magnitude plot should be drawn\n \n * `ax_phase`, axes to which phase plot should be drawn\n \n If both plots are requested, axes should normally be submitted to both \n `ax_magnitude` and `ax_phase`. Failing this a new figure will be \n produced.\n \n ***\n Returns:\n \n `dict` containing figure and axes objects\n \n '
f = self.f
G_f = self.Gf[(:, i, j)]
if (f.shape[0] != G_f.shape[0]):
raise ValueError((('Error: shape of f and G_f different!\n' + 'f.shape: {0}\n'.format(f.shape)) + 'G_f.shape: {0}'.format(G_f.shape)))
if ((plotMagnitude and (ax_magnitude is None)) or (plotPhase and (ax_phase is None))):
if (plotMagnitude and plotPhase):
(fig, axarr) = plt.subplots(2, sharex=True)
ax_magnitude = axarr[0]
ax_phase = axarr[1]
else:
(fig, ax) = plt.subplots(1)
if plotMagnitude:
ax_magnitude = ax
else:
ax_phase = ax
fig.suptitle('Frequency response G(f)')
fig.set_size_inches((14, 8))
else:
fig = ax_magnitude.get_figure()
fmax = npy.max(f)
fmin = npy.min(f)
if positive_f_only:
fmin = 0
if plotMagnitude:
ax = ax_magnitude
ax.plot(f, npy.abs(G_f), label=label_str)
ax.set_xlim([fmin, fmax])
ax.set_xlabel('f (Hz)')
ax.set_ylabel('|G(f)|')
if (label_str is not None):
ax.legend()
if plotPhase:
ax = ax_phase
ax.plot(f, npy.angle(G_f), label=label_str)
ax.set_xlim([fmin, fmax])
ax.set_ylim([(- npy.pi), (+ npy.pi)])
ax.set_xlabel('f (Hz)')
ax.set_ylabel('Phase G(f) (rad)')
if (label_str is not None):
ax.legend()
if (f_d is not None):
for _f_d in f_d:
ax_magnitude.axvline(_f_d, linestyle='--')
ax_phase.axvline(_f_d, linestyle='--')
d = {}
d['fig'] = fig
d['ax_magnitude'] = ax_magnitude
d['ax_phase'] = ax_phase
return d | Function to plot frequency response matrix (f,G_f)
***
Required:
* `i`, `j`; indices to denote component of frequency response matrix
to be plotted
***
Optional:
Variables:
* `label_str`, used to label series in plot legend. If provided, legend
will be produced.
* `f_d`, damped natural frequencies, used as vertical lines overlay
Boolean options:
* `plotMagnitude`, _boolean_, indicates whether magnitude plot required
* `plotPhase`, _boolean_, indicates whether phase plot required
Axes objects:
* `ax_magnitude`, axes to which magnitude plot should be drawn
* `ax_phase`, axes to which phase plot should be drawn
If both plots are requested, axes should normally be submitted to both
`ax_magnitude` and `ax_phase`. Failing this a new figure will be
produced.
***
Returns:
`dict` containing figure and axes objects | dynsys/freq_response_results.py | plot_component | MEMAndersen/DynSys | 0 | python | def plot_component(self, i: int, j: int, positive_f_only: bool=True, label_str: str=None, plotMagnitude: bool=True, ax_magnitude=None, plotPhase: bool=True, ax_phase=None, f_d: list=None) -> dict:
'\n Function to plot frequency response matrix (f,G_f)\n \n ***\n Required:\n \n * `i`, `j`; indices to denote component of frequency response matrix\n to be plotted\n \n ***\n Optional:\n \n Variables:\n \n * `label_str`, used to label series in plot legend. If provided, legend \n will be produced.\n \n * `f_d`, damped natural frequencies, used as vertical lines overlay\n \n Boolean options:\n \n * `plotMagnitude`, _boolean_, indicates whether magnitude plot required\n \n * `plotPhase`, _boolean_, indicates whether phase plot required\n \n Axes objects:\n \n * `ax_magnitude`, axes to which magnitude plot should be drawn\n \n * `ax_phase`, axes to which phase plot should be drawn\n \n If both plots are requested, axes should normally be submitted to both \n `ax_magnitude` and `ax_phase`. Failing this a new figure will be \n produced.\n \n ***\n Returns:\n \n `dict` containing figure and axes objects\n \n '
f = self.f
G_f = self.Gf[(:, i, j)]
if (f.shape[0] != G_f.shape[0]):
raise ValueError((('Error: shape of f and G_f different!\n' + 'f.shape: {0}\n'.format(f.shape)) + 'G_f.shape: {0}'.format(G_f.shape)))
if ((plotMagnitude and (ax_magnitude is None)) or (plotPhase and (ax_phase is None))):
if (plotMagnitude and plotPhase):
(fig, axarr) = plt.subplots(2, sharex=True)
ax_magnitude = axarr[0]
ax_phase = axarr[1]
else:
(fig, ax) = plt.subplots(1)
if plotMagnitude:
ax_magnitude = ax
else:
ax_phase = ax
fig.suptitle('Frequency response G(f)')
fig.set_size_inches((14, 8))
else:
fig = ax_magnitude.get_figure()
fmax = npy.max(f)
fmin = npy.min(f)
if positive_f_only:
fmin = 0
if plotMagnitude:
ax = ax_magnitude
ax.plot(f, npy.abs(G_f), label=label_str)
ax.set_xlim([fmin, fmax])
ax.set_xlabel('f (Hz)')
ax.set_ylabel('|G(f)|')
if (label_str is not None):
ax.legend()
if plotPhase:
ax = ax_phase
ax.plot(f, npy.angle(G_f), label=label_str)
ax.set_xlim([fmin, fmax])
ax.set_ylim([(- npy.pi), (+ npy.pi)])
ax.set_xlabel('f (Hz)')
ax.set_ylabel('Phase G(f) (rad)')
if (label_str is not None):
ax.legend()
if (f_d is not None):
for _f_d in f_d:
ax_magnitude.axvline(_f_d, linestyle='--')
ax_phase.axvline(_f_d, linestyle='--')
d = {}
d['fig'] = fig
d['ax_magnitude'] = ax_magnitude
d['ax_phase'] = ax_phase
return d | def plot_component(self, i: int, j: int, positive_f_only: bool=True, label_str: str=None, plotMagnitude: bool=True, ax_magnitude=None, plotPhase: bool=True, ax_phase=None, f_d: list=None) -> dict:
'\n Function to plot frequency response matrix (f,G_f)\n \n ***\n Required:\n \n * `i`, `j`; indices to denote component of frequency response matrix\n to be plotted\n \n ***\n Optional:\n \n Variables:\n \n * `label_str`, used to label series in plot legend. If provided, legend \n will be produced.\n \n * `f_d`, damped natural frequencies, used as vertical lines overlay\n \n Boolean options:\n \n * `plotMagnitude`, _boolean_, indicates whether magnitude plot required\n \n * `plotPhase`, _boolean_, indicates whether phase plot required\n \n Axes objects:\n \n * `ax_magnitude`, axes to which magnitude plot should be drawn\n \n * `ax_phase`, axes to which phase plot should be drawn\n \n If both plots are requested, axes should normally be submitted to both \n `ax_magnitude` and `ax_phase`. Failing this a new figure will be \n produced.\n \n ***\n Returns:\n \n `dict` containing figure and axes objects\n \n '
f = self.f
G_f = self.Gf[(:, i, j)]
if (f.shape[0] != G_f.shape[0]):
raise ValueError((('Error: shape of f and G_f different!\n' + 'f.shape: {0}\n'.format(f.shape)) + 'G_f.shape: {0}'.format(G_f.shape)))
if ((plotMagnitude and (ax_magnitude is None)) or (plotPhase and (ax_phase is None))):
if (plotMagnitude and plotPhase):
(fig, axarr) = plt.subplots(2, sharex=True)
ax_magnitude = axarr[0]
ax_phase = axarr[1]
else:
(fig, ax) = plt.subplots(1)
if plotMagnitude:
ax_magnitude = ax
else:
ax_phase = ax
fig.suptitle('Frequency response G(f)')
fig.set_size_inches((14, 8))
else:
fig = ax_magnitude.get_figure()
fmax = npy.max(f)
fmin = npy.min(f)
if positive_f_only:
fmin = 0
if plotMagnitude:
ax = ax_magnitude
ax.plot(f, npy.abs(G_f), label=label_str)
ax.set_xlim([fmin, fmax])
ax.set_xlabel('f (Hz)')
ax.set_ylabel('|G(f)|')
if (label_str is not None):
ax.legend()
if plotPhase:
ax = ax_phase
ax.plot(f, npy.angle(G_f), label=label_str)
ax.set_xlim([fmin, fmax])
ax.set_ylim([(- npy.pi), (+ npy.pi)])
ax.set_xlabel('f (Hz)')
ax.set_ylabel('Phase G(f) (rad)')
if (label_str is not None):
ax.legend()
if (f_d is not None):
for _f_d in f_d:
ax_magnitude.axvline(_f_d, linestyle='--')
ax_phase.axvline(_f_d, linestyle='--')
d = {}
d['fig'] = fig
d['ax_magnitude'] = ax_magnitude
d['ax_phase'] = ax_phase
return d<|docstring|>Function to plot frequency response matrix (f,G_f)
***
Required:
* `i`, `j`; indices to denote component of frequency response matrix
to be plotted
***
Optional:
Variables:
* `label_str`, used to label series in plot legend. If provided, legend
will be produced.
* `f_d`, damped natural frequencies, used as vertical lines overlay
Boolean options:
* `plotMagnitude`, _boolean_, indicates whether magnitude plot required
* `plotPhase`, _boolean_, indicates whether phase plot required
Axes objects:
* `ax_magnitude`, axes to which magnitude plot should be drawn
* `ax_phase`, axes to which phase plot should be drawn
If both plots are requested, axes should normally be submitted to both
`ax_magnitude` and `ax_phase`. Failing this a new figure will be
produced.
***
Returns:
`dict` containing figure and axes objects<|endoftext|> |
01c40a2bcaaa88f6d293e26c3c41e3bade046f14f09b9650015b6f3994977bdc | def load(self):
'\n Loads train and test data. Constrain: Only the 10000 most frequent words in newswires are used\n Sets _dataset to (train_data, train_labels), (test_data, test_labels)\n\n :return: -\n '
self._dataset = reuters.load_data(num_words=10000) | Loads train and test data. Constrain: Only the 10000 most frequent words in newswires are used
Sets _dataset to (train_data, train_labels), (test_data, test_labels)
:return: - | Reuters_multiclass_classification/preprocessing.py | load | donK23/Thoughtful_DL | 1 | python | def load(self):
'\n Loads train and test data. Constrain: Only the 10000 most frequent words in newswires are used\n Sets _dataset to (train_data, train_labels), (test_data, test_labels)\n\n :return: -\n '
self._dataset = reuters.load_data(num_words=10000) | def load(self):
'\n Loads train and test data. Constrain: Only the 10000 most frequent words in newswires are used\n Sets _dataset to (train_data, train_labels), (test_data, test_labels)\n\n :return: -\n '
self._dataset = reuters.load_data(num_words=10000)<|docstring|>Loads train and test data. Constrain: Only the 10000 most frequent words in newswires are used
Sets _dataset to (train_data, train_labels), (test_data, test_labels)
:return: -<|endoftext|> |
b1d537b4e9272aaedc22b1f8b3c9329e1b0c4a028cb5b52a26c41b45827a145f | def preprocess(self):
def preprocess(self):
    """One-hot-encode the newswire sequences and categorical-encode the labels.

    Rebinds ``_dataset`` to
    ``(train_data, train_labels), (test_data, test_labels)`` where each data
    row is a 10000-dim multi-hot vector and labels are one-hot matrices.
    """
    def vectorize(sequences, dimension=10000):
        # Multi-hot encoding: row i gets a 1.0 at every word index in sequence i.
        encoded = np.zeros((len(sequences), dimension))
        for row, word_indices in enumerate(sequences):
            encoded[row, word_indices] = 1.0
        return encoded

    (train_data, train_labels), (test_data, test_labels) = self._dataset
    self._dataset = (
        (vectorize(train_data), to_categorical(train_labels)),
        (vectorize(test_data), to_categorical(test_labels)),
    )
Sets _dataset to encoded vectors - (train_data, train_labels), (test_data, test_labels)
:return: - | Reuters_multiclass_classification/preprocessing.py | preprocess | donK23/Thoughtful_DL | 1 | python | def preprocess(self):
'\n Data wrangling: One-hot-encode data & labels\n Sets _dataset to encoded vectors - (train_data, train_labels), (test_data, test_labels)\n\n :return: -\n '
def one_hot_encode(sequences, dimension=10000):
results = np.zeros((len(sequences), dimension))
for (i, sequence) in enumerate(sequences):
results[(i, sequence)] = 1.0
return results
encoded_train_data = one_hot_encode(self._dataset[0][0])
encoded_test_data = one_hot_encode(self._dataset[1][0])
encoded_train_labels = to_categorical(self._dataset[0][1])
encoded_test_labels = to_categorical(self._dataset[1][1])
self._dataset = ((encoded_train_data, encoded_train_labels), (encoded_test_data, encoded_test_labels)) | def preprocess(self):
'\n Data wrangling: One-hot-encode data & labels\n Sets _dataset to encoded vectors - (train_data, train_labels), (test_data, test_labels)\n\n :return: -\n '
def one_hot_encode(sequences, dimension=10000):
results = np.zeros((len(sequences), dimension))
for (i, sequence) in enumerate(sequences):
results[(i, sequence)] = 1.0
return results
encoded_train_data = one_hot_encode(self._dataset[0][0])
encoded_test_data = one_hot_encode(self._dataset[1][0])
encoded_train_labels = to_categorical(self._dataset[0][1])
encoded_test_labels = to_categorical(self._dataset[1][1])
self._dataset = ((encoded_train_data, encoded_train_labels), (encoded_test_data, encoded_test_labels))<|docstring|>Data wrangling: One-hot-encode data & labels
Sets _dataset to encoded vectors - (train_data, train_labels), (test_data, test_labels)
:return: -<|endoftext|> |
8fee99a02e059afd4fc4e362a5d746da861cdb7bca3e071a92084c8aee67db0e | def split_data(self):
def split_data(self):
    """Carve a 1000-sample dev set off the front of the training data.

    Rebinds ``_dataset`` to three ``(data, labels)`` pairs: train (samples
    past the first 1000), dev (the first 1000), and the untouched test split.
    """
    (data, labels), test_split = self._dataset
    dev_split = (data[:1000], labels[:1000])
    train_split = (data[1000:], labels[1000:])
    self._dataset = (train_split, dev_split, test_split)
Sets _dataset to train, dev & test Tuples (data, labels)
:return: - | Reuters_multiclass_classification/preprocessing.py | split_data | donK23/Thoughtful_DL | 1 | python | def split_data(self):
'\n Split data into train, dev & test set\n Sets _dataset to train, dev & test Tuples (data, labels)\n\n :return: -\n '
(train_data, train_labels) = (self._dataset[0][0][1000:], self._dataset[0][1][1000:])
(dev_data, dev_labels) = (self._dataset[0][0][:1000], self._dataset[0][1][:1000])
(test_data, test_labels) = self._dataset[1]
self._dataset = ((train_data, train_labels), (dev_data, dev_labels), (test_data, test_labels)) | def split_data(self):
'\n Split data into train, dev & test set\n Sets _dataset to train, dev & test Tuples (data, labels)\n\n :return: -\n '
(train_data, train_labels) = (self._dataset[0][0][1000:], self._dataset[0][1][1000:])
(dev_data, dev_labels) = (self._dataset[0][0][:1000], self._dataset[0][1][:1000])
(test_data, test_labels) = self._dataset[1]
self._dataset = ((train_data, train_labels), (dev_data, dev_labels), (test_data, test_labels))<|docstring|>Split data into train, dev & test set
Sets _dataset to train, dev & test Tuples (data, labels)
:return: -<|endoftext|> |
c0d682ee2a76d1f26e037513bd1228cef117f16f38ad28d9a130aed3575b203f | def regress(self, img_original, body_bbox_list):
'\n args: \n img_original: original raw image (BGR order by using cv2.imread)\n body_bbox: bounding box around the target: (minX, minY, width, height)\n outputs:\n pred_vertices_img:\n pred_joints_vis_img:\n pred_rotmat\n pred_betas\n pred_camera\n bbox: [bbr[0], bbr[1],bbr[0]+bbr[2], bbr[1]+bbr[3]])\n bboxTopLeft: bbox top left (redundant)\n boxScale_o2n: bbox scaling factor (redundant) \n '
pred_output_list = list()
for body_bbox in body_bbox_list:
(img, norm_img, boxScale_o2n, bboxTopLeft, bbox) = process_image_bbox(img_original, body_bbox, input_res=constants.IMG_RES)
bboxTopLeft = np.array(bboxTopLeft)
if (img is None):
pred_output_list.append(None)
continue
with torch.no_grad():
(pred_rotmat, pred_betas, pred_camera) = self.model_regressor(norm_img.to(self.device))
pred_aa = gu.rotation_matrix_to_angle_axis(pred_rotmat).cuda()
pred_aa = pred_aa.reshape(pred_aa.shape[0], 72)
smpl_output = self.smpl(betas=pred_betas, body_pose=pred_aa[(:, 3:)], global_orient=pred_aa[(:, :3)], pose2rot=True)
pred_vertices = smpl_output.vertices
pred_joints_3d = smpl_output.joints
pred_vertices = pred_vertices[0].cpu().numpy()
pred_camera = pred_camera.cpu().numpy().ravel()
camScale = pred_camera[0]
camTrans = pred_camera[1:]
pred_output = dict()
pred_vertices_bbox = convert_smpl_to_bbox(pred_vertices, camScale, camTrans)
pred_vertices_img = convert_bbox_to_oriIm(pred_vertices_bbox, boxScale_o2n, bboxTopLeft, img_original.shape[1], img_original.shape[0])
pred_joints_3d = pred_joints_3d[0].cpu().numpy()
pred_joints_vis = pred_joints_3d[(:, :3)]
pred_joints_vis_bbox = convert_smpl_to_bbox(pred_joints_vis, camScale, camTrans)
pred_joints_vis_img = convert_bbox_to_oriIm(pred_joints_vis_bbox, boxScale_o2n, bboxTopLeft, img_original.shape[1], img_original.shape[0])
pred_output['img_cropped'] = img[(:, :, ::(- 1))]
pred_output['pred_vertices_smpl'] = smpl_output.vertices[0].cpu().numpy()
pred_output['pred_vertices_img'] = pred_vertices_img
pred_output['pred_joints_img'] = pred_joints_vis_img
pred_aa_tensor = gu.rotation_matrix_to_angle_axis(pred_rotmat.detach().cpu()[0])
pred_output['pred_body_pose'] = pred_aa_tensor.cpu().numpy().reshape(1, 72)
pred_output['pred_rotmat'] = pred_rotmat.detach().cpu().numpy()
pred_output['pred_betas'] = pred_betas.detach().cpu().numpy()
pred_output['pred_camera'] = pred_camera
pred_output['bbox_top_left'] = bboxTopLeft
pred_output['bbox_scale_ratio'] = boxScale_o2n
pred_output['faces'] = self.smpl.faces
if self.use_smplx:
img_center = (np.array((img_original.shape[1], img_original.shape[0])) * 0.5)
pred_joints = smpl_output.right_hand_joints[0].cpu().numpy()
pred_joints_bbox = convert_smpl_to_bbox(pred_joints, camScale, camTrans)
pred_joints_img = convert_bbox_to_oriIm(pred_joints_bbox, boxScale_o2n, bboxTopLeft, img_original.shape[1], img_original.shape[0])
pred_output['right_hand_joints_img_coord'] = pred_joints_img
pred_joints = smpl_output.left_hand_joints[0].cpu().numpy()
pred_joints_bbox = convert_smpl_to_bbox(pred_joints, camScale, camTrans)
pred_joints_img = convert_bbox_to_oriIm(pred_joints_bbox, boxScale_o2n, bboxTopLeft, img_original.shape[1], img_original.shape[0])
pred_output['left_hand_joints_img_coord'] = pred_joints_img
pred_output_list.append(pred_output)
return pred_output_list | args:
img_original: original raw image (BGR order by using cv2.imread)
body_bbox: bounding box around the target: (minX, minY, width, height)
outputs:
pred_vertices_img:
pred_joints_vis_img:
pred_rotmat
pred_betas
pred_camera
bbox: [bbr[0], bbr[1],bbr[0]+bbr[2], bbr[1]+bbr[3]])
bboxTopLeft: bbox top left (redundant)
boxScale_o2n: bbox scaling factor (redundant) | bodymocap/body_mocap_api.py | regress | archonic/frankmocap | 1,612 | python | def regress(self, img_original, body_bbox_list):
def regress(self, img_original, body_bbox_list):
    """Run SMPL(-X) body regression on each person bbox in an image.

    Args:
        img_original: Original raw image (BGR channel order, as from cv2.imread).
        body_bbox_list: Per-person bounding boxes (minX, minY, width, height).

    Returns:
        A list with one entry per input bbox: ``None`` when cropping failed,
        otherwise a dict with 'pred_vertices_img', 'pred_joints_img',
        'pred_body_pose', 'pred_rotmat', 'pred_betas', 'pred_camera',
        'bbox_top_left', 'bbox_scale_ratio', 'faces' and (for SMPL-X) the
        image-space hand-joint coordinates.
    """
    pred_output_list = list()
    for body_bbox in body_bbox_list:
        # Crop/normalize the image around the bbox to the network input size.
        (img, norm_img, boxScale_o2n, bboxTopLeft, bbox) = process_image_bbox(
            img_original, body_bbox, input_res=constants.IMG_RES)
        bboxTopLeft = np.array(bboxTopLeft)

        # Cropping can fail (e.g. degenerate bbox); keep output aligned with input.
        if (img is None):
            pred_output_list.append(None)
            continue

        with torch.no_grad():
            # Network predicts per-joint rotation matrices, shape betas and a
            # weak-perspective camera (scale, tx, ty).
            (pred_rotmat, pred_betas, pred_camera) = self.model_regressor(norm_img.to(self.device))

            # Rotation matrices -> axis-angle (24 joints * 3 = 72 params) so the
            # SMPL layer can consume them with pose2rot=True.
            pred_aa = gu.rotation_matrix_to_angle_axis(pred_rotmat).cuda()
            pred_aa = pred_aa.reshape(pred_aa.shape[0], 72)
            smpl_output = self.smpl(
                betas=pred_betas, body_pose=pred_aa[:, 3:],
                global_orient=pred_aa[:, :3], pose2rot=True)
            pred_vertices = smpl_output.vertices
            pred_joints_3d = smpl_output.joints
            pred_vertices = pred_vertices[0].cpu().numpy()
            pred_camera = pred_camera.cpu().numpy().ravel()
            camScale = pred_camera[0]   # weak-perspective scale
            camTrans = pred_camera[1:]  # weak-perspective translation

            pred_output = dict()
            # Map vertices: SMPL space -> bbox space -> original image space.
            pred_vertices_bbox = convert_smpl_to_bbox(pred_vertices, camScale, camTrans)
            pred_vertices_img = convert_bbox_to_oriIm(
                pred_vertices_bbox, boxScale_o2n, bboxTopLeft,
                img_original.shape[1], img_original.shape[0])

            # Same two-step mapping for the joints (xyz columns only).
            pred_joints_3d = pred_joints_3d[0].cpu().numpy()
            pred_joints_vis = pred_joints_3d[:, :3]
            pred_joints_vis_bbox = convert_smpl_to_bbox(pred_joints_vis, camScale, camTrans)
            pred_joints_vis_img = convert_bbox_to_oriIm(
                pred_joints_vis_bbox, boxScale_o2n, bboxTopLeft,
                img_original.shape[1], img_original.shape[0])

            pred_output['img_cropped'] = img[:, :, ::-1]  # BGR -> RGB
            pred_output['pred_vertices_smpl'] = smpl_output.vertices[0].cpu().numpy()
            pred_output['pred_vertices_img'] = pred_vertices_img
            pred_output['pred_joints_img'] = pred_joints_vis_img

            # Also expose the pose as axis-angle (1 x 72), computed on CPU.
            pred_aa_tensor = gu.rotation_matrix_to_angle_axis(pred_rotmat.detach().cpu()[0])
            pred_output['pred_body_pose'] = pred_aa_tensor.cpu().numpy().reshape(1, 72)
            pred_output['pred_rotmat'] = pred_rotmat.detach().cpu().numpy()
            pred_output['pred_betas'] = pred_betas.detach().cpu().numpy()
            pred_output['pred_camera'] = pred_camera
            pred_output['bbox_top_left'] = bboxTopLeft
            pred_output['bbox_scale_ratio'] = boxScale_o2n
            pred_output['faces'] = self.smpl.faces

            if self.use_smplx:
                # SMPL-X also predicts hand joints; project them into the original
                # image so hand bboxes can be derived downstream.
                # NOTE(review): img_center is computed but never used — candidate
                # for removal; confirm no downstream reliance.
                img_center = (np.array((img_original.shape[1], img_original.shape[0])) * 0.5)
                pred_joints = smpl_output.right_hand_joints[0].cpu().numpy()
                pred_joints_bbox = convert_smpl_to_bbox(pred_joints, camScale, camTrans)
                pred_joints_img = convert_bbox_to_oriIm(
                    pred_joints_bbox, boxScale_o2n, bboxTopLeft,
                    img_original.shape[1], img_original.shape[0])
                pred_output['right_hand_joints_img_coord'] = pred_joints_img
                pred_joints = smpl_output.left_hand_joints[0].cpu().numpy()
                pred_joints_bbox = convert_smpl_to_bbox(pred_joints, camScale, camTrans)
                pred_joints_img = convert_bbox_to_oriIm(
                    pred_joints_bbox, boxScale_o2n, bboxTopLeft,
                    img_original.shape[1], img_original.shape[0])
                pred_output['left_hand_joints_img_coord'] = pred_joints_img

            pred_output_list.append(pred_output)

    return pred_output_list
'\n args: \n img_original: original raw image (BGR order by using cv2.imread)\n body_bbox: bounding box around the target: (minX, minY, width, height)\n outputs:\n pred_vertices_img:\n pred_joints_vis_img:\n pred_rotmat\n pred_betas\n pred_camera\n bbox: [bbr[0], bbr[1],bbr[0]+bbr[2], bbr[1]+bbr[3]])\n bboxTopLeft: bbox top left (redundant)\n boxScale_o2n: bbox scaling factor (redundant) \n '
pred_output_list = list()
for body_bbox in body_bbox_list:
(img, norm_img, boxScale_o2n, bboxTopLeft, bbox) = process_image_bbox(img_original, body_bbox, input_res=constants.IMG_RES)
bboxTopLeft = np.array(bboxTopLeft)
if (img is None):
pred_output_list.append(None)
continue
with torch.no_grad():
(pred_rotmat, pred_betas, pred_camera) = self.model_regressor(norm_img.to(self.device))
pred_aa = gu.rotation_matrix_to_angle_axis(pred_rotmat).cuda()
pred_aa = pred_aa.reshape(pred_aa.shape[0], 72)
smpl_output = self.smpl(betas=pred_betas, body_pose=pred_aa[(:, 3:)], global_orient=pred_aa[(:, :3)], pose2rot=True)
pred_vertices = smpl_output.vertices
pred_joints_3d = smpl_output.joints
pred_vertices = pred_vertices[0].cpu().numpy()
pred_camera = pred_camera.cpu().numpy().ravel()
camScale = pred_camera[0]
camTrans = pred_camera[1:]
pred_output = dict()
pred_vertices_bbox = convert_smpl_to_bbox(pred_vertices, camScale, camTrans)
pred_vertices_img = convert_bbox_to_oriIm(pred_vertices_bbox, boxScale_o2n, bboxTopLeft, img_original.shape[1], img_original.shape[0])
pred_joints_3d = pred_joints_3d[0].cpu().numpy()
pred_joints_vis = pred_joints_3d[(:, :3)]
pred_joints_vis_bbox = convert_smpl_to_bbox(pred_joints_vis, camScale, camTrans)
pred_joints_vis_img = convert_bbox_to_oriIm(pred_joints_vis_bbox, boxScale_o2n, bboxTopLeft, img_original.shape[1], img_original.shape[0])
pred_output['img_cropped'] = img[(:, :, ::(- 1))]
pred_output['pred_vertices_smpl'] = smpl_output.vertices[0].cpu().numpy()
pred_output['pred_vertices_img'] = pred_vertices_img
pred_output['pred_joints_img'] = pred_joints_vis_img
pred_aa_tensor = gu.rotation_matrix_to_angle_axis(pred_rotmat.detach().cpu()[0])
pred_output['pred_body_pose'] = pred_aa_tensor.cpu().numpy().reshape(1, 72)
pred_output['pred_rotmat'] = pred_rotmat.detach().cpu().numpy()
pred_output['pred_betas'] = pred_betas.detach().cpu().numpy()
pred_output['pred_camera'] = pred_camera
pred_output['bbox_top_left'] = bboxTopLeft
pred_output['bbox_scale_ratio'] = boxScale_o2n
pred_output['faces'] = self.smpl.faces
if self.use_smplx:
img_center = (np.array((img_original.shape[1], img_original.shape[0])) * 0.5)
pred_joints = smpl_output.right_hand_joints[0].cpu().numpy()
pred_joints_bbox = convert_smpl_to_bbox(pred_joints, camScale, camTrans)
pred_joints_img = convert_bbox_to_oriIm(pred_joints_bbox, boxScale_o2n, bboxTopLeft, img_original.shape[1], img_original.shape[0])
pred_output['right_hand_joints_img_coord'] = pred_joints_img
pred_joints = smpl_output.left_hand_joints[0].cpu().numpy()
pred_joints_bbox = convert_smpl_to_bbox(pred_joints, camScale, camTrans)
pred_joints_img = convert_bbox_to_oriIm(pred_joints_bbox, boxScale_o2n, bboxTopLeft, img_original.shape[1], img_original.shape[0])
pred_output['left_hand_joints_img_coord'] = pred_joints_img
pred_output_list.append(pred_output)
return pred_output_list<|docstring|>args:
img_original: original raw image (BGR order by using cv2.imread)
body_bbox: bounding box around the target: (minX, minY, width, height)
outputs:
pred_vertices_img:
pred_joints_vis_img:
pred_rotmat
pred_betas
pred_camera
bbox: [bbr[0], bbr[1],bbr[0]+bbr[2], bbr[1]+bbr[3]])
bboxTopLeft: bbox top left (redundant)
boxScale_o2n: bbox scaling factor (redundant)<|endoftext|> |
b233ac59008f1c7daf9b967ad45bad77986574324bd89a5a62858b22c40e4997 | def get_hand_bboxes(self, pred_body_list, img_shape):
'\n args: \n pred_body_list: output of body regresion\n img_shape: img_height, img_width\n outputs:\n hand_bbox_list: \n '
hand_bbox_list = list()
for pred_body in pred_body_list:
hand_bbox = dict(left_hand=None, right_hand=None)
if (pred_body is None):
hand_bbox_list.append(hand_bbox)
else:
for hand_type in hand_bbox:
key = f'{hand_type}_joints_img_coord'
pred_joints_vis_img = pred_body[key]
if (pred_joints_vis_img is not None):
(x0, x1) = (np.min(pred_joints_vis_img[(:, 0)]), np.max(pred_joints_vis_img[(:, 0)]))
(y0, y1) = (np.min(pred_joints_vis_img[(:, 1)]), np.max(pred_joints_vis_img[(:, 1)]))
(width, height) = ((x1 - x0), (y1 - y0))
margin = int((max(height, width) * 0.2))
(img_height, img_width) = img_shape
x0 = max((x0 - margin), 0)
y0 = max((y0 - margin), 0)
x1 = min((x1 + margin), img_width)
y1 = min((y1 + margin), img_height)
hand_bbox[hand_type] = np.array([x0, y0, (x1 - x0), (y1 - y0)])
hand_bbox_list.append(hand_bbox)
return hand_bbox_list | args:
pred_body_list: output of body regresion
img_shape: img_height, img_width
outputs:
hand_bbox_list: | bodymocap/body_mocap_api.py | get_hand_bboxes | archonic/frankmocap | 1,612 | python | def get_hand_bboxes(self, pred_body_list, img_shape):
def get_hand_bboxes(self, pred_body_list, img_shape):
    """Derive left/right hand bounding boxes from projected hand joints.

    Args:
        pred_body_list: Output of body regression; each element is either
            None or a dict carrying '<hand>_joints_img_coord' joint arrays.
        img_shape: (img_height, img_width) of the original image.

    Returns:
        One dict per body prediction with keys 'left_hand' and 'right_hand',
        each either None or an np.array (minX, minY, width, height).
    """
    img_height, img_width = img_shape
    hand_bbox_list = list()

    for pred_body in pred_body_list:
        bboxes = dict(left_hand=None, right_hand=None)
        if pred_body is not None:
            for hand_type in bboxes:
                joints = pred_body[f'{hand_type}_joints_img_coord']
                if joints is None:
                    continue
                # Tight box around the projected joints ...
                x0, x1 = np.min(joints[:, 0]), np.max(joints[:, 0])
                y0, y1 = np.min(joints[:, 1]), np.max(joints[:, 1])
                # ... padded by 20% of the larger side, clamped to the image.
                margin = int(max(y1 - y0, x1 - x0) * 0.2)
                x0 = max(x0 - margin, 0)
                y0 = max(y0 - margin, 0)
                x1 = min(x1 + margin, img_width)
                y1 = min(y1 + margin, img_height)
                bboxes[hand_type] = np.array([x0, y0, x1 - x0, y1 - y0])
        hand_bbox_list.append(bboxes)

    return hand_bbox_list
'\n args: \n pred_body_list: output of body regresion\n img_shape: img_height, img_width\n outputs:\n hand_bbox_list: \n '
hand_bbox_list = list()
for pred_body in pred_body_list:
hand_bbox = dict(left_hand=None, right_hand=None)
if (pred_body is None):
hand_bbox_list.append(hand_bbox)
else:
for hand_type in hand_bbox:
key = f'{hand_type}_joints_img_coord'
pred_joints_vis_img = pred_body[key]
if (pred_joints_vis_img is not None):
(x0, x1) = (np.min(pred_joints_vis_img[(:, 0)]), np.max(pred_joints_vis_img[(:, 0)]))
(y0, y1) = (np.min(pred_joints_vis_img[(:, 1)]), np.max(pred_joints_vis_img[(:, 1)]))
(width, height) = ((x1 - x0), (y1 - y0))
margin = int((max(height, width) * 0.2))
(img_height, img_width) = img_shape
x0 = max((x0 - margin), 0)
y0 = max((y0 - margin), 0)
x1 = min((x1 + margin), img_width)
y1 = min((y1 + margin), img_height)
hand_bbox[hand_type] = np.array([x0, y0, (x1 - x0), (y1 - y0)])
hand_bbox_list.append(hand_bbox)
return hand_bbox_list<|docstring|>args:
pred_body_list: output of body regresion
img_shape: img_height, img_width
outputs:
hand_bbox_list:<|endoftext|> |
6333afa5c8c68af12af057d14dff6618060ccb92183b71c5068be22d920fbb34 | def __init__(self, N: int, J: float, delta: float, h: float, penalty: float=0, s_target: int=0, trial_id: int=None):
def __init__(self, N: int, J: float, delta: float, h: float, penalty: float=0, s_target: int=0, trial_id: int=None):
    """Dimerized XXZ chain with disorder.

    Args:
        N: System size (number of sites).
        J: Coupling strength (undocumented upstream — presumably the
            exchange coupling).
        delta: Anisotropy/dimerization parameter (undocumented upstream).
        h: Disorder strength.
        penalty: Penalty strength (Lagrangian multiplier).
        s_target: The targeted total Sz charge sector.
        trial_id: ID of the current disorder trial, or None.
    """
    super(DimerXXZ, self).__init__(N)
    self.J, self.delta, self.h = J, delta, h
    self.penalty, self.s_target = penalty, s_target
    self.trial_id = trial_id
N: System size.
J:
delta:
h: Disorder strength.
penalty: Penalty strength (or Lagrangian multiplier).
s_target: The targeting total Sz charge sector.
trial_id: ID of the current disorder trial. | tnpy/model/dimer_xxz.py | __init__ | tanlin2013/TNpy | 1 | python | def __init__(self, N: int, J: float, delta: float, h: float, penalty: float=0, s_target: int=0, trial_id: int=None):
'\n Args:\n N: System size.\n J:\n delta:\n h: Disorder strength.\n penalty: Penalty strength (or Lagrangian multiplier).\n s_target: The targeting total Sz charge sector.\n trial_id: ID of the current disorder trial.\n '
super(DimerXXZ, self).__init__(N)
self.J = J
self.delta = delta
self.h = h
self.penalty = penalty
self.s_target = s_target
self.trial_id = trial_id | def __init__(self, N: int, J: float, delta: float, h: float, penalty: float=0, s_target: int=0, trial_id: int=None):
'\n Args:\n N: System size.\n J:\n delta:\n h: Disorder strength.\n penalty: Penalty strength (or Lagrangian multiplier).\n s_target: The targeting total Sz charge sector.\n trial_id: ID of the current disorder trial.\n '
super(DimerXXZ, self).__init__(N)
self.J = J
self.delta = delta
self.h = h
self.penalty = penalty
self.s_target = s_target
self.trial_id = trial_id<|docstring|>Args:
N: System size.
J:
delta:
h: Disorder strength.
penalty: Penalty strength (or Lagrangian multiplier).
s_target: The targeting total Sz charge sector.
trial_id: ID of the current disorder trial.<|endoftext|> |
cc74bc5abd28efa83b3c92f67721ef8349af01ce5703f8c19d2f95ee944ffb7e | def _write_to_bigquery(testcase, progression_range_start, progression_range_end):
def _write_to_bigquery(testcase, progression_range_start, progression_range_end):
  """Record the testcase's fixed revision range in the BigQuery 'fixeds' table."""
  big_query.write_range(
      table_id='fixeds',
      testcase=testcase,
      range_name='fixed',
      start=progression_range_start,
      end=progression_range_end)
big_query.write_range(table_id='fixeds', testcase=testcase, range_name='fixed', start=progression_range_start, end=progression_range_end)<|docstring|>Write the fixed range to BigQuery.<|endoftext|> |
bbfa9177354e2f217fa069f1c8b9dfdad86bb443f24875d48d752e851c2cd298 | def _clear_progression_pending(testcase):
'If we marked progression as pending for this testcase, clear that state.'
if (not testcase.get_metadata('progression_pending')):
return
testcase.delete_metadata('progression_pending', update_testcase=False) | If we marked progression as pending for this testcase, clear that state. | src/python/bot/tasks/progression_task.py | _clear_progression_pending | eepeep/clusterfuzz | 3 | python | def _clear_progression_pending(testcase):
if (not testcase.get_metadata('progression_pending')):
return
testcase.delete_metadata('progression_pending', update_testcase=False) | def _clear_progression_pending(testcase):
if (not testcase.get_metadata('progression_pending')):
return
testcase.delete_metadata('progression_pending', update_testcase=False)<|docstring|>If we marked progression as pending for this testcase, clear that state.<|endoftext|> |
518b1814b7aabeb83eb07be99581439376373f0c83bae6a37ed8c822923c9d09 | def _update_completion_metadata(testcase, revision, is_crash=False, message=None):
def _update_completion_metadata(testcase, revision, is_crash=False, message=None):
  """Update metadata when the progression task completes.

  Records the last tested revision (plus crash revision/time when the build
  still crashed), stamps a closed time on closed testcases, clears any
  pending-progression marker, and logs a FINISHED comment with |message|.
  """
  _clear_progression_pending(testcase)
  testcase.set_metadata('last_tested_revision', revision, update_testcase=False)
  if is_crash:
    testcase.set_metadata('last_tested_crash_revision', revision, update_testcase=False)
    testcase.set_metadata('last_tested_crash_time', utils.utcnow(), update_testcase=False)
  if (not testcase.open):
    # The caller already closed/fixed the testcase; remember when.
    testcase.set_metadata('closed_time', utils.utcnow(), update_testcase=False)
  # NOTE(review): the set_metadata calls defer persistence
  # (update_testcase=False); presumably the final comment update saves the
  # entity once — confirm against data_handler.
  data_handler.update_testcase_comment(testcase, data_types.TaskState.FINISHED, message)
_clear_progression_pending(testcase)
testcase.set_metadata('last_tested_revision', revision, update_testcase=False)
if is_crash:
testcase.set_metadata('last_tested_crash_revision', revision, update_testcase=False)
testcase.set_metadata('last_tested_crash_time', utils.utcnow(), update_testcase=False)
if (not testcase.open):
testcase.set_metadata('closed_time', utils.utcnow(), update_testcase=False)
data_handler.update_testcase_comment(testcase, data_types.TaskState.FINISHED, message) | def _update_completion_metadata(testcase, revision, is_crash=False, message=None):
_clear_progression_pending(testcase)
testcase.set_metadata('last_tested_revision', revision, update_testcase=False)
if is_crash:
testcase.set_metadata('last_tested_crash_revision', revision, update_testcase=False)
testcase.set_metadata('last_tested_crash_time', utils.utcnow(), update_testcase=False)
if (not testcase.open):
testcase.set_metadata('closed_time', utils.utcnow(), update_testcase=False)
data_handler.update_testcase_comment(testcase, data_types.TaskState.FINISHED, message)<|docstring|>Update metadata the progression task completes.<|endoftext|> |
f1196498bcb2a629721821f50e1a0992d3c7998624703e9fef4eafe4026f8a01 | def _log_output(revision, crash_result):
def _log_output(revision, crash_result):
  """Log the symbolized stacktrace produced while testing |revision|."""
  stacktrace = crash_result.get_stacktrace(symbolized=True)
  logs.log('Testing %s.' % revision, revision=revision, output=stacktrace)
logs.log(('Testing %s.' % revision), revision=revision, output=crash_result.get_stacktrace(symbolized=True)) | def _log_output(revision, crash_result):
logs.log(('Testing %s.' % revision), revision=revision, output=crash_result.get_stacktrace(symbolized=True))<|docstring|>Log process output.<|endoftext|> |
22743af903803584c8fea78dcd11958491f223bc931dccd84c906a72dd183551 | def _check_fixed_for_custom_binary(testcase, job_type, testcase_file_path):
def _check_fixed_for_custom_binary(testcase, job_type, testcase_file_path):
  """Simplified fixed check for test cases using custom binaries.

  Custom binaries have no revision history to bisect, so the testcase is
  simply re-run against the current build: still crashing -> record the
  latest stacktrace; not crashing -> retry once, then mark it fixed.
  """
  revision = environment.get_value('APP_REVISION')
  # Re-fetch the entity so the comment below applies to fresh data.
  testcase_id = testcase.key.id()
  testcase = data_handler.get_testcase_by_id(testcase_id)
  data_handler.update_testcase_comment(testcase, data_types.TaskState.STARTED)
  build_manager.setup_build()
  if (not build_manager.check_app_path()):
    # Build setup failed; record the error and reschedule after a wait.
    testcase = data_handler.get_testcase_by_id(testcase_id)
    data_handler.update_testcase_comment(testcase, data_types.TaskState.ERROR, 'Build setup failed for custom binary')
    build_fail_wait = environment.get_value('FAIL_WAIT')
    tasks.add_task('progression', testcase_id, job_type, wait_time=build_fail_wait)
    return
  test_timeout = environment.get_value('TEST_TIMEOUT', 10)
  result = testcase_manager.test_for_crash_with_retries(testcase, testcase_file_path, test_timeout, http_flag=testcase.http_flag)
  _log_output(revision, result)
  # Re-fetch again: testing can take a while and the entity may have changed.
  testcase = data_handler.get_testcase_by_id(testcase.key.id())
  if result.is_crash():
    # Still reproduces: store the latest symbolized+raw stacktrace and finish.
    app_path = environment.get_value('APP_PATH')
    command = testcase_manager.get_command_line_for_application(testcase_file_path, app_path=app_path, needs_http=testcase.http_flag)
    symbolized_crash_stacktrace = result.get_stacktrace(symbolized=True)
    unsymbolized_crash_stacktrace = result.get_stacktrace(symbolized=False)
    stacktrace = utils.get_crash_stacktrace_output(command, symbolized_crash_stacktrace, unsymbolized_crash_stacktrace)
    testcase.last_tested_crash_stacktrace = data_handler.filter_stacktrace(stacktrace)
    _update_completion_metadata(testcase, revision, is_crash=True, message='still crashes on latest custom build')
    return
  # Not crashing: retry once as a flaky-reproduction guard before declaring fixed.
  if data_handler.is_first_retry_for_task(testcase, reset_after_retry=True):
    tasks.add_task('progression', testcase_id, job_type)
    _update_completion_metadata(testcase, revision)
    return
  testcase.fixed = 'Yes'
  testcase.open = False
  _update_completion_metadata(testcase, revision, message='fixed on latest custom build')
revision = environment.get_value('APP_REVISION')
testcase_id = testcase.key.id()
testcase = data_handler.get_testcase_by_id(testcase_id)
data_handler.update_testcase_comment(testcase, data_types.TaskState.STARTED)
build_manager.setup_build()
if (not build_manager.check_app_path()):
testcase = data_handler.get_testcase_by_id(testcase_id)
data_handler.update_testcase_comment(testcase, data_types.TaskState.ERROR, 'Build setup failed for custom binary')
build_fail_wait = environment.get_value('FAIL_WAIT')
tasks.add_task('progression', testcase_id, job_type, wait_time=build_fail_wait)
return
test_timeout = environment.get_value('TEST_TIMEOUT', 10)
result = testcase_manager.test_for_crash_with_retries(testcase, testcase_file_path, test_timeout, http_flag=testcase.http_flag)
_log_output(revision, result)
testcase = data_handler.get_testcase_by_id(testcase.key.id())
if result.is_crash():
app_path = environment.get_value('APP_PATH')
command = testcase_manager.get_command_line_for_application(testcase_file_path, app_path=app_path, needs_http=testcase.http_flag)
symbolized_crash_stacktrace = result.get_stacktrace(symbolized=True)
unsymbolized_crash_stacktrace = result.get_stacktrace(symbolized=False)
stacktrace = utils.get_crash_stacktrace_output(command, symbolized_crash_stacktrace, unsymbolized_crash_stacktrace)
testcase.last_tested_crash_stacktrace = data_handler.filter_stacktrace(stacktrace)
_update_completion_metadata(testcase, revision, is_crash=True, message='still crashes on latest custom build')
return
if data_handler.is_first_retry_for_task(testcase, reset_after_retry=True):
tasks.add_task('progression', testcase_id, job_type)
_update_completion_metadata(testcase, revision)
return
testcase.fixed = 'Yes'
testcase.open = False
_update_completion_metadata(testcase, revision, message='fixed on latest custom build') | def _check_fixed_for_custom_binary(testcase, job_type, testcase_file_path):
revision = environment.get_value('APP_REVISION')
testcase_id = testcase.key.id()
testcase = data_handler.get_testcase_by_id(testcase_id)
data_handler.update_testcase_comment(testcase, data_types.TaskState.STARTED)
build_manager.setup_build()
if (not build_manager.check_app_path()):
testcase = data_handler.get_testcase_by_id(testcase_id)
data_handler.update_testcase_comment(testcase, data_types.TaskState.ERROR, 'Build setup failed for custom binary')
build_fail_wait = environment.get_value('FAIL_WAIT')
tasks.add_task('progression', testcase_id, job_type, wait_time=build_fail_wait)
return
test_timeout = environment.get_value('TEST_TIMEOUT', 10)
result = testcase_manager.test_for_crash_with_retries(testcase, testcase_file_path, test_timeout, http_flag=testcase.http_flag)
_log_output(revision, result)
testcase = data_handler.get_testcase_by_id(testcase.key.id())
if result.is_crash():
app_path = environment.get_value('APP_PATH')
command = testcase_manager.get_command_line_for_application(testcase_file_path, app_path=app_path, needs_http=testcase.http_flag)
symbolized_crash_stacktrace = result.get_stacktrace(symbolized=True)
unsymbolized_crash_stacktrace = result.get_stacktrace(symbolized=False)
stacktrace = utils.get_crash_stacktrace_output(command, symbolized_crash_stacktrace, unsymbolized_crash_stacktrace)
testcase.last_tested_crash_stacktrace = data_handler.filter_stacktrace(stacktrace)
_update_completion_metadata(testcase, revision, is_crash=True, message='still crashes on latest custom build')
return
if data_handler.is_first_retry_for_task(testcase, reset_after_retry=True):
tasks.add_task('progression', testcase_id, job_type)
_update_completion_metadata(testcase, revision)
return
testcase.fixed = 'Yes'
testcase.open = False
_update_completion_metadata(testcase, revision, message='fixed on latest custom build')<|docstring|>Simplified fixed check for test cases using custom binaries.<|endoftext|> |
494a688cc48f29a00ea948f7d404cab423cc5c2d782ed8c0f891470d99a796cd | def _update_issue_metadata(testcase):
def _update_issue_metadata(testcase):
  """Refresh fuzzer-provided issue metadata on |testcase|.

  Skipped for user-uploaded testcases, which carry their own metadata.
  Only keys whose value actually changed are written (and logged).
  """
  if testcase.uploader_email:
    return
  metadata = engine_common.get_all_issue_metadata_for_testcase(testcase)
  if not metadata:
    return
  for key, value in six.iteritems(metadata):
    old_value = testcase.get_metadata(key)
    if old_value == value:
      continue
    logs.log('Updating issue metadata for {} from {} to {}.'.format(key, old_value, value))
    testcase.set_metadata(key, value)
if testcase.uploader_email:
return
metadata = engine_common.get_all_issue_metadata_for_testcase(testcase)
if (not metadata):
return
for (key, value) in six.iteritems(metadata):
old_value = testcase.get_metadata(key)
if (old_value != value):
logs.log('Updating issue metadata for {} from {} to {}.'.format(key, old_value, value))
testcase.set_metadata(key, value) | def _update_issue_metadata(testcase):
if testcase.uploader_email:
return
metadata = engine_common.get_all_issue_metadata_for_testcase(testcase)
if (not metadata):
return
for (key, value) in six.iteritems(metadata):
old_value = testcase.get_metadata(key)
if (old_value != value):
logs.log('Updating issue metadata for {} from {} to {}.'.format(key, old_value, value))
testcase.set_metadata(key, value)<|docstring|>Update issue metadata.<|endoftext|> |
def _testcase_reproduces_in_revision(testcase,
                                     testcase_file_path,
                                     job_type,
                                     revision,
                                     update_metadata=False):
  """Test to see if a test case reproduces in the specified revision.

  Sets up the build for `revision`, validates it, then runs the testcase
  against it with retries.

  Args:
    testcase: A data_types.Testcase entity to reproduce.
    testcase_file_path: Path to the testcase file on disk.
    job_type: Name of the job, used in error reporting.
    revision: Revision number of the build to test against.
    update_metadata: If True, also refresh issue metadata on the testcase
      after the run (see _update_issue_metadata).

  Returns:
    The CrashResult from testcase_manager.test_for_crash_with_retries.

  Raises:
    errors.BuildSetupError: If the build for `revision` could not be set up
      (no usable app path after setup).
    errors.BadBuildError: If the build at `revision` is detected as bad.
  """
  build_manager.setup_build(revision)
  if not build_manager.check_app_path():
    raise errors.BuildSetupError(revision, job_type)

  if testcase_manager.check_for_bad_build(job_type, revision):
    log_message = 'Bad build at r%d. Skipping' % revision
    # Re-fetch the testcase to avoid clobbering concurrent updates before
    # recording the comment.
    testcase = data_handler.get_testcase_by_id(testcase.key.id())
    data_handler.update_testcase_comment(testcase, data_types.TaskState.WIP,
                                         log_message)
    raise errors.BadBuildError(revision, job_type)

  # Default timeout of 10 applies only if TEST_TIMEOUT is unset in the
  # environment.
  test_timeout = environment.get_value('TEST_TIMEOUT', 10)
  result = testcase_manager.test_for_crash_with_retries(
      testcase, testcase_file_path, test_timeout, http_flag=testcase.http_flag)
  _log_output(revision, result)

  if update_metadata:
    _update_issue_metadata(testcase)

  return result
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.