repo_name stringlengths 7 65 | path stringlengths 5 185 | copies stringlengths 1 4 | size stringlengths 4 6 | content stringlengths 977 990k | license stringclasses 14 values | hash stringlengths 32 32 | line_mean float64 7.18 99.4 | line_max int64 31 999 | alpha_frac float64 0.25 0.95 | ratio float64 1.5 7.84 | autogenerated bool 1 class | config_or_test bool 2 classes | has_no_keywords bool 2 classes | has_few_assignments bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
twilio/twilio-python | tests/integration/api/v2010/test_account.py | 1 | 18728 | # coding=utf-8
r"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from tests import IntegrationTestCase
from tests.holodeck import Request
from twilio.base.exceptions import TwilioException
from twilio.http.response import Response
class AccountTestCase(IntegrationTestCase):

    def test_create_request(self):
        """Creating an account must POST to the Accounts collection URI."""
        # Make the mocked transport fail so nothing real is created.
        self.holodeck.mock(Response(500, ''))

        with self.assertRaises(TwilioException):
            self.client.api.v2010.accounts.create()

        # Even though the call failed, the HTTP request must have been issued.
        expected = Request(
            'post',
            'https://api.twilio.com/2010-04-01/Accounts.json',
        )
        self.holodeck.assert_has_request(expected)
def test_create_response(self):
# Stub the transport: respond 201 Created with a canned Account payload.
self.holodeck.mock(Response(
201,
'''
{
"auth_token": "auth_token",
"date_created": "Thu, 30 Jul 2015 20:00:00 +0000",
"date_updated": "Thu, 30 Jul 2015 20:00:00 +0000",
"friendly_name": "friendly_name",
"owner_account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"status": "active",
"subresource_uris": {
"available_phone_numbers": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/AvailablePhoneNumbers.json",
"calls": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Calls.json",
"conferences": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Conferences.json",
"incoming_phone_numbers": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/IncomingPhoneNumbers.json",
"notifications": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Notifications.json",
"outgoing_caller_ids": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/OutgoingCallerIds.json",
"recordings": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Recordings.json",
"transcriptions": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Transcriptions.json",
"addresses": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Addresses.json",
"signing_keys": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/SigningKeys.json",
"connect_apps": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/ConnectApps.json",
"sip": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/SIP.json",
"authorized_connect_apps": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/AuthorizedConnectApps.json",
"usage": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Usage.json",
"keys": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Keys.json",
"applications": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Applications.json",
"short_codes": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/SMS/ShortCodes.json",
"queues": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Queues.json",
"messages": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Messages.json",
"balance": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Balance.json"
},
"type": "Full",
"uri": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa.json"
}
'''
))
# The canned payload must deserialize into a non-None AccountInstance.
actual = self.client.api.v2010.accounts.create()
self.assertIsNotNone(actual)
def test_fetch_request(self):
self.holodeck.mock(Response(500, ''))
with self.assertRaises(TwilioException):
self.client.api.v2010.accounts("ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").fetch()
self.holodeck.assert_has_request(Request(
'get',
'https://api.twilio.com/2010-04-01/Accounts/ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX.json',
))
def test_fetch_response(self):
# Stub the transport: respond 200 OK with a canned Account payload.
self.holodeck.mock(Response(
200,
'''
{
"auth_token": "auth_token",
"date_created": "Thu, 30 Jul 2015 20:00:00 +0000",
"date_updated": "Thu, 30 Jul 2015 20:00:00 +0000",
"friendly_name": "friendly_name",
"owner_account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"status": "active",
"subresource_uris": {
"available_phone_numbers": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/AvailablePhoneNumbers.json",
"calls": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Calls.json",
"conferences": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Conferences.json",
"incoming_phone_numbers": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/IncomingPhoneNumbers.json",
"notifications": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Notifications.json",
"outgoing_caller_ids": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/OutgoingCallerIds.json",
"recordings": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Recordings.json",
"transcriptions": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Transcriptions.json",
"addresses": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Addresses.json",
"signing_keys": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/SigningKeys.json",
"connect_apps": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/ConnectApps.json",
"sip": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/SIP.json",
"authorized_connect_apps": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/AuthorizedConnectApps.json",
"usage": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Usage.json",
"keys": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Keys.json",
"applications": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Applications.json",
"short_codes": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/SMS/ShortCodes.json",
"queues": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Queues.json",
"messages": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Messages.json",
"balance": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Balance.json"
},
"type": "Full",
"uri": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa.json"
}
'''
))
# Fetching the account should deserialize the payload into a non-None instance.
actual = self.client.api.v2010.accounts("ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").fetch()
self.assertIsNotNone(actual)
def test_list_request(self):
self.holodeck.mock(Response(500, ''))
with self.assertRaises(TwilioException):
self.client.api.v2010.accounts.list()
self.holodeck.assert_has_request(Request(
'get',
'https://api.twilio.com/2010-04-01/Accounts.json',
))
def test_read_empty_response(self):
# Stub the transport with a page payload whose "accounts" array is empty.
self.holodeck.mock(Response(
200,
'''
{
"first_page_uri": "/2010-04-01/Accounts.json?FriendlyName=friendly_name&Status=active&PageSize=50&Page=0",
"end": 0,
"previous_page_uri": null,
"accounts": [],
"uri": "/2010-04-01/Accounts.json?FriendlyName=friendly_name&Status=active&PageSize=50&Page=0",
"page_size": 50,
"start": 0,
"next_page_uri": null,
"page": 0
}
'''
))
# list() should still return a (possibly empty) list object, never None.
actual = self.client.api.v2010.accounts.list()
self.assertIsNotNone(actual)
def test_read_full_response(self):
# Stub the transport with a one-record page payload for the Accounts list.
self.holodeck.mock(Response(
200,
'''
{
"first_page_uri": "/2010-04-01/Accounts.json?FriendlyName=friendly_name&Status=active&PageSize=50&Page=0",
"end": 0,
"previous_page_uri": null,
"accounts": [
{
"auth_token": "auth_token",
"date_created": "Thu, 30 Jul 2015 20:00:00 +0000",
"date_updated": "Thu, 30 Jul 2015 20:00:00 +0000",
"friendly_name": "friendly_name",
"owner_account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"status": "active",
"subresource_uris": {
"available_phone_numbers": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/AvailablePhoneNumbers.json",
"calls": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Calls.json",
"conferences": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Conferences.json",
"incoming_phone_numbers": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/IncomingPhoneNumbers.json",
"notifications": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Notifications.json",
"outgoing_caller_ids": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/OutgoingCallerIds.json",
"recordings": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Recordings.json",
"transcriptions": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Transcriptions.json",
"addresses": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Addresses.json",
"signing_keys": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/SigningKeys.json",
"connect_apps": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/ConnectApps.json",
"sip": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/SIP.json",
"authorized_connect_apps": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/AuthorizedConnectApps.json",
"usage": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Usage.json",
"keys": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Keys.json",
"applications": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Applications.json",
"short_codes": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/SMS/ShortCodes.json",
"queues": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Queues.json",
"messages": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Messages.json",
"balance": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Balance.json"
},
"type": "Full",
"uri": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa.json"
}
],
"uri": "/2010-04-01/Accounts.json?FriendlyName=friendly_name&Status=active&PageSize=50&Page=0",
"page_size": 50,
"start": 0,
"next_page_uri": null,
"page": 0
}
'''
))
# The populated page should deserialize into a non-None result list.
actual = self.client.api.v2010.accounts.list()
self.assertIsNotNone(actual)
def test_update_request(self):
self.holodeck.mock(Response(500, ''))
with self.assertRaises(TwilioException):
self.client.api.v2010.accounts("ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").update()
self.holodeck.assert_has_request(Request(
'post',
'https://api.twilio.com/2010-04-01/Accounts/ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX.json',
))
def test_update_response(self):
# Stub the transport: respond 200 OK with a canned Account payload.
self.holodeck.mock(Response(
200,
'''
{
"auth_token": "auth_token",
"date_created": "Thu, 30 Jul 2015 20:00:00 +0000",
"date_updated": "Thu, 30 Jul 2015 20:00:00 +0000",
"friendly_name": "friendly_name",
"owner_account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"status": "active",
"subresource_uris": {
"available_phone_numbers": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/AvailablePhoneNumbers.json",
"calls": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Calls.json",
"conferences": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Conferences.json",
"incoming_phone_numbers": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/IncomingPhoneNumbers.json",
"notifications": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Notifications.json",
"outgoing_caller_ids": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/OutgoingCallerIds.json",
"recordings": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Recordings.json",
"transcriptions": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Transcriptions.json",
"addresses": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Addresses.json",
"signing_keys": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/SigningKeys.json",
"connect_apps": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/ConnectApps.json",
"sip": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/SIP.json",
"authorized_connect_apps": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/AuthorizedConnectApps.json",
"usage": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Usage.json",
"keys": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Keys.json",
"applications": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Applications.json",
"short_codes": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/SMS/ShortCodes.json",
"queues": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Queues.json",
"messages": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Messages.json",
"balance": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Balance.json"
},
"type": "Full",
"uri": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa.json"
}
'''
))
# Updating the account should deserialize the payload into a non-None instance.
actual = self.client.api.v2010.accounts("ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").update()
self.assertIsNotNone(actual)
def test_update_with_numeric_status_response(self):
# NOTE(review): despite the test name, this fixture's "status" is the string
# "active", not a numeric value, making it identical to test_update_response --
# confirm against the regression this test was meant to cover.
self.holodeck.mock(Response(
200,
'''
{
"auth_token": "auth_token",
"date_created": "Thu, 30 Jul 2015 20:00:00 +0000",
"date_updated": "Thu, 30 Jul 2015 20:00:00 +0000",
"friendly_name": "friendly_name",
"owner_account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"status": "active",
"subresource_uris": {
"available_phone_numbers": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/AvailablePhoneNumbers.json",
"calls": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Calls.json",
"conferences": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Conferences.json",
"incoming_phone_numbers": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/IncomingPhoneNumbers.json",
"notifications": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Notifications.json",
"outgoing_caller_ids": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/OutgoingCallerIds.json",
"recordings": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Recordings.json",
"transcriptions": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Transcriptions.json",
"addresses": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Addresses.json",
"signing_keys": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/SigningKeys.json",
"connect_apps": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/ConnectApps.json",
"sip": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/SIP.json",
"authorized_connect_apps": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/AuthorizedConnectApps.json",
"usage": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Usage.json",
"keys": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Keys.json",
"applications": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Applications.json",
"short_codes": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/SMS/ShortCodes.json",
"queues": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Queues.json",
"messages": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Messages.json",
"balance": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Balance.json"
},
"type": "Full",
"uri": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa.json"
}
'''
))
# Updating the account should deserialize the payload into a non-None instance.
actual = self.client.api.v2010.accounts("ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").update()
self.assertIsNotNone(actual)
| mit | 02ae2a3e54e6dde88cc7112314b2fd05 | 58.833866 | 140 | 0.609035 | 4.548943 | false | false | false | false |
twilio/twilio-python | twilio/rest/conversations/v1/user/__init__.py | 1 | 17720 | # coding=utf-8
r"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from twilio.base import deserialize
from twilio.base import values
from twilio.base.instance_context import InstanceContext
from twilio.base.instance_resource import InstanceResource
from twilio.base.list_resource import ListResource
from twilio.base.page import Page
from twilio.rest.conversations.v1.user.user_conversation import UserConversationList
class UserList(ListResource):
    """Collection-level operations on Conversations User resources."""

    def __init__(self, version):
        """
        Build the UserList.

        :param Version version: Version that owns this resource

        :returns: twilio.rest.conversations.v1.user.UserList
        :rtype: twilio.rest.conversations.v1.user.UserList
        """
        super(UserList, self).__init__(version)

        # The collection URI has no path placeholders to resolve.
        self._solution = {}
        self._uri = '/Users'.format(**self._solution)

    def create(self, identity, friendly_name=values.unset, attributes=values.unset,
               role_sid=values.unset, x_twilio_webhook_enabled=values.unset):
        """
        Create a new UserInstance.

        :param unicode identity: The string that identifies the resource's User
        :param unicode friendly_name: The string that you assigned to describe the resource
        :param unicode attributes: The JSON Object string that stores application-specific data
        :param unicode role_sid: The SID of a service-level Role to assign to the user
        :param UserInstance.WebhookEnabledType x_twilio_webhook_enabled: The X-Twilio-Webhook-Enabled HTTP request header

        :returns: The created UserInstance
        :rtype: twilio.rest.conversations.v1.user.UserInstance
        """
        form_params = values.of({
            'Identity': identity,
            'FriendlyName': friendly_name,
            'Attributes': attributes,
            'RoleSid': role_sid,
        })
        request_headers = values.of({'X-Twilio-Webhook-Enabled': x_twilio_webhook_enabled})

        payload = self._version.create(
            method='POST',
            uri=self._uri,
            data=form_params,
            headers=request_headers,
        )

        return UserInstance(self._version, payload)

    def stream(self, limit=None, page_size=None):
        """
        Stream UserInstance records lazily from the API.

        Pages are fetched only as the returned generator is consumed, so
        memory use stays proportional to a single page.

        :param int limit: Upper limit for the number of records to return; stream()
                          never yields more than this.  Default is no limit.
        :param int page_size: Number of records to fetch per request; defaults to 50.
                              When only a limit is given, the most efficient page
                              size is used, i.e. min(limit, 1000).

        :returns: Generator that will yield up to limit results
        :rtype: list[twilio.rest.conversations.v1.user.UserInstance]
        """
        limits = self._version.read_limits(limit, page_size)
        first_page = self.page(page_size=limits['page_size'])
        return self._version.stream(first_page, limits['limit'])

    def list(self, limit=None, page_size=None):
        """
        Eagerly read UserInstance records into a list.

        Unlike stream(), all matching records (up to ``limit``) are loaded
        into memory before this returns.

        :param int limit: Upper limit for the number of records to return; list()
                          never returns more than this.  Default is no limit.
        :param int page_size: Number of records to fetch per request; defaults to 50.
                              When only a limit is given, the most efficient page
                              size is used, i.e. min(limit, 1000).

        :returns: Generator that will yield up to limit results
        :rtype: list[twilio.rest.conversations.v1.user.UserInstance]
        """
        return list(self.stream(limit=limit, page_size=page_size))

    def page(self, page_token=values.unset, page_number=values.unset,
             page_size=values.unset):
        """
        Retrieve a single page of UserInstance records; the request is
        executed immediately.

        :param str page_token: PageToken provided by the API
        :param int page_number: Page Number, this value is simply for client state
        :param int page_size: Number of records to return, defaults to 50

        :returns: Page of UserInstance
        :rtype: twilio.rest.conversations.v1.user.UserPage
        """
        params = values.of({
            'PageToken': page_token,
            'Page': page_number,
            'PageSize': page_size,
        })

        response = self._version.page(method='GET', uri=self._uri, params=params)

        return UserPage(self._version, response, self._solution)

    def get_page(self, target_url):
        """
        Retrieve a specific page of UserInstance records by absolute URL; the
        request is executed immediately.

        :param str target_url: API-generated URL for the requested results page

        :returns: Page of UserInstance
        :rtype: twilio.rest.conversations.v1.user.UserPage
        """
        response = self._version.domain.twilio.request('GET', target_url)

        return UserPage(self._version, response, self._solution)

    def get(self, sid):
        """
        Construct a UserContext for a single user.

        :param sid: The SID of the User resource to fetch

        :returns: twilio.rest.conversations.v1.user.UserContext
        :rtype: twilio.rest.conversations.v1.user.UserContext
        """
        return UserContext(self._version, sid=sid)

    def __call__(self, sid):
        """
        Shorthand for :meth:`get`.

        :param sid: The SID of the User resource to fetch

        :returns: twilio.rest.conversations.v1.user.UserContext
        :rtype: twilio.rest.conversations.v1.user.UserContext
        """
        return self.get(sid)

    def __repr__(self):
        """
        Provide a friendly representation

        :returns: Machine friendly representation
        :rtype: str
        """
        return '<Twilio.Conversations.V1.UserList>'
class UserPage(Page):
    """One page of User records returned by the Conversations API."""

    def __init__(self, version, response, solution):
        """
        Wrap a single API response as a UserPage.

        :param Version version: Version that owns this resource
        :param Response response: Response from the API

        :returns: twilio.rest.conversations.v1.user.UserPage
        :rtype: twilio.rest.conversations.v1.user.UserPage
        """
        super(UserPage, self).__init__(version, response)

        # Path placeholders carried over from the list that produced this page.
        self._solution = solution

    def get_instance(self, payload):
        """
        Turn one record's payload into a UserInstance.

        :param dict payload: Payload response from the API

        :returns: twilio.rest.conversations.v1.user.UserInstance
        :rtype: twilio.rest.conversations.v1.user.UserInstance
        """
        return UserInstance(self._version, payload)

    def __repr__(self):
        """
        Provide a friendly representation

        :returns: Machine friendly representation
        :rtype: str
        """
        return '<Twilio.Conversations.V1.UserPage>'
class UserContext(InstanceContext):
    """Fetch/update/delete operations on a single User resource."""

    def __init__(self, version, sid):
        """
        Build the UserContext.

        :param Version version: Version that owns this resource
        :param sid: The SID of the User resource to fetch

        :returns: twilio.rest.conversations.v1.user.UserContext
        :rtype: twilio.rest.conversations.v1.user.UserContext
        """
        super(UserContext, self).__init__(version)

        # Path placeholders for the instance URI.
        self._solution = {'sid': sid}
        self._uri = '/Users/{sid}'.format(**self._solution)

        # Dependent resource lists, constructed lazily on first access.
        self._user_conversations = None

    def update(self, friendly_name=values.unset, attributes=values.unset,
               role_sid=values.unset, x_twilio_webhook_enabled=values.unset):
        """
        Update the UserInstance.

        :param unicode friendly_name: The string that you assigned to describe the resource
        :param unicode attributes: The JSON Object string that stores application-specific data
        :param unicode role_sid: The SID of a service-level Role to assign to the user
        :param UserInstance.WebhookEnabledType x_twilio_webhook_enabled: The X-Twilio-Webhook-Enabled HTTP request header

        :returns: The updated UserInstance
        :rtype: twilio.rest.conversations.v1.user.UserInstance
        """
        form_params = values.of({
            'FriendlyName': friendly_name,
            'Attributes': attributes,
            'RoleSid': role_sid,
        })
        request_headers = values.of({'X-Twilio-Webhook-Enabled': x_twilio_webhook_enabled})

        payload = self._version.update(
            method='POST',
            uri=self._uri,
            data=form_params,
            headers=request_headers,
        )

        return UserInstance(self._version, payload, sid=self._solution['sid'])

    def delete(self, x_twilio_webhook_enabled=values.unset):
        """
        Delete the UserInstance.

        :param UserInstance.WebhookEnabledType x_twilio_webhook_enabled: The X-Twilio-Webhook-Enabled HTTP request header

        :returns: True if delete succeeds, False otherwise
        :rtype: bool
        """
        request_headers = values.of({'X-Twilio-Webhook-Enabled': x_twilio_webhook_enabled})

        return self._version.delete(method='DELETE', uri=self._uri, headers=request_headers)

    def fetch(self):
        """
        Fetch the UserInstance.

        :returns: The fetched UserInstance
        :rtype: twilio.rest.conversations.v1.user.UserInstance
        """
        payload = self._version.fetch(method='GET', uri=self._uri)

        return UserInstance(self._version, payload, sid=self._solution['sid'])

    @property
    def user_conversations(self):
        """
        Access the user_conversations subresource.

        :returns: twilio.rest.conversations.v1.user.user_conversation.UserConversationList
        :rtype: twilio.rest.conversations.v1.user.user_conversation.UserConversationList
        """
        if self._user_conversations is None:
            self._user_conversations = UserConversationList(
                self._version,
                user_sid=self._solution['sid'],
            )
        return self._user_conversations

    def __repr__(self):
        """
        Provide a friendly representation

        :returns: Machine friendly representation
        :rtype: str
        """
        details = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())
        return '<Twilio.Conversations.V1.UserContext {}>'.format(details)
class UserInstance(InstanceResource):
    """A single Conversations User resource, marshaled from an API payload."""

    class WebhookEnabledType(object):
        # Allowed values for the X-Twilio-Webhook-Enabled request header.
        TRUE = "true"
        FALSE = "false"

    def __init__(self, version, payload, sid=None):
        """
        Build the UserInstance from a raw API payload.

        :returns: twilio.rest.conversations.v1.user.UserInstance
        :rtype: twilio.rest.conversations.v1.user.UserInstance
        """
        super(UserInstance, self).__init__(version)

        # Marshal the raw payload into typed properties.
        self._properties = {
            'sid': payload.get('sid'),
            'account_sid': payload.get('account_sid'),
            'chat_service_sid': payload.get('chat_service_sid'),
            'role_sid': payload.get('role_sid'),
            'identity': payload.get('identity'),
            'friendly_name': payload.get('friendly_name'),
            'attributes': payload.get('attributes'),
            'is_online': payload.get('is_online'),
            'is_notifiable': payload.get('is_notifiable'),
            'date_created': deserialize.iso8601_datetime(payload.get('date_created')),
            'date_updated': deserialize.iso8601_datetime(payload.get('date_updated')),
            'url': payload.get('url'),
            'links': payload.get('links'),
        }

        # The context is built lazily; prefer an explicitly supplied sid over
        # the one found in the payload.
        self._context = None
        self._solution = {'sid': sid or self._properties['sid']}

    @property
    def _proxy(self):
        """
        Lazily build (and cache) the UserContext through which every instance
        action on this UserInstance is performed.

        :returns: UserContext for this UserInstance
        :rtype: twilio.rest.conversations.v1.user.UserContext
        """
        if self._context is None:
            self._context = UserContext(self._version, sid=self._solution['sid'])
        return self._context

    @property
    def sid(self):
        """unicode: The unique string that identifies the resource."""
        return self._properties['sid']

    @property
    def account_sid(self):
        """unicode: The SID of the Account that created the resource."""
        return self._properties['account_sid']

    @property
    def chat_service_sid(self):
        """unicode: The SID of the Conversation Service that the resource is associated with."""
        return self._properties['chat_service_sid']

    @property
    def role_sid(self):
        """unicode: The SID of a service-level Role assigned to the user."""
        return self._properties['role_sid']

    @property
    def identity(self):
        """unicode: The string that identifies the resource's User."""
        return self._properties['identity']

    @property
    def friendly_name(self):
        """unicode: The string that you assigned to describe the resource."""
        return self._properties['friendly_name']

    @property
    def attributes(self):
        """unicode: The JSON Object string that stores application-specific data."""
        return self._properties['attributes']

    @property
    def is_online(self):
        """bool: Whether the User is actively connected to this Conversations Service and online."""
        return self._properties['is_online']

    @property
    def is_notifiable(self):
        """bool: Whether the User has a potentially valid Push Notification registration for this Conversations Service."""
        return self._properties['is_notifiable']

    @property
    def date_created(self):
        """datetime: The ISO 8601 date and time in GMT when the resource was created."""
        return self._properties['date_created']

    @property
    def date_updated(self):
        """datetime: The ISO 8601 date and time in GMT when the resource was last updated."""
        return self._properties['date_updated']

    @property
    def url(self):
        """unicode: An absolute URL for this user."""
        return self._properties['url']

    @property
    def links(self):
        """unicode: The links."""
        return self._properties['links']

    def update(self, friendly_name=values.unset, attributes=values.unset,
               role_sid=values.unset, x_twilio_webhook_enabled=values.unset):
        """
        Update the UserInstance (proxied through its UserContext).

        :param unicode friendly_name: The string that you assigned to describe the resource
        :param unicode attributes: The JSON Object string that stores application-specific data
        :param unicode role_sid: The SID of a service-level Role to assign to the user
        :param UserInstance.WebhookEnabledType x_twilio_webhook_enabled: The X-Twilio-Webhook-Enabled HTTP request header

        :returns: The updated UserInstance
        :rtype: twilio.rest.conversations.v1.user.UserInstance
        """
        return self._proxy.update(
            friendly_name=friendly_name,
            attributes=attributes,
            role_sid=role_sid,
            x_twilio_webhook_enabled=x_twilio_webhook_enabled,
        )

    def delete(self, x_twilio_webhook_enabled=values.unset):
        """
        Delete the UserInstance (proxied through its UserContext).

        :param UserInstance.WebhookEnabledType x_twilio_webhook_enabled: The X-Twilio-Webhook-Enabled HTTP request header

        :returns: True if delete succeeds, False otherwise
        :rtype: bool
        """
        return self._proxy.delete(x_twilio_webhook_enabled=x_twilio_webhook_enabled)

    def fetch(self):
        """
        Fetch the UserInstance (proxied through its UserContext).

        :returns: The fetched UserInstance
        :rtype: twilio.rest.conversations.v1.user.UserInstance
        """
        return self._proxy.fetch()

    @property
    def user_conversations(self):
        """
        Access the user_conversations subresource.

        :returns: twilio.rest.conversations.v1.user.user_conversation.UserConversationList
        :rtype: twilio.rest.conversations.v1.user.user_conversation.UserConversationList
        """
        return self._proxy.user_conversations

    def __repr__(self):
        """
        Provide a friendly representation

        :returns: Machine friendly representation
        :rtype: str
        """
        details = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())
        return '<Twilio.Conversations.V1.UserInstance {}>'.format(details)
| mit | 863cef7fe3848ff1bfb83f34929ec7dd | 33.95069 | 121 | 0.623815 | 4.243295 | false | false | false | false |
twilio/twilio-python | twilio/rest/verify/v2/form.py | 1 | 6877 | # coding=utf-8
r"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from twilio.base import values
from twilio.base.instance_context import InstanceContext
from twilio.base.instance_resource import InstanceResource
from twilio.base.list_resource import ListResource
from twilio.base.page import Page
class FormList(ListResource):
    """ PLEASE NOTE that this class contains beta products that are subject to
    change. Use them with caution. """

    def __init__(self, version):
        """
        Build the FormList.

        :param Version version: Version that owns this resource

        :returns: twilio.rest.verify.v2.form.FormList
        :rtype: twilio.rest.verify.v2.form.FormList
        """
        super(FormList, self).__init__(version)

        # The Forms collection has no path placeholders to resolve.
        self._solution = {}

    def get(self, form_type):
        """
        Construct a FormContext for a single form type.

        :param form_type: The Type of this Form

        :returns: twilio.rest.verify.v2.form.FormContext
        :rtype: twilio.rest.verify.v2.form.FormContext
        """
        return FormContext(self._version, form_type=form_type)

    def __call__(self, form_type):
        """
        Shorthand for :meth:`get`.

        :param form_type: The Type of this Form

        :returns: twilio.rest.verify.v2.form.FormContext
        :rtype: twilio.rest.verify.v2.form.FormContext
        """
        return self.get(form_type)

    def __repr__(self):
        """
        Provide a friendly representation

        :returns: Machine friendly representation
        :rtype: str
        """
        return '<Twilio.Verify.V2.FormList>'
class FormPage(Page):
    """ PLEASE NOTE that this class contains beta products that are subject to
    change. Use them with caution. """

    def __init__(self, version, response, solution):
        """
        Wrap a single API response as a FormPage.

        :param Version version: Version that owns this resource
        :param Response response: Response from the API

        :returns: twilio.rest.verify.v2.form.FormPage
        :rtype: twilio.rest.verify.v2.form.FormPage
        """
        super(FormPage, self).__init__(version, response)

        # Path placeholders carried over from the list that produced this page.
        self._solution = solution

    def get_instance(self, payload):
        """
        Turn one record's payload into a FormInstance.

        :param dict payload: Payload response from the API

        :returns: twilio.rest.verify.v2.form.FormInstance
        :rtype: twilio.rest.verify.v2.form.FormInstance
        """
        return FormInstance(self._version, payload)

    def __repr__(self):
        """
        Provide a friendly representation

        :returns: Machine friendly representation
        :rtype: str
        """
        return '<Twilio.Verify.V2.FormPage>'
class FormContext(InstanceContext):
""" PLEASE NOTE that this class contains beta products that are subject to
change. Use them with caution. """
def __init__(self, version, form_type):
"""
Initialize the FormContext
:param Version version: Version that contains the resource
:param form_type: The Type of this Form
:returns: twilio.rest.verify.v2.form.FormContext
:rtype: twilio.rest.verify.v2.form.FormContext
"""
super(FormContext, self).__init__(version)
# Path Solution
self._solution = {'form_type': form_type, }
self._uri = '/Forms/{form_type}'.format(**self._solution)
def fetch(self):
"""
Fetch the FormInstance
:returns: The fetched FormInstance
:rtype: twilio.rest.verify.v2.form.FormInstance
"""
payload = self._version.fetch(method='GET', uri=self._uri, )
return FormInstance(self._version, payload, form_type=self._solution['form_type'], )
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())
return '<Twilio.Verify.V2.FormContext {}>'.format(context)
class FormInstance(InstanceResource):
""" PLEASE NOTE that this class contains beta products that are subject to
change. Use them with caution. """
class FormTypes(object):
FORM_PUSH = "form-push"
def __init__(self, version, payload, form_type=None):
"""
Initialize the FormInstance
:returns: twilio.rest.verify.v2.form.FormInstance
:rtype: twilio.rest.verify.v2.form.FormInstance
"""
super(FormInstance, self).__init__(version)
# Marshaled Properties
self._properties = {
'form_type': payload.get('form_type'),
'forms': payload.get('forms'),
'form_meta': payload.get('form_meta'),
'url': payload.get('url'),
}
# Context
self._context = None
self._solution = {'form_type': form_type or self._properties['form_type'], }
@property
def _proxy(self):
"""
Generate an instance context for the instance, the context is capable of
performing various actions. All instance actions are proxied to the context
:returns: FormContext for this FormInstance
:rtype: twilio.rest.verify.v2.form.FormContext
"""
if self._context is None:
self._context = FormContext(self._version, form_type=self._solution['form_type'], )
return self._context
@property
def form_type(self):
"""
:returns: The Type of this Form
:rtype: FormInstance.FormTypes
"""
return self._properties['form_type']
@property
def forms(self):
"""
:returns: Object that contains the available forms for this type.
:rtype: dict
"""
return self._properties['forms']
@property
def form_meta(self):
"""
:returns: Additional information for the available forms for this type.
:rtype: dict
"""
return self._properties['form_meta']
@property
def url(self):
"""
:returns: The URL to access the forms for this type.
:rtype: unicode
"""
return self._properties['url']
def fetch(self):
"""
Fetch the FormInstance
:returns: The fetched FormInstance
:rtype: twilio.rest.verify.v2.form.FormInstance
"""
return self._proxy.fetch()
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())
return '<Twilio.Verify.V2.FormInstance {}>'.format(context)
| mit | 7009debb24c79f2c892b2b8e014081d5 | 27.894958 | 95 | 0.599098 | 4.175471 | false | false | false | false |
twilio/twilio-python | twilio/rest/preview/deployed_devices/fleet/__init__.py | 1 | 17975 | # coding=utf-8
r"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from twilio.base import deserialize
from twilio.base import values
from twilio.base.instance_context import InstanceContext
from twilio.base.instance_resource import InstanceResource
from twilio.base.list_resource import ListResource
from twilio.base.page import Page
from twilio.rest.preview.deployed_devices.fleet.certificate import CertificateList
from twilio.rest.preview.deployed_devices.fleet.deployment import DeploymentList
from twilio.rest.preview.deployed_devices.fleet.device import DeviceList
from twilio.rest.preview.deployed_devices.fleet.key import KeyList
class FleetList(ListResource):
""" PLEASE NOTE that this class contains preview products that are subject
to change. Use them with caution. If you currently do not have developer
preview access, please contact help@twilio.com. """
def __init__(self, version):
"""
Initialize the FleetList
:param Version version: Version that contains the resource
:returns: twilio.rest.preview.deployed_devices.fleet.FleetList
:rtype: twilio.rest.preview.deployed_devices.fleet.FleetList
"""
super(FleetList, self).__init__(version)
# Path Solution
self._solution = {}
self._uri = '/Fleets'.format(**self._solution)
def create(self, friendly_name=values.unset):
"""
Create the FleetInstance
:param unicode friendly_name: A human readable description for this Fleet.
:returns: The created FleetInstance
:rtype: twilio.rest.preview.deployed_devices.fleet.FleetInstance
"""
data = values.of({'FriendlyName': friendly_name, })
payload = self._version.create(method='POST', uri=self._uri, data=data, )
return FleetInstance(self._version, payload, )
def stream(self, limit=None, page_size=None):
"""
Streams FleetInstance records from the API as a generator stream.
This operation lazily loads records as efficiently as possible until the limit
is reached.
The results are returned as a generator, so this operation is memory efficient.
:param int limit: Upper limit for the number of records to return. stream()
guarantees to never return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, stream() will attempt to read the
limit with the most efficient page size, i.e. min(limit, 1000)
:returns: Generator that will yield up to limit results
:rtype: list[twilio.rest.preview.deployed_devices.fleet.FleetInstance]
"""
limits = self._version.read_limits(limit, page_size)
page = self.page(page_size=limits['page_size'], )
return self._version.stream(page, limits['limit'])
def list(self, limit=None, page_size=None):
"""
Lists FleetInstance records from the API as a list.
Unlike stream(), this operation is eager and will load `limit` records into
memory before returning.
:param int limit: Upper limit for the number of records to return. list() guarantees
never to return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, list() will attempt to read the limit
with the most efficient page size, i.e. min(limit, 1000)
:returns: Generator that will yield up to limit results
:rtype: list[twilio.rest.preview.deployed_devices.fleet.FleetInstance]
"""
return list(self.stream(limit=limit, page_size=page_size, ))
def page(self, page_token=values.unset, page_number=values.unset,
page_size=values.unset):
"""
Retrieve a single page of FleetInstance records from the API.
Request is executed immediately
:param str page_token: PageToken provided by the API
:param int page_number: Page Number, this value is simply for client state
:param int page_size: Number of records to return, defaults to 50
:returns: Page of FleetInstance
:rtype: twilio.rest.preview.deployed_devices.fleet.FleetPage
"""
data = values.of({'PageToken': page_token, 'Page': page_number, 'PageSize': page_size, })
response = self._version.page(method='GET', uri=self._uri, params=data, )
return FleetPage(self._version, response, self._solution)
def get_page(self, target_url):
"""
Retrieve a specific page of FleetInstance records from the API.
Request is executed immediately
:param str target_url: API-generated URL for the requested results page
:returns: Page of FleetInstance
:rtype: twilio.rest.preview.deployed_devices.fleet.FleetPage
"""
response = self._version.domain.twilio.request(
'GET',
target_url,
)
return FleetPage(self._version, response, self._solution)
def get(self, sid):
"""
Constructs a FleetContext
:param sid: A string that uniquely identifies the Fleet.
:returns: twilio.rest.preview.deployed_devices.fleet.FleetContext
:rtype: twilio.rest.preview.deployed_devices.fleet.FleetContext
"""
return FleetContext(self._version, sid=sid, )
def __call__(self, sid):
"""
Constructs a FleetContext
:param sid: A string that uniquely identifies the Fleet.
:returns: twilio.rest.preview.deployed_devices.fleet.FleetContext
:rtype: twilio.rest.preview.deployed_devices.fleet.FleetContext
"""
return FleetContext(self._version, sid=sid, )
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Preview.DeployedDevices.FleetList>'
class FleetPage(Page):
""" PLEASE NOTE that this class contains preview products that are subject
to change. Use them with caution. If you currently do not have developer
preview access, please contact help@twilio.com. """
def __init__(self, version, response, solution):
"""
Initialize the FleetPage
:param Version version: Version that contains the resource
:param Response response: Response from the API
:returns: twilio.rest.preview.deployed_devices.fleet.FleetPage
:rtype: twilio.rest.preview.deployed_devices.fleet.FleetPage
"""
super(FleetPage, self).__init__(version, response)
# Path Solution
self._solution = solution
def get_instance(self, payload):
"""
Build an instance of FleetInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.preview.deployed_devices.fleet.FleetInstance
:rtype: twilio.rest.preview.deployed_devices.fleet.FleetInstance
"""
return FleetInstance(self._version, payload, )
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Preview.DeployedDevices.FleetPage>'
class FleetContext(InstanceContext):
""" PLEASE NOTE that this class contains preview products that are subject
to change. Use them with caution. If you currently do not have developer
preview access, please contact help@twilio.com. """
def __init__(self, version, sid):
"""
Initialize the FleetContext
:param Version version: Version that contains the resource
:param sid: A string that uniquely identifies the Fleet.
:returns: twilio.rest.preview.deployed_devices.fleet.FleetContext
:rtype: twilio.rest.preview.deployed_devices.fleet.FleetContext
"""
super(FleetContext, self).__init__(version)
# Path Solution
self._solution = {'sid': sid, }
self._uri = '/Fleets/{sid}'.format(**self._solution)
# Dependents
self._devices = None
self._deployments = None
self._certificates = None
self._keys = None
def fetch(self):
"""
Fetch the FleetInstance
:returns: The fetched FleetInstance
:rtype: twilio.rest.preview.deployed_devices.fleet.FleetInstance
"""
payload = self._version.fetch(method='GET', uri=self._uri, )
return FleetInstance(self._version, payload, sid=self._solution['sid'], )
def delete(self):
"""
Deletes the FleetInstance
:returns: True if delete succeeds, False otherwise
:rtype: bool
"""
return self._version.delete(method='DELETE', uri=self._uri, )
def update(self, friendly_name=values.unset,
default_deployment_sid=values.unset):
"""
Update the FleetInstance
:param unicode friendly_name: A human readable description for this Fleet.
:param unicode default_deployment_sid: A default Deployment SID.
:returns: The updated FleetInstance
:rtype: twilio.rest.preview.deployed_devices.fleet.FleetInstance
"""
data = values.of({'FriendlyName': friendly_name, 'DefaultDeploymentSid': default_deployment_sid, })
payload = self._version.update(method='POST', uri=self._uri, data=data, )
return FleetInstance(self._version, payload, sid=self._solution['sid'], )
@property
def devices(self):
"""
Access the devices
:returns: twilio.rest.preview.deployed_devices.fleet.device.DeviceList
:rtype: twilio.rest.preview.deployed_devices.fleet.device.DeviceList
"""
if self._devices is None:
self._devices = DeviceList(self._version, fleet_sid=self._solution['sid'], )
return self._devices
@property
def deployments(self):
"""
Access the deployments
:returns: twilio.rest.preview.deployed_devices.fleet.deployment.DeploymentList
:rtype: twilio.rest.preview.deployed_devices.fleet.deployment.DeploymentList
"""
if self._deployments is None:
self._deployments = DeploymentList(self._version, fleet_sid=self._solution['sid'], )
return self._deployments
@property
def certificates(self):
"""
Access the certificates
:returns: twilio.rest.preview.deployed_devices.fleet.certificate.CertificateList
:rtype: twilio.rest.preview.deployed_devices.fleet.certificate.CertificateList
"""
if self._certificates is None:
self._certificates = CertificateList(self._version, fleet_sid=self._solution['sid'], )
return self._certificates
@property
def keys(self):
"""
Access the keys
:returns: twilio.rest.preview.deployed_devices.fleet.key.KeyList
:rtype: twilio.rest.preview.deployed_devices.fleet.key.KeyList
"""
if self._keys is None:
self._keys = KeyList(self._version, fleet_sid=self._solution['sid'], )
return self._keys
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())
return '<Twilio.Preview.DeployedDevices.FleetContext {}>'.format(context)
class FleetInstance(InstanceResource):
""" PLEASE NOTE that this class contains preview products that are subject
to change. Use them with caution. If you currently do not have developer
preview access, please contact help@twilio.com. """
def __init__(self, version, payload, sid=None):
"""
Initialize the FleetInstance
:returns: twilio.rest.preview.deployed_devices.fleet.FleetInstance
:rtype: twilio.rest.preview.deployed_devices.fleet.FleetInstance
"""
super(FleetInstance, self).__init__(version)
# Marshaled Properties
self._properties = {
'sid': payload.get('sid'),
'url': payload.get('url'),
'unique_name': payload.get('unique_name'),
'friendly_name': payload.get('friendly_name'),
'account_sid': payload.get('account_sid'),
'default_deployment_sid': payload.get('default_deployment_sid'),
'date_created': deserialize.iso8601_datetime(payload.get('date_created')),
'date_updated': deserialize.iso8601_datetime(payload.get('date_updated')),
'links': payload.get('links'),
}
# Context
self._context = None
self._solution = {'sid': sid or self._properties['sid'], }
@property
def _proxy(self):
"""
Generate an instance context for the instance, the context is capable of
performing various actions. All instance actions are proxied to the context
:returns: FleetContext for this FleetInstance
:rtype: twilio.rest.preview.deployed_devices.fleet.FleetContext
"""
if self._context is None:
self._context = FleetContext(self._version, sid=self._solution['sid'], )
return self._context
@property
def sid(self):
"""
:returns: A string that uniquely identifies this Fleet.
:rtype: unicode
"""
return self._properties['sid']
@property
def url(self):
"""
:returns: URL of this Fleet.
:rtype: unicode
"""
return self._properties['url']
@property
def unique_name(self):
"""
:returns: A unique, addressable name of this Fleet.
:rtype: unicode
"""
return self._properties['unique_name']
@property
def friendly_name(self):
"""
:returns: A human readable description for this Fleet.
:rtype: unicode
"""
return self._properties['friendly_name']
@property
def account_sid(self):
"""
:returns: The unique SID that identifies this Account.
:rtype: unicode
"""
return self._properties['account_sid']
@property
def default_deployment_sid(self):
"""
:returns: The unique SID that identifies this Fleet's default Deployment.
:rtype: unicode
"""
return self._properties['default_deployment_sid']
@property
def date_created(self):
"""
:returns: The date this Fleet was created.
:rtype: datetime
"""
return self._properties['date_created']
@property
def date_updated(self):
"""
:returns: The date this Fleet was updated.
:rtype: datetime
"""
return self._properties['date_updated']
@property
def links(self):
"""
:returns: Nested resource URLs.
:rtype: unicode
"""
return self._properties['links']
def fetch(self):
"""
Fetch the FleetInstance
:returns: The fetched FleetInstance
:rtype: twilio.rest.preview.deployed_devices.fleet.FleetInstance
"""
return self._proxy.fetch()
def delete(self):
"""
Deletes the FleetInstance
:returns: True if delete succeeds, False otherwise
:rtype: bool
"""
return self._proxy.delete()
def update(self, friendly_name=values.unset,
default_deployment_sid=values.unset):
"""
Update the FleetInstance
:param unicode friendly_name: A human readable description for this Fleet.
:param unicode default_deployment_sid: A default Deployment SID.
:returns: The updated FleetInstance
:rtype: twilio.rest.preview.deployed_devices.fleet.FleetInstance
"""
return self._proxy.update(
friendly_name=friendly_name,
default_deployment_sid=default_deployment_sid,
)
@property
def devices(self):
"""
Access the devices
:returns: twilio.rest.preview.deployed_devices.fleet.device.DeviceList
:rtype: twilio.rest.preview.deployed_devices.fleet.device.DeviceList
"""
return self._proxy.devices
@property
def deployments(self):
"""
Access the deployments
:returns: twilio.rest.preview.deployed_devices.fleet.deployment.DeploymentList
:rtype: twilio.rest.preview.deployed_devices.fleet.deployment.DeploymentList
"""
return self._proxy.deployments
@property
def certificates(self):
"""
Access the certificates
:returns: twilio.rest.preview.deployed_devices.fleet.certificate.CertificateList
:rtype: twilio.rest.preview.deployed_devices.fleet.certificate.CertificateList
"""
return self._proxy.certificates
@property
def keys(self):
"""
Access the keys
:returns: twilio.rest.preview.deployed_devices.fleet.key.KeyList
:rtype: twilio.rest.preview.deployed_devices.fleet.key.KeyList
"""
return self._proxy.keys
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())
return '<Twilio.Preview.DeployedDevices.FleetInstance {}>'.format(context)
| mit | c42544cbfd7a9c064c0be7c385efff26 | 33.108159 | 107 | 0.628484 | 4.163771 | false | false | false | false |
twilio/twilio-python | twilio/rest/flex_api/v1/channel.py | 1 | 13623 | # coding=utf-8
r"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from twilio.base import deserialize
from twilio.base import values
from twilio.base.instance_context import InstanceContext
from twilio.base.instance_resource import InstanceResource
from twilio.base.list_resource import ListResource
from twilio.base.page import Page
class ChannelList(ListResource):
def __init__(self, version):
"""
Initialize the ChannelList
:param Version version: Version that contains the resource
:returns: twilio.rest.flex_api.v1.channel.ChannelList
:rtype: twilio.rest.flex_api.v1.channel.ChannelList
"""
super(ChannelList, self).__init__(version)
# Path Solution
self._solution = {}
self._uri = '/Channels'.format(**self._solution)
def stream(self, limit=None, page_size=None):
"""
Streams ChannelInstance records from the API as a generator stream.
This operation lazily loads records as efficiently as possible until the limit
is reached.
The results are returned as a generator, so this operation is memory efficient.
:param int limit: Upper limit for the number of records to return. stream()
guarantees to never return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, stream() will attempt to read the
limit with the most efficient page size, i.e. min(limit, 1000)
:returns: Generator that will yield up to limit results
:rtype: list[twilio.rest.flex_api.v1.channel.ChannelInstance]
"""
limits = self._version.read_limits(limit, page_size)
page = self.page(page_size=limits['page_size'], )
return self._version.stream(page, limits['limit'])
def list(self, limit=None, page_size=None):
"""
Lists ChannelInstance records from the API as a list.
Unlike stream(), this operation is eager and will load `limit` records into
memory before returning.
:param int limit: Upper limit for the number of records to return. list() guarantees
never to return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, list() will attempt to read the limit
with the most efficient page size, i.e. min(limit, 1000)
:returns: Generator that will yield up to limit results
:rtype: list[twilio.rest.flex_api.v1.channel.ChannelInstance]
"""
return list(self.stream(limit=limit, page_size=page_size, ))
def page(self, page_token=values.unset, page_number=values.unset,
page_size=values.unset):
"""
Retrieve a single page of ChannelInstance records from the API.
Request is executed immediately
:param str page_token: PageToken provided by the API
:param int page_number: Page Number, this value is simply for client state
:param int page_size: Number of records to return, defaults to 50
:returns: Page of ChannelInstance
:rtype: twilio.rest.flex_api.v1.channel.ChannelPage
"""
data = values.of({'PageToken': page_token, 'Page': page_number, 'PageSize': page_size, })
response = self._version.page(method='GET', uri=self._uri, params=data, )
return ChannelPage(self._version, response, self._solution)
def get_page(self, target_url):
"""
Retrieve a specific page of ChannelInstance records from the API.
Request is executed immediately
:param str target_url: API-generated URL for the requested results page
:returns: Page of ChannelInstance
:rtype: twilio.rest.flex_api.v1.channel.ChannelPage
"""
response = self._version.domain.twilio.request(
'GET',
target_url,
)
return ChannelPage(self._version, response, self._solution)
def create(self, flex_flow_sid, identity, chat_user_friendly_name,
chat_friendly_name, target=values.unset,
chat_unique_name=values.unset, pre_engagement_data=values.unset,
task_sid=values.unset, task_attributes=values.unset,
long_lived=values.unset):
"""
Create the ChannelInstance
:param unicode flex_flow_sid: The SID of the Flex Flow
:param unicode identity: The identity value that identifies the new resource's chat User
:param unicode chat_user_friendly_name: The chat participant's friendly name
:param unicode chat_friendly_name: The chat channel's friendly name
:param unicode target: The Target Contact Identity
:param unicode chat_unique_name: The chat channel's unique name
:param unicode pre_engagement_data: The pre-engagement data
:param unicode task_sid: The SID of the TaskRouter Task
:param unicode task_attributes: The Task attributes to be added for the TaskRouter Task
:param bool long_lived: Whether to create the channel as long-lived
:returns: The created ChannelInstance
:rtype: twilio.rest.flex_api.v1.channel.ChannelInstance
"""
data = values.of({
'FlexFlowSid': flex_flow_sid,
'Identity': identity,
'ChatUserFriendlyName': chat_user_friendly_name,
'ChatFriendlyName': chat_friendly_name,
'Target': target,
'ChatUniqueName': chat_unique_name,
'PreEngagementData': pre_engagement_data,
'TaskSid': task_sid,
'TaskAttributes': task_attributes,
'LongLived': long_lived,
})
payload = self._version.create(method='POST', uri=self._uri, data=data, )
return ChannelInstance(self._version, payload, )
def get(self, sid):
"""
Constructs a ChannelContext
:param sid: The SID that identifies the Flex chat channel resource to fetch
:returns: twilio.rest.flex_api.v1.channel.ChannelContext
:rtype: twilio.rest.flex_api.v1.channel.ChannelContext
"""
return ChannelContext(self._version, sid=sid, )
def __call__(self, sid):
"""
Constructs a ChannelContext
:param sid: The SID that identifies the Flex chat channel resource to fetch
:returns: twilio.rest.flex_api.v1.channel.ChannelContext
:rtype: twilio.rest.flex_api.v1.channel.ChannelContext
"""
return ChannelContext(self._version, sid=sid, )
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.FlexApi.V1.ChannelList>'
class ChannelPage(Page):
def __init__(self, version, response, solution):
"""
Initialize the ChannelPage
:param Version version: Version that contains the resource
:param Response response: Response from the API
:returns: twilio.rest.flex_api.v1.channel.ChannelPage
:rtype: twilio.rest.flex_api.v1.channel.ChannelPage
"""
super(ChannelPage, self).__init__(version, response)
# Path Solution
self._solution = solution
def get_instance(self, payload):
"""
Build an instance of ChannelInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.flex_api.v1.channel.ChannelInstance
:rtype: twilio.rest.flex_api.v1.channel.ChannelInstance
"""
return ChannelInstance(self._version, payload, )
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.FlexApi.V1.ChannelPage>'
class ChannelContext(InstanceContext):
def __init__(self, version, sid):
"""
Initialize the ChannelContext
:param Version version: Version that contains the resource
:param sid: The SID that identifies the Flex chat channel resource to fetch
:returns: twilio.rest.flex_api.v1.channel.ChannelContext
:rtype: twilio.rest.flex_api.v1.channel.ChannelContext
"""
super(ChannelContext, self).__init__(version)
# Path Solution
self._solution = {'sid': sid, }
self._uri = '/Channels/{sid}'.format(**self._solution)
def fetch(self):
"""
Fetch the ChannelInstance
:returns: The fetched ChannelInstance
:rtype: twilio.rest.flex_api.v1.channel.ChannelInstance
"""
payload = self._version.fetch(method='GET', uri=self._uri, )
return ChannelInstance(self._version, payload, sid=self._solution['sid'], )
def delete(self):
"""
Deletes the ChannelInstance
:returns: True if delete succeeds, False otherwise
:rtype: bool
"""
return self._version.delete(method='DELETE', uri=self._uri, )
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())
return '<Twilio.FlexApi.V1.ChannelContext {}>'.format(context)
class ChannelInstance(InstanceResource):
def __init__(self, version, payload, sid=None):
"""
Initialize the ChannelInstance
:returns: twilio.rest.flex_api.v1.channel.ChannelInstance
:rtype: twilio.rest.flex_api.v1.channel.ChannelInstance
"""
super(ChannelInstance, self).__init__(version)
# Marshaled Properties
self._properties = {
'account_sid': payload.get('account_sid'),
'flex_flow_sid': payload.get('flex_flow_sid'),
'sid': payload.get('sid'),
'user_sid': payload.get('user_sid'),
'task_sid': payload.get('task_sid'),
'url': payload.get('url'),
'date_created': deserialize.iso8601_datetime(payload.get('date_created')),
'date_updated': deserialize.iso8601_datetime(payload.get('date_updated')),
}
# Context
self._context = None
self._solution = {'sid': sid or self._properties['sid'], }
@property
def _proxy(self):
"""
Generate an instance context for the instance, the context is capable of
performing various actions. All instance actions are proxied to the context
:returns: ChannelContext for this ChannelInstance
:rtype: twilio.rest.flex_api.v1.channel.ChannelContext
"""
if self._context is None:
self._context = ChannelContext(self._version, sid=self._solution['sid'], )
return self._context
@property
def account_sid(self):
"""
:returns: The SID of the Account that created the resource and owns this Workflow
:rtype: unicode
"""
return self._properties['account_sid']
@property
def flex_flow_sid(self):
"""
:returns: The SID of the Flex Flow
:rtype: unicode
"""
return self._properties['flex_flow_sid']
@property
def sid(self):
"""
:returns: The unique string that identifies the resource
:rtype: unicode
"""
return self._properties['sid']
@property
def user_sid(self):
"""
:returns: The SID of the chat user
:rtype: unicode
"""
return self._properties['user_sid']
@property
def task_sid(self):
"""
:returns: The SID of the TaskRouter Task
:rtype: unicode
"""
return self._properties['task_sid']
@property
def url(self):
"""
:returns: The absolute URL of the Flex chat channel resource
:rtype: unicode
"""
return self._properties['url']
@property
def date_created(self):
"""
:returns: The ISO 8601 date and time in GMT when the Flex chat channel was created
:rtype: datetime
"""
return self._properties['date_created']
@property
def date_updated(self):
"""
:returns: The ISO 8601 date and time in GMT when the Flex chat channel was last updated
:rtype: datetime
"""
return self._properties['date_updated']
def fetch(self):
"""
Fetch the ChannelInstance
:returns: The fetched ChannelInstance
:rtype: twilio.rest.flex_api.v1.channel.ChannelInstance
"""
return self._proxy.fetch()
def delete(self):
"""
Deletes the ChannelInstance
:returns: True if delete succeeds, False otherwise
:rtype: bool
"""
return self._proxy.delete()
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())
return '<Twilio.FlexApi.V1.ChannelInstance {}>'.format(context)
| mit | 9faa1b42021c94c74e890b2efe5c2945 | 33.142857 | 97 | 0.611466 | 4.312441 | false | false | false | false |
twilio/twilio-python | tests/integration/api/v2010/account/queue/test_member.py | 1 | 8487 | # coding=utf-8
r"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from tests import IntegrationTestCase
from tests.holodeck import Request
from twilio.base.exceptions import TwilioException
from twilio.http.response import Response
class MemberTestCase(IntegrationTestCase):
def test_fetch_request(self):
self.holodeck.mock(Response(500, ''))
with self.assertRaises(TwilioException):
self.client.api.v2010.accounts("ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.queues("QUXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.members("CAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").fetch()
self.holodeck.assert_has_request(Request(
'get',
'https://api.twilio.com/2010-04-01/Accounts/ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Queues/QUXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Members/CAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX.json',
))
    def test_fetch_response(self):
        # Mock a 200 response carrying a representative Member payload and
        # confirm the SDK deserializes it into a non-None instance.
        self.holodeck.mock(Response(
            200,
            '''
            {
                "queue_sid": "QUaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "call_sid": "CAaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "date_enqueued": "Tue, 07 Aug 2012 22:57:41 +0000",
                "position": 1,
                "uri": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Queues/QUaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Members/CAaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa.json",
                "wait_time": 143
            }
            '''
        ))

        # Fetch a specific queue member by call SID against the mocked API.
        actual = self.client.api.v2010.accounts("ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
                                      .queues("QUXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
                                      .members("CAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").fetch()

        # Generated test only checks that a resource object was produced.
        self.assertIsNotNone(actual)
    def test_fetch_front_response(self):
        # NOTE(review): despite the "front" name, this auto-generated test is
        # byte-identical to test_fetch_response — it still fetches a specific
        # member by call SID, not the "Front" member. Kept as generated.
        self.holodeck.mock(Response(
            200,
            '''
            {
                "queue_sid": "QUaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "call_sid": "CAaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "date_enqueued": "Tue, 07 Aug 2012 22:57:41 +0000",
                "position": 1,
                "uri": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Queues/QUaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Members/CAaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa.json",
                "wait_time": 143
            }
            '''
        ))

        actual = self.client.api.v2010.accounts("ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
                                      .queues("QUXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
                                      .members("CAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").fetch()

        # Generated test only checks that a resource object was produced.
        self.assertIsNotNone(actual)
def test_update_request(self):
self.holodeck.mock(Response(500, ''))
with self.assertRaises(TwilioException):
self.client.api.v2010.accounts("ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.queues("QUXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.members("CAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").update(url="https://example.com")
values = {'Url': "https://example.com", }
self.holodeck.assert_has_request(Request(
'post',
'https://api.twilio.com/2010-04-01/Accounts/ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Queues/QUXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Members/CAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX.json',
data=values,
))
    def test_update_response(self):
        """A 200 response to update() is parsed into a QueueMember instance (not None)."""
        self.holodeck.mock(Response(
            200,
            '''
            {
                "queue_sid": "QUaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "call_sid": "CAaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "date_enqueued": "Thu, 06 Dec 2018 18:42:47 +0000",
                "position": 1,
                "uri": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Queues/QUaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Members/CAaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa.json",
                "wait_time": 143
            }
            '''
        ))
        actual = self.client.api.v2010.accounts("ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
                                      .queues("QUXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
                                      .members("CAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").update(url="https://example.com")
        self.assertIsNotNone(actual)
    def test_dequeue_front_response(self):
        """Dequeuing the front member (via update) parses to a non-None instance."""
        # NOTE(review): this calls update(url=...) on a specific call SID rather than
        # addressing the front of the queue -- confirm against the generator template.
        self.holodeck.mock(Response(
            200,
            '''
            {
                "queue_sid": "QUaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "call_sid": "CAaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "date_enqueued": "Tue, 07 Aug 2012 22:57:41 +0000",
                "position": 1,
                "uri": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Queues/QUaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Members/CAaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa.json",
                "wait_time": 143
            }
            '''
        ))
        actual = self.client.api.v2010.accounts("ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
                                      .queues("QUXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
                                      .members("CAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").update(url="https://example.com")
        self.assertIsNotNone(actual)
def test_list_request(self):
self.holodeck.mock(Response(500, ''))
with self.assertRaises(TwilioException):
self.client.api.v2010.accounts("ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.queues("QUXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.members.list()
self.holodeck.assert_has_request(Request(
'get',
'https://api.twilio.com/2010-04-01/Accounts/ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Queues/QUXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Members.json',
))
    def test_read_full_response(self):
        """A one-item paged listing is parsed into a non-empty (non-None) result list."""
        self.holodeck.mock(Response(
            200,
            '''
            {
                "end": 0,
                "first_page_uri": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Queues/QUaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Members.json?PageSize=50&Page=0",
                "next_page_uri": null,
                "page": 0,
                "page_size": 50,
                "previous_page_uri": null,
                "queue_members": [
                    {
                        "queue_sid": "QUaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                        "call_sid": "CAaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                        "date_enqueued": "Mon, 17 Dec 2018 18:36:39 +0000",
                        "position": 1,
                        "uri": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Queues/QUaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Members/CAaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa.json",
                        "wait_time": 124
                    }
                ],
                "start": 0,
                "uri": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Queues/QUaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Members.json?PageSize=50&Page=0"
            }
            '''
        ))
        actual = self.client.api.v2010.accounts("ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
                                      .queues("QUXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
                                      .members.list()
        self.assertIsNotNone(actual)
    def test_read_empty_response(self):
        """An empty paged listing still yields a (possibly empty) non-None result list."""
        self.holodeck.mock(Response(
            200,
            '''
            {
                "end": 0,
                "first_page_uri": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Queues/QUaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Members.json?Page=0&PageSize=50",
                "next_page_uri": null,
                "page": 0,
                "page_size": 50,
                "previous_page_uri": null,
                "queue_members": [],
                "start": 0,
                "uri": "/2010-04-01/Accounts/ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Queues/QUaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Members.json"
            }
            '''
        ))
        actual = self.client.api.v2010.accounts("ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
                                      .queues("QUXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
                                      .members.list()
        self.assertIsNotNone(actual)
| mit | 160aedd6cce588a4db5683bb6d090bef | 41.863636 | 182 | 0.567927 | 4.983558 | false | true | false | false |
acq4/acq4 | acq4/analysis/modules/MapCombiner/MapCombiner.py | 3 | 13050 | # -*- coding: utf-8 -*-
from __future__ import print_function
from six.moves import range
"""
For combining photostimulation maps across cells and displaying against 3D atlas.
"""
from acq4.util import Qt
from acq4.analysis.AnalysisModule import AnalysisModule
import os
from collections import OrderedDict
#import DatabaseGui
from acq4.util.ColorMapper import ColorMapper
import pyqtgraph as pg
import pyqtgraph.parametertree as ptree
import pyqtgraph.opengl as gl
import numpy as np
#import acq4.analysis.modules.Photostim.Scan as Scan
#from acq4.analysis.modules.Photostim.Map import Map
#import acq4.analysis.tools.poissonScore as poissonScore
#import flowchart.EventDetection as FCEventDetection
import acq4.analysis.atlas.CochlearNucleus as CN
from acq4.util.DatabaseGui.DatabaseQueryWidget import DatabaseQueryWidget
class MapCombiner(AnalysisModule):
    """Combine photostimulation maps across cells and display them against a
    3D cochlear nucleus atlas.

    Data is read from the 'map_site_view' database view (created on first
    use).  The processing pipeline is:
        reloadData -> transform (mirror / cell-center)
                   -> refilter (parameter-tree filters + user expression)
                   -> selectCells -> recolor (update the GL scatter plots)
    """
    def __init__(self, host):
        AnalysisModule.__init__(self, host)

        ## Results of the filter/selection pipeline; filled in by refilter()
        ## and selectCells().  Initialized here so recolor() is safe to call
        ## before the first filtering pass completes.
        self.filtered = None
        self.selected = None

        ## Control panel: reload button, parameter tree, filter button,
        ## free-form filter expression, and per-cell selection list.
        self.ctrlLayout = pg.LayoutWidget()
        self.reloadBtn = Qt.QPushButton('Reload Data')
        self.ctrlLayout.addWidget(self.reloadBtn)
        self.ctrl = ptree.ParameterTree(showHeader=False)
        self.ctrlLayout.addWidget(self.ctrl, row='next', col=0)
        self.filterBtn = Qt.QPushButton('Filter')
        self.ctrlLayout.addWidget(self.filterBtn, row='next', col=0)
        self.cellList = Qt.QListWidget()
        self.cellList.setSelectionMode(self.cellList.ExtendedSelection)
        self.filterText = Qt.QTextEdit("selected = data")
        self.ctrlLayout.addWidget(self.filterText, row='next', col=0)
        self.ctrlLayout.addWidget(self.cellList, row='next', col=0)

        ## 3D atlas with scatter plots for stimulation sites and cell somata
        self.atlas = CN.CNAtlasDisplayWidget()
        self.stimPoints = gl.GLScatterPlotItem()
        self.atlas.addItem(self.stimPoints)
        self.cellPoints = gl.GLScatterPlotItem()
        self.atlas.addItem(self.cellPoints)

        modPath = os.path.abspath(os.path.dirname(__file__))
        self.colorMapper = ColorMapper(filePath=os.path.join(modPath, "colorMaps"))
        self._elements_ = OrderedDict([
            ('Options', {'type': 'ctrl', 'object': self.ctrlLayout, 'size': (300, 500), 'pos': 'left'}),
            ('Atlas', {'type': 'ctrl', 'object': self.atlas, 'size': (600,500), 'pos': 'right'}),
            ('Color Mapper', {'type': 'ctrl', 'object': self.colorMapper, 'size': (600,200), 'pos': ('bottom', 'Atlas')}),
        ])
        host.resize(1100, 800)
        self.initializeElements()

        params = [
            dict(name='Transform', type='group', children=[
                dict(name='Mirror RL', type='bool', value=True),
                dict(name='Cell-centered', type='bool', value=False),
            ]),
            dict(name='Display', type='group', children=[
                dict(name='Cells', type='bool', value=True),
                dict(name='Color by type', type='bool', value=True),
                dict(name='Stimulus Sites', type='bool', value=True),
                dict(name='Atlas', type='bool', value=False),
                dict(name='Grid', type='bool', value=True),
            ]),
            FilterList(name='Filter'),
        ]

        self.params = ptree.Parameter.create(name='options', type='group', children=params)
        self.ctrl.setParameters(self.params, showTop=False)

        ## Create the joined database view on first run.
        db = self.dataManager().currentDatabase()
        self.tableName = 'map_site_view'
        if not db.hasTable(self.tableName):
            print("Creating DB views.")
            db.createView(self.tableName, ['map_sites', 'photostim_maps', 'dirtable_cell', 'cochlearnucleus_protocol', 'cochlearnucleus_cell'])
            ## view creation SQL:
            ## select * from map_sites
            ##    inner join photostim_maps on "photostim_maps"."rowid"="map_sites"."map"
            ##    inner join dirtable_cell on "dirtable_cell"."rowid"="photostim_maps"."cell"
            ##    inner join cochlearnucleus_protocol on cochlearnucleus_protocol.protocoldir=map_sites.firstsite
            ##    inner join cochlearnucleus_cell on cochlearnucleus_cell.celldir=dirtable_cell.rowid;

        self.reloadData()

        self.reloadBtn.clicked.connect(self.reloadData)
        self.filterBtn.clicked.connect(self.refilter)
        self.cellList.itemSelectionChanged.connect(self.selectCells)
        self.colorMapper.sigChanged.connect(self.recolor)
        self.params.param('Display').sigTreeStateChanged.connect(self.updateDisplay)
        self.params.param('Transform').sigTreeStateChanged.connect(self.transform)

        self.transform()
        self.refilter()

    def reloadData(self):
        """Re-query the database view; refresh color-mapper fields and filter keys."""
        db = self.dataManager().currentDatabase()
        self.data = db.select(self.tableName, toArray=True)
        mapper = self.getElement('Color Mapper')
        mapper.setArgList(self.data.dtype.names)
        self.params.param('Filter').setData(self.data)

    def updateDisplay(self):
        """Show/hide scatter plots, atlas labels, and grid per the Display params."""
        if not self.params['Display', 'Cells']:
            self.cellPoints.hide()
        else:
            self.cellPoints.show()
        if not self.params['Display', 'Stimulus Sites']:
            self.stimPoints.hide()
        else:
            self.stimPoints.show()
        if self.params['Display', 'Atlas']:
            self.atlas.showLabel('DCN')
            self.atlas.showLabel('AVCN')
            self.atlas.showLabel('PVCN')
        else:
            self.atlas.showLabel('DCN', False)
            self.atlas.showLabel('AVCN', False)
            self.atlas.showLabel('PVCN', False)
        if self.params['Display', 'Grid']:
            self.atlas.grid.show()
        else:
            self.atlas.grid.hide()
        self.recolor()

    def elementChanged(self, element, old, new):
        name = element.name()

    def transform(self):
        """Apply 'Mirror RL' / 'Cell-centered' transforms to a copy of the data.

        Columns 'right', 'anterior', 'dorsal' are stimulation-site coordinates;
        the ':1' variants are the corresponding cell coordinates.
        """
        data = self.data.copy()
        if self.params['Transform', 'Mirror RL']:
            ## fold both hemispheres onto one side
            data['right'] = np.abs(data['right'])
            data['right:1'] = np.abs(data['right:1'])
        if self.params['Transform', 'Cell-centered']:
            ## translate each map so all cells coincide at the mean cell position
            r = data['right:1'].mean()
            a = data['anterior:1'].mean()
            d = data['dorsal:1'].mean()
            data['right'] += r - data['right:1']
            data['anterior'] += a - data['anterior:1']
            data['dorsal'] += d - data['dorsal:1']
            data['right:1'] = r
            data['anterior:1'] = a
            data['dorsal:1'] = d
        self.transformed = data
        self.refilter()

    def refilter(self):
        """Apply the parameter-tree filters, then the user's filter expression.

        The expression in the text box is executed with 'data' bound to the
        pre-filtered record array and must assign the result to 'selected'
        (the default text is "selected = data").
        """
        data = self.transformed
        data = self.params.param('Filter').process(data)
        ## Run the user expression in an explicit namespace and read the result
        ## back out of it.  (Relying on exec() to create readable function
        ## locals only worked in Python 2; in Python 3 the assigned name would
        ## not be visible here.)
        ns = {'data': data, 'np': np, 'selected': data}
        exec(self.filterText.toPlainText(), ns)
        self.filtered = ns['selected']
        cells = set(self.filtered['cell'])
        self.cellList.clear()
        for c in cells:
            item = Qt.QListWidgetItem(c.name())
            item.dh = c
            self.cellList.addItem(item)
        self.cellList.selectAll()
        self.selectCells()

    def selectCells(self):
        """Restrict the filtered data to the cells highlighted in the list."""
        if len(self.cellList.selectedItems()) == self.cellList.count():
            self.selected = self.filtered
        else:
            mask = np.zeros(len(self.filtered), dtype=bool)
            for c in self.cellList.selectedItems():
                mask |= (self.filtered['cell'] == c.dh)
            self.selected = self.filtered[mask]
        self.recolor()

    def recolor(self):
        """Update both GL scatter plots from the current selection."""
        if self.selected is None:
            return
        data = self.selected
        mapper = self.getElement('Color Mapper')
        colors = mapper.getColorArray(data, opengl=True)
        pos = np.empty((len(data), 3))
        pos[:,0] = data['right']
        pos[:,1] = data['anterior']
        pos[:,2] = data['dorsal']
        self.stimPoints.setData(pos=pos, color=colors, pxMode=False, size=100e-6)

        ## pick one row per cell for the soma positions
        cells = set(data['cell'])
        inds = np.array([np.argwhere(data['cell']==c).flatten()[0] for c in cells], dtype=int)
        data = data[inds]
        pos = np.empty((len(data), 3))
        pos[:,0] = data['right:1']
        pos[:,1] = data['anterior:1']
        pos[:,2] = data['dorsal:1']
        if self.params['Display', 'Color by type']:
            ## fixed palette keyed by the 'CellType:1' column
            typeColors = {
                'B': (0, 0, 1, 1),
                'B?': (0.2, 0.2, 0.7, 1),
                'S': (1, 1, 0, 1),
                'S?': (0.7, 0.7, 0.3, 1),
                'DS': (1, 0, 0, 1),
                'DS?': (1, 0.5, 0, 1),
                'TS': (0, 1, 0, 1),
                'TS?': (0.5, 1, 0, 1),
                '?': (0.5, 0.5, 0.5, 1),
            }
            color = np.empty((len(data),4))
            for i in range(len(data)):
                color[i] = typeColors.get(data[i]['CellType:1'], typeColors['?'])
        else:
            color = (1,1,1,1)
        self.cellPoints.setData(pos=pos, color=color, size=20, pxMode=True)
class FilterList(ptree.types.GroupParameter):
    """Parameter group holding a user-editable list of FilterItem children.

    setData() must be called (with a numpy record array) before filters are
    added, so field names and value ranges are available to the items.
    """
    def __init__(self, **kwds):
        ptree.types.GroupParameter.__init__(self, addText='Add filter..', **kwds)
        #self.params.addNew = self.addNew
        #self.params.treeStateChanged.connect(self.stateChanged)
    def addNew(self):
        # Invoked by the parameter tree's "Add filter.." button.
        ch = FilterItem()
        self.addChild(ch)
        ch.setKeys(self.keyList())
    def setData(self, data):
        """Set the record array whose fields become the selectable filter keys."""
        self.data = data
        keys = self.keyList()
        for ch in self:
            ch.setKeys(keys)
    def keyList(self):
        """Return the sorted field names of the current data array."""
        return sorted(list(self.data.dtype.names))
    def dataType(self, key):
        """Return (numpy kind, limits) for a field: (min, max) for numeric
        kinds ('i'/'f'), otherwise the sorted unique values."""
        kind = self.data.dtype.fields[key][0].kind
        if kind in ('i', 'f'):
            return kind, (self.data[key].min(), self.data[key].max())
        else:
            return kind, sorted(list(set(self.data[key])))
    def process(self, data):
        """Apply each child filter to *data* in sequence; empty input passes through."""
        if len(data) == 0:
            return data
        for ch in self:
            data = ch.process(data)
        return data
        #self.updateKeys(events.dtype.names)
        #for fp in self.params:
            #if fp.value() is False:
                #continue
            #key, mn, mx = fp['Field'], fp['Min'], fp['Max']
            #vals = events[key]
            #mask = (vals >= mn) * (vals < mx)  ## Use inclusive minimum and non-inclusive maximum. This makes it easier to create non-overlapping selections
            #events = events[mask]
        #return events
class FilterItem(ptree.types.SimpleParameter):
    """One filter row: a checkable parameter with a 'Field' selector plus either
    Min/Max bounds (numeric fields) or one checkbox per unique value."""
    def __init__(self, **opts):
        opts['name'] = 'Filter'
        opts['type'] = 'bool'
        opts['value'] = True
        opts['removable'] = True
        opts['renamable'] = True
        opts['autoIncrementName'] = True
        ptree.types.SimpleParameter.__init__(self, **opts)
        self.addChild(ptree.Parameter.create(name='Field', type='list'))
        self.param('Field').sigValueChanged.connect(self.updateChildren)
    def setKeys(self, keys):
        """Populate the 'Field' drop-down with the available field names."""
        self.param('Field').setLimits(keys)
    def updateChildren(self):
        """Rebuild the sub-parameters to match the newly selected field's type."""
        for ch in list(self.children()):
            if ch is not self.param('Field'):
                self.removeChild(ch)
        typ, limits = self.parent().dataType(self['Field'])
        self.filterType = typ
        if typ in ('i', 'f'):
            # Defaults span the data's full range.
            self.addChild(ptree.Parameter.create(name='Min', type='float', value=limits[0]))
            self.addChild(ptree.Parameter.create(name='Max', type='float', value=limits[1]))
        else:
            for x in limits:
                ch = self.addChild(ptree.Parameter.create(name=str(x), type='bool', value=True))
                ch.selectValue = x
    def process(self, data):
        """Return the subset of *data* passing this filter (all of it if disabled)."""
        if self.value() is False:
            return data
        key = self['Field']
        if self.filterType in ('i', 'f'):
            # NOTE(review): both bounds are strict (exclusive), so rows exactly at
            # the default Min/Max (the data's own extremes) are dropped; the older
            # commented-out code in FilterList used an inclusive minimum -- confirm
            # which behavior is intended.
            mask = (data[key] > self['Min']) & (data[key] < self['Max'])
        else:
            mask = np.zeros(len(data), dtype=bool)
            for ch in self:
                if ch is self.param('Field'):
                    continue
                if ch.value() is True:
                    mask |= (data[key] == ch.selectValue)
        return data[mask]
| mit | fca6aa8bd1b637a3473ffcb80f3fc075 | 36.395415 | 157 | 0.552567 | 3.806884 | false | false | false | false |
acq4/acq4 | acq4/devices/DAQGeneric/DaqChannelGui.py | 3 | 11563 | # -*- coding: utf-8 -*-
from __future__ import print_function
import weakref
import numpy
from pyqtgraph import SpinBox, WidgetGroup, mkPen
from six.moves import range
from acq4.util import Qt
from acq4.util.SequenceRunner import runSequence
AOChannelTemplate = Qt.importTemplate('.AOChannelTemplate')
DOChannelTemplate = Qt.importTemplate('.DOChannelTemplate')
InputChannelTemplate = Qt.importTemplate('.InputChannelTemplate')
###### For task GUIs
class DaqChannelGui(Qt.QWidget):
    """Base task-GUI widget for a single DAQGeneric channel.

    Holds the channel configuration, references to the owning task GUI and the
    DAQ device UI, and a plot widget.  Subclasses provide the actual input or
    output controls via their .ui templates.
    """
    def __init__(self, parent, name, config, plot, dev, taskRunner, daqName=None):
        Qt.QWidget.__init__(self, parent)
        ## Name of this channel
        self.name = name
        ## Parent taskGui object
        self.taskGui = weakref.ref(parent)
        ## Configuration for this channel defined in the device configuration file
        self.config = config
        self.scale = 1.0
        self.units = ''
        ## The device handle for this channel's DAQGeneric device
        self.dev = dev
        ## The task GUI window which contains this object
        self.taskRunner = weakref.ref(taskRunner)
        ## Make sure task interface includes our DAQ device
        if daqName is None:
            self.daqDev = self.dev.getDAQName(self.name)
        else:
            self.daqDev = daqName
        self.daqUI = self.taskRunner().getDevice(self.daqDev)
        ## plot widget
        self.plot = plot
        self.plot.setDownsampling(ds=True, auto=True, mode='peak')
        self.plot.setClipToView(True)
    def postUiInit(self):
        """Finish setup after the subclass has built self.ui; wires state tracking."""
        ## Automatically locate all read/writable widgets and group them together for easy
        ## save/restore operations
        self.stateGroup = WidgetGroup(self)
        self.stateGroup.addWidget(self.plot, name='plot')
        self.displayCheckChanged()
        self.ui.displayCheck.stateChanged.connect(self.displayCheckChanged)
        if 'units' in self.config:
            self.setUnits(self.config['units'])
        else:
            self.setUnits('')
    def updateTitle(self):
        """Refresh the group-box title to '<name> (<units>)'."""
        self.ui.groupBox.setTitle(self.name + " (%s)" % self.units)
    def setUnits(self, units):
        """Set display units on the title and on every SpinBox from getSpins()."""
        self.units = units
        for s in self.getSpins():
            if isinstance(s, SpinBox):
                s.setOpts(suffix=units)
        self.updateTitle()
    def getSpins(self):
        # Subclasses return the spin boxes that should carry the unit suffix.
        return []
    def setChildrenVisible(self, obj, vis):
        """Recursively show/hide all QWidget descendants of *obj*."""
        for c in obj.children():
            if isinstance(c, Qt.QWidget):
                c.setVisible(vis)
            else:
                self.setChildrenVisible(c, vis)
    def saveState(self):
        """Return the widget-group state dict for save/restore."""
        return self.stateGroup.state()
    def restoreState(self, state):
        """Restore widget state; refresh the wave generator display if present."""
        self.stateGroup.setState(state)
        if hasattr(self.ui, 'waveGeneratorWidget'):
            self.ui.waveGeneratorWidget.update()
    def clearPlots(self):
        """Remove all traces from the plot."""
        self.plot.clear()
        self.currentPlot = None
    def displayCheckChanged(self):
        # Show or hide the plot to match the display checkbox.
        if self.stateGroup.state()['displayCheck']:
            self.plot.show()
        else:
            self.plot.hide()
    def taskStarted(self, params):
        # Hook for subclasses; called when a single task run begins.
        pass
    def taskSequenceStarted(self):
        # Hook for subclasses; called when a task sequence begins.
        pass
    def quit(self):
        """Release resources when the task GUI closes."""
        # print "quit DAQGeneric channel", self.name
        self.plot.close()
class OutputChannelGui(DaqChannelGui):
    """Task-GUI widget for an analog ('ao') or digital ('do') output channel.

    Provides a wave-generator for the command waveform, pre-set and holding
    value controls, and plots of the single and sequence waveforms.
    """
    # Emitted (with the device name) when sequence parameters change.
    sigSequenceChanged = Qt.Signal(object)
    # Emitted (with self) after waveforms have been regenerated.
    sigDataChanged = Qt.Signal(object)
    def __init__(self, *args):
        self._block_update = False  # blocks plotting during state changes
        DaqChannelGui.__init__(self, *args)
        self.units = ''
        self.currentPlot = None
        if self.config['type'] == 'ao':
            self.ui = AOChannelTemplate()
        elif self.config['type'] == 'do':
            self.ui = DOChannelTemplate()
        else:
            raise Exception("Unrecognized channel type '%s'" % self.config['type'])
        self.ui.setupUi(self)
        self.postUiInit()
        self.daqChanged(self.daqUI.currentState())
        if self.config['type'] == 'ao':
            for s in self.getSpins():
                s.setOpts(dec=True, bounds=[None, None], step=1.0, minStep=1e-12, siPrefix=True)
        self.daqUI.sigChanged.connect(self.daqChanged)
        self.ui.waveGeneratorWidget.sigDataChanged.connect(self.updateWaves)
        self.ui.waveGeneratorWidget.sigFunctionChanged.connect(self.waveFunctionChanged)
        self.ui.waveGeneratorWidget.sigParametersChanged.connect(self.sequenceChanged)
        self.ui.holdingCheck.stateChanged.connect(self.holdingCheckChanged)
        self.ui.holdingSpin.valueChanged.connect(self.holdingSpinChanged)
        self.ui.functionCheck.toggled.connect(self.functionCheckToggled)
        self.dev.sigHoldingChanged.connect(self.updateHolding)
        self.holdingCheckChanged()
        self.ui.functionCheck.setChecked(True)
    def getSpins(self):
        # Spin boxes that carry the channel's unit suffix.
        return (self.ui.preSetSpin, self.ui.holdingSpin)
    def setMeta(self, key, **kwargs):
        ## key is 'x' (time), 'y' (amp), or 'xy' (sum)
        self.ui.waveGeneratorWidget.setMeta(key, **kwargs)
    def setUnits(self, units, **kwargs):
        """Set units on the base widget and the wave generator's y axis."""
        DaqChannelGui.setUnits(self, units)
        self.ui.waveGeneratorWidget.setMeta('y', units=units, siPrefix=True, **kwargs)
    def quit(self):
        """Disconnect all signals before the base class closes the plot."""
        DaqChannelGui.quit(self)
        try:
            self.daqUI.sigChanged.disconnect(self.daqChanged)
        except TypeError:
            # already disconnected
            pass
        self.ui.waveGeneratorWidget.sigDataChanged.disconnect(self.updateWaves)
        self.ui.waveGeneratorWidget.sigFunctionChanged.disconnect(self.waveFunctionChanged)
        self.ui.waveGeneratorWidget.sigParametersChanged.disconnect(self.sequenceChanged)
        self.ui.holdingCheck.stateChanged.disconnect(self.holdingCheckChanged)
        self.ui.holdingSpin.valueChanged.disconnect(self.holdingSpinChanged)
        self.dev.sigHoldingChanged.disconnect(self.updateHolding)
    def functionCheckToggled(self, checked):
        # Enable/disable the wave generator and redraw either way.
        if checked:
            self.ui.waveGeneratorWidget.setEnabled(True)
            self.updateWaves()
        else:
            self.ui.waveGeneratorWidget.setEnabled(False)
            self.updateWaves()
    def daqChanged(self, state):
        """Track the DAQ's sample rate / number of points and rebuild waveforms."""
        self.rate = state['rate']
        self.numPts = state['numPts']
        self.timeVals = numpy.linspace(0, float(self.numPts) / self.rate, self.numPts)
        self.updateWaves()
    def listSequence(self):
        """Return the wave generator's sequence parameter lists."""
        return self.ui.waveGeneratorWidget.listSequences()
    def sequenceChanged(self):
        self.sigSequenceChanged.emit(self.dev.name())
    def generateTask(self, params=None):
        """Build the task description dict (preset / holding / command wave)."""
        if params is None:
            params = {}
        prot = {}
        state = self.stateGroup.state()
        if state['preSetCheck']:
            prot['preset'] = state['preSetSpin']
        if state['holdingCheck']:
            prot['holding'] = state['holdingSpin']
        if state['functionCheck']:
            prot['command'] = self.getSingleWave(params)
        return prot
    def handleResult(self, result, params):
        # Output channels produce no result data.
        pass
    def updateWaves(self):
        """Regenerate and plot all sequence waveforms (gray) and the single wave (red)."""
        if self._block_update:
            return
        if not self.ui.functionCheck.isChecked():
            self.plot.clear()
            return
        self.clearPlots()
        ## display sequence waves
        params = {}
        ps = self.ui.waveGeneratorWidget.listSequences()
        for k in ps:
            params[k] = list(range(len(ps[k])))
        waves = []
        runSequence(lambda p: waves.append(self.getSingleWave(p)), params,
                    list(params.keys()))  ## appends waveforms for the entire parameter space to waves
        autoRange = self.plot.getViewBox().autoRangeEnabled()
        self.plot.enableAutoRange(x=False, y=False)
        try:
            for w in waves:
                if w is not None:
                    # self.ui.functionCheck.setChecked(True)
                    self.plotCurve(w, color=Qt.QColor(100, 100, 100))
            ## display single-mode wave in red
            single = self.getSingleWave()
            if single is not None:
                # self.ui.functionCheck.setChecked(True)
                self.plotCurve(single, color=Qt.QColor(200, 100, 100))
        finally:
            self.plot.enableAutoRange(x=autoRange[0], y=autoRange[1])
        self.sigDataChanged.emit(self)
    def taskStarted(self, params):
        ## Draw green trace for current command waveform
        if not self.stateGroup.state()['displayCheck']:
            return
        if self.currentPlot is not None:
            self.plot.removeItem(self.currentPlot)
        cur = self.getSingleWave(params)
        if cur is not None:
            self.currentPlot = self.plotCurve(cur, color=Qt.QColor(100, 200, 100))
            self.currentPlot.setZValue(100)
    def plotCurve(self, data, color=Qt.QColor(100, 100, 100), replot=True):
        """Plot *data* against the current time base and return the curve item."""
        plot = self.plot.plot(y=data, x=self.timeVals, pen=mkPen(color))
        return plot
    def getSingleWave(self, params=None):
        """Return one command waveform, with the holding value applied as offset."""
        state = self.stateGroup.state()
        h = self.getHoldingValue()
        if h is not None:
            self.ui.waveGeneratorWidget.setOffset(h)
        wave = self.ui.waveGeneratorWidget.getSingle(self.rate, self.numPts, params)
        return wave
    def holdingCheckChanged(self, *v):
        # Enable the spin only when the user overrides the holding value.
        self.ui.holdingSpin.setEnabled(self.ui.holdingCheck.isChecked())
        self.updateHolding()
    def holdingSpinChanged(self, *args):
        hv = self.getHoldingValue()
        if hv is not None:
            self.ui.waveGeneratorWidget.setOffset(hv)
    def updateHolding(self):
        """Push the effective holding value into the spin box and wave offset."""
        hv = self.getHoldingValue()
        if hv is not None:
            if not self.ui.holdingCheck.isChecked():
                self.ui.holdingSpin.setValue(hv)
            self.ui.waveGeneratorWidget.setOffset(hv)
    def getHoldingValue(self):
        """Return the value for this channel that will be used when the task is run
        (by default, this is just the current holding value)"""
        if self.ui.holdingCheck.isChecked():
            return self.ui.holdingSpin.value()
        else:
            return self.taskGui().getChanHolding(self.name)
    def waveFunctionChanged(self):
        # Keep the function checkbox in sync with whether a function is defined.
        if self.ui.waveGeneratorWidget.functionString() != "":
            self.ui.functionCheck.setChecked(True)
        else:
            self.ui.functionCheck.setChecked(False)
    def restoreState(self, state):
        """Restore widget state with plotting suppressed, then redraw once."""
        block = self._block_update
        self._block_update = True
        try:
            DaqChannelGui.restoreState(self, state)
        finally:
            self._block_update = False
        self.updateWaves()
class InputChannelGui(DaqChannelGui):
    """Task-GUI widget for a DAQ input (recording) channel."""

    def __init__(self, *args):
        DaqChannelGui.__init__(self, *args)
        self.ui = InputChannelTemplate()
        self.ui.setupUi(self)
        self.postUiInit()
        # When True, existing traces are cleared before the next result is drawn.
        self.clearBeforeNextPlot = False

    def taskSequenceStarted(self):
        """Flag that the next incoming result begins a new sequence."""
        self.clearBeforeNextPlot = True

    def listSequence(self):
        """Input channels contribute no sequence parameters."""
        return []

    def generateTask(self, params=None):
        """Build the task description from the record checkboxes."""
        ui_state = self.stateGroup.state()
        return {
            'record': ui_state['recordCheck'],
            'recordInit': ui_state['recordInitCheck'],
        }

    def handleResult(self, result, params):
        """Plot an acquired trace, clearing old traces at sequence boundaries."""
        if not self.stateGroup.state()['displayCheck']:
            return
        if self.clearBeforeNextPlot:
            self.clearPlots()
            self.clearBeforeNextPlot = False
        self.plot.plot(
            y=result.view(numpy.ndarray),
            x=result.xvals('Time'),
            pen=mkPen(200, 200, 200),
            params=params)
| mit | 0786c043721e7e7bf531668684e983a9 | 32.419075 | 102 | 0.623108 | 3.906419 | false | false | false | false |
acq4/acq4 | acq4/drivers/MultiClamp/MultiClampTelegraph.py | 2 | 14676 | # -*- coding: utf-8 -*-
from __future__ import print_function
import sys
from acq4.util.clibrary import winDefs, CParser, CLibrary
# NOTE(review): hard-coded developer path; it is appended *after* the acq4
# import above, so it appears to be a leftover -- confirm before removing.
sys.path.append('C:\\cygwin\\home\\Experimenters\\luke\\acq4\\lib\\util')
import ctypes
import os, threading, time
# Module-wide debug switch; also the default for MultiClampTelegraph(debug=...).
DEBUG = False
if DEBUG:
    print("MultiClampTelegraph Debug:", DEBUG)
__all__ = ['MultiClampTelegraph', 'wmlib']
## Load windows definitions
windowsDefs = winDefs() #verbose=True)
d = os.path.dirname(__file__)
# Load telegraph definitions (parsed once, then read from the cache file)
teleDefs = CParser(
    #os.path.join(d, 'MultiClampBroadcastMsg.hpp'),
    copyFrom=windowsDefs,
    cache=os.path.join(d, 'MultiClampBroadcastMsg.hpp.cache'),
    verbose=DEBUG
)
## Windows Messaging API
# provides RegisterWindowMessageA, PostMessageA, PeekMessageA, GetMessageA
# See: http://msdn.microsoft.com/en-us/library/dd458658(VS.85).aspx
wmlib = CLibrary(ctypes.windll.User32, teleDefs, prefix='MCTG_')
## Naturally we can't use the same set of definitions for the 700A and 700B.
ax700ADefs = CParser(
    #os.path.join(d, 'MCTelegraphs.hpp'),
    copyFrom=windowsDefs,
    cache=os.path.join(d, 'MCTelegraphs.hpp.cache'),
    verbose=DEBUG
)
class MultiClampTelegraph:
    """Class for receiving 'telegraph' packets from MultiClamp commander.
    This class is automatically invoked by MultiClamp."""
    def __init__(self, channels, callback, debug=DEBUG):
        """Create a telegraph thread that opens connections to the devices listed in
        'channels' and reports changes to the devices through 'callback'.
        """
        self.debug = debug
        if self.debug:
            print("Initializing MultiClampTelegraph")
        ## remember index of each device for communicating through callback
        self.channels = channels
        self.devIndex = dict([(self.mkDevId(channels[k]), k) for k in channels])
        #print "DEV index:", self.devIndex
        self.callback = callback
        self.lock = threading.RLock()
        # Daemon thread running the hidden-window Win32 message loop.
        self.thread = threading.Thread(name="MultiClampTelegraph", target=self.messageLoop)
        self.thread.daemon = True
        self.startMessageThread()
    def mkDevId(self, desc):
        """Create a device ID used for communicating via telegraph"""
        # model 0 (700A): pack COM port / bus / channel into one integer;
        # model 1 (700B): pack serial number / channel.
        #print "mkDevId", desc
        if self.debug:
            print("MultiClampTelegraph.mkDevId called.")
        if desc['model'] == 0:
            return desc['com'] | (desc['dev'] << 8) | (desc['chan'] << 16)
        elif desc['model'] == 1:
            return (int(desc['sn']) & 0x0FFFFFFF) | (desc['chan'] << 28)
        else:
            raise Exception('Device type not supported:', desc)
    def __del__(self):
        self.quit()
    def quit(self):
        """Stop the message thread; warn (don't raise) if it fails to stop."""
        if self.debug:
            print("MultiClampTelegraph.quit called.")
        if self.thread.is_alive():
            self.stopMessageThread()
            self.thread.join(5.0)
        if self.thread.is_alive():
            print("WARNING: Failed to stop MultiClamp telegraph thread.")
    def startMessageThread(self):
        """Clear the stop flag and start the message-loop thread."""
        if self.debug:
            print("MultiClampTelegraph.startMessageThread called.")
        with self.lock:
            self.stopThread = False
            self.thread.start()
    def stopMessageThread(self):
        """Request that the message loop exit at its next iteration."""
        if self.debug:
            print("MultiClampTelegraph.stopMessageThread called.")
        with self.lock:
            self.stopThread = True
    def updateState(self, devID, state):
        """Forward a device state dict to the callback as an 'update' message."""
        #with self.lock:
            #self.devices[devID][1] = state
        #print("update state:", devID, self.devIndex[devID])
        if self.debug:
            print("MultiClampTelegraph.updateState called.")
        self.emit('update', self.devIndex[devID], state)
    def emit(self, *args):
        """Send a message via the registered callback function"""
        if self.debug:
            print("MultiClampTelegraph.emit called.")
        with self.lock:
            self.callback(*args)
    def messageLoop(self):
        """Thread target: create the hidden window and poll for telegraph messages."""
        if self.debug:
            print("MultiClampTelegraph.messageLoop called.")
        # create hidden window for receiving messages (how silly is this?)
        self.createWindow()
        self.registerMessages()
        #print "window handle:", self.hWnd
        #print "messages:", self.msgIds
        # request connection to MCC
        for d in self.devIndex:
            #print "Open:", d
            self.post('OPEN', d)
        # listen for changes / reconnect requests / stop requests
        while True:
            while True: ## pull all waiting messages
                ## wndProc will be called during PeekMessage if we have received any updates.
                ## reconnect messages are received directly by PeekMessage
                ret = wmlib.PeekMessageA(None, self.hWnd, 0, 0, wmlib.PM_REMOVE)
                if ret() == 0:
                    break
                else:
                    msg = ret[0].message
                    if msg == self.msgIds['RECONNECT']:
                        devID = ret[0].lParam
                        if devID in self.devIndex:
                            self.emit('reconnect')
                            self.post('OPEN', devID) ## reopen connection to device
                    elif msg == self.msgIds['COMMAND']:
                        print("Peeked command.")
            with self.lock:
                if self.stopThread:
                    for d in self.devIndex:
                        self.post('CLOSE', d)
                    break
            time.sleep(0.1)
    def createWindow(self):
        """Register a window class and create a hidden message-only window."""
        if self.debug:
            print("MultiClampTelegraph.createWindow called.")
        self.wndClass = wmlib.WNDCLASSA(0, wmlib.WNDPROC(self.wndProc), 0, 0, wmlib.HWND_MESSAGE, 0, 0, 0, b"", b"AxTelegraphWin")
        ret = wmlib.RegisterClassA(self.wndClass)
        #print "Register class:", ret()
        if ret() == 0:
            raise Exception("Error registering window class.")
        cwret = wmlib.CreateWindowExA(
            0, self.wndClass.lpszClassName, b"title",
            wmlib.WS_OVERLAPPEDWINDOW,
            wmlib.CW_USEDEFAULT,
            wmlib.CW_USEDEFAULT,
            wmlib.CW_USEDEFAULT,
            wmlib.CW_USEDEFAULT,
            0, 0, wmlib.HWND_MESSAGE, 0)
        if cwret() == 0:
            raise Exception("Error creating window.", self.getWindowsError())
        self.hWnd = cwret.rval
        #print "Create window:", self.hWnd
    def wndProc(self, hWnd, msg, wParam, lParam):
        """Callback function executed by windows when a message has arrived."""
        #print "Window event:", msg
        if self.debug:
            print("MultiClampTelegraph.wndProc called.")
        if msg == wmlib.WM_COPYDATA:
            data = ctypes.cast(lParam, ctypes.POINTER(wmlib.COPYDATASTRUCT)).contents
            if data.dwData == self.msgIds['REQUEST']:
                if self.debug:
                    print(" COPYDATASTRUCT.dwData (ULONG_PTR, a memory address):", data.dwData) ### ULONG_PTR should be a 64-bit number on 64-bit machines, and a 32-bit number on 32-bit machines
                    print(" COPYDATASTRUCT.cbData (DWORD, the size (in bytes) of data pointed to by lpData):", data.cbData)
                    print(" COPYDATASTRUCT.lpData (PVOID, a pointer to the data to be passed): ", data.lpData)
                data = ctypes.cast(data.lpData, ctypes.POINTER(wmlib.MC_TELEGRAPH_DATA)).contents
                #### Make sure packet is for the correct device!
                devID = self.mkDevId({'com': data.uComPortID, 'dev': data.uAxoBusID, 'chan': data.uChannelID, 'model': data.uHardwareType, 'sn': data.szSerialNumber})
                if not devID in self.devIndex:
                    return False
                #for f in data._fields_:
                    #print "  ", f[0], getattr(data, f[0])
                #global d
                #state = dict([(f[0], getattr(data, f[0])) for f in data._fields_])
                ## translate state into something prettier
                #print "units:", data.uScaleFactorUnits, data.uRawScaleFactorUnits
                mode = ['VC', 'IC', 'I=0'][data.uOperatingMode]
                if data.uHardwareType == wmlib.MCTG_HW_TYPE_MC700A:
                    # 700A signal names come from the separately parsed header
                    if self.debug:
                        print(" processing MC700A mode", mode)
                    if mode == 'VC':
                        priSignal = ax700ADefs.defs['values']['MCTG_OUT_MUX_VC_LONG_NAMES'][data.uScaledOutSignal]
                        secSignal = ax700ADefs.defs['values']['MCTG_OUT_MUX_VC_LONG_NAMES_RAW'][data.uRawOutSignal]
                    else:
                        priSignal = ax700ADefs.defs['values']['MCTG_OUT_MUX_IC_LONG_NAMES'][data.uScaledOutSignal]
                        secSignal = ax700ADefs.defs['values']['MCTG_OUT_MUX_IC_LONG_NAMES_RAW'][data.uRawOutSignal]
                else:
                    try:
                        priSignal = wmlib.MCTG_OUT_GLDR_LONG_NAMES[data.uScaledOutSignal]
                    except IndexError:
                        # Some amps report signal 44 here when either auxiliary signal is selected.
                        # This prevents errors, but unfortunately means we can't tell the difference between
                        # aux1 and aux2.
                        priSignal = "Auxiliary 1"
                    try:
                        secSignal = wmlib.MCTG_OUT_GLDR_LONG_NAMES[data.uRawOutSignal]
                    except IndexError:
                        # Some amps report signal 44 here when either auxiliary signal is selected.
                        # This prevents errors, but unfortunately means we can't tell the difference between
                        # aux1 and aux2.
                        secSignal = "Auxiliary 1"
                priUnits = UNIT_MAP[data.uScaleFactorUnits]
                secUnits = UNIT_MAP[data.uRawScaleFactorUnits]
                # Scale factors are 0 for aux signals.
                sf = data.dScaleFactor if data.dScaleFactor != 0 else 1
                rsf = data.dRawScaleFactor if data.dRawScaleFactor != 0 else 1
                state = {
                    'mode': mode,
                    'primarySignal': priSignal,
                    'primaryGain': data.dAlpha,
                    'primaryUnits': priUnits[0],
                    'primaryScaleFactor': priUnits[1] / (sf * data.dAlpha),
                    'secondarySignal': secSignal,
                    'secondaryGain': 1.0,
                    'secondaryUnits': secUnits[0],
                    'secondaryScaleFactor': secUnits[1] / (rsf * 1.0),
                    'membraneCapacitance': data.dMembraneCap,
                    'LPFCutoff': data.dLPFCutoff,
                    'extCmdScale': data.dExtCmdSens,
                }
                #print "EXT:", data.dExtCmdSens
                self.updateState(devID, state)
            elif data.dwData == self.msgIds['COMMAND']:
                print("Caught command!")
                return False
            #else:
                ##print " unknown message type", data.dwData
        return True
    def getWindowsError(self):
        """Return the last Win32 error code for exception messages."""
        if self.debug:
            print("MultiClampTelegraph.getWindowsError called.")
        return ctypes.windll.kernel32.GetLastError()
    def registerMessages(self):
        """Register the named telegraph window messages and store their IDs."""
        if self.debug:
            print("MultiClampTelegraph.registerMessages called.")
        self.msgIds = {}
        for m in ['OPEN', 'CLOSE', 'REQUEST', 'BROADCAST', 'RECONNECT', 'ID']:
            self.msgIds[m] = wmlib.RegisterWindowMessageA(wmlib("values", "MCTG_" + m + "_MESSAGE_STR").encode("ascii"))()
        self.msgIds["COMMAND"] = wmlib.RegisterWindowMessageA(wmlib("values", "MC_COMMAND_MESSAGE_STR").encode("ascii"))()
    def post(self, msg, val):
        """Broadcast the registered message *msg* with lParam *val*."""
        if self.debug:
            print("MultiClampTelegraph.post called.")
            print(" msg:", msg, " val:", val)
        ret = wmlib.PostMessageA(wmlib.HWND_BROADCAST, self.msgIds[msg], self.hWnd, val)
        if ret() == 0:
            raise Exception("Error during post.", self.getWindowsError())
UNIT_MAP = {
wmlib.UNITS_VOLTS_PER_VOLT: ('V', 1.0),
wmlib.UNITS_VOLTS_PER_MILLIVOLT: ('V', 1e-3),
wmlib.UNITS_VOLTS_PER_MICROVOLT: ('V', 1e-6),
wmlib.UNITS_VOLTS_PER_AMP: ('A', 1.0),
wmlib.UNITS_VOLTS_PER_MILLIAMP: ('A', 1e-3),
wmlib.UNITS_VOLTS_PER_MICROAMP: ('A', 1e-6),
wmlib.UNITS_VOLTS_PER_NANOAMP: ('A', 1e-9),
wmlib.UNITS_VOLTS_PER_PICOAMP: ('A', 1e-12)
}
#UNIT_MAP = {}
#for k in teleDefs.defs['values']:
#if k[:17] == 'MCTG_UNITS_VOLTS_':
#UNIT_MAP[teleDefs.defs['values'][k]] = k[11:].lower()
## poll for commander windows
#def peekMsg():
#ret = wmlib.PeekMessageA(None, hWnd, 0, 0, wmlib.PM_REMOVE)
#if ret() == 0:
#return None
#elif ret() == -1:
#raise Exception("Error during peek", self.getWindowsError())
#else:
#msg = ret[0]
#if msg.message in msgIds.values():
#print "Peeked Message:", msgIds.keys()[msgIds.values().index(msg.message)]
#else:
#print "Peeked Message:", msg.message
#return msg
#def getMsgs():
#msgs = []
#while True:
#msg = peekMsg()
#if msg is None:
#return msgs
#else:
#msgs.append(msg)
#post(msgIds['OPEN'], packSignalIDs(3, 0, 1))
#post(msgIds['BROADCAST'], 0)
#time.sleep(1)
#msgs = getMsgs()
#ids = [m.lParam for m in msgs if m.message==msgIds['ID']]
#print "Devices available:", map(unpackID, ids)
#for i in ids:
#post(msgIds['OPEN'], i)
#def msgLoop():
#while True:
#m = peekMsg()
##if m is not None:
##print "got message"
#time.sleep(0.1)
#msgLoop()
#app._exec()
## start thread for receiving messages
## for each message:
#if msg.cbData == wmlib.MC_TELEGRAPH_DATA.size() and msg.dwData == msgIds['MCTG_REQUEST_MESSAGE_STR']:
#data = wmlib.MC_TELEGRAPH_DATA(msg.lpData)
#if data.uComPortID == 3 and data.uAxoBusID == 0 and data.uChannelID == 1:
### message is the correct type, and for the correct channel
#pass
## watch for reconnect messages
## close connection
#wmlib.PostMessageA(wmlib.HWND_BROADCAST, msgIds['MCTG_CLOSE_MESSAGE_STR'], hWnd, packSignalIDs(3, 0, 1))
## request an update
## careful -- does this disable automatic notification of changes?
#wmlib.PostMessageA(wmlib.HWND_BROADCAST, msgIds['MCTG_REQUEST_MESSAGE_STR'], hWnd, packSignalIDs(3, 0, 1))
| mit | e6c5f9a6a9643233091d2b2573a7d9a1 | 37.722955 | 197 | 0.567457 | 3.782474 | false | false | false | false |
acq4/acq4 | acq4/util/Canvas/items/MarkersCanvasItem.py | 3 | 6603 | # -*- coding: utf-8 -*-
from __future__ import print_function
import weakref
from acq4.util import Qt
from .CanvasItem import CanvasItem
import pyqtgraph as pg
import pyqtgraph.graphicsItems.TargetItem
from .itemtypes import registerItemType
class MarkersCanvasItem(CanvasItem):
"""
Canvas item used for marking multiple locations in 3D.
"""
_typeName = "Markers"
def __init__(self, **kwds):
kwds.pop('viewRect', None)
item = pg.ItemGroup()
opts = {'scalable': False, 'rotatable': False, 'movable': False}
opts.update(kwds)
CanvasItem.__init__(self, item, **opts)
self.params = pg.parametertree.Parameter.create(name='Markers', type='group', addText='Add marker...')
self.params.addNew = self.addMarker
self.params.sigTreeStateChanged.connect(self._paramsChanged)
self._markerCtrl = MarkerItemCtrlWidget(self)
self.layout.addWidget(self._markerCtrl, self.layout.rowCount(), 0, 1, 2)
@classmethod
def checkFile(cls, fh):
return 0
def addMarker(self, name='marker', position=(0, 0, 0), params=None):
children = [
PointParameter(name='Position', value=position)
]
# allow adding extra parameters when adding new markers
# if params is not None:
# :MC: disabled because kwds does not exist
# children.extend(kwds['params'])
param = pg.parametertree.Parameter.create(name=name, autoIncrementName=True, type='group', renamable=True, removable=True, children=children)
self.params.addChild(param)
target = pg.graphicsItems.TargetItem.TargetItem()
target.setLabel(name, {"angle": 45})
target.setParentItem(self.graphicsItem())
target.setPos(position[0], position[1])
target.param = weakref.ref(param)
target.sigDragged.connect(self._targetMoved)
param.target = target
def removeMarker(self, name):
param = self.params.child(name)
self.params.removeChild(param)
param.target.scene().removeItem(target)
def setMarkerPosition(self):
self.btns['setCellPosition'].setText("Click on new cell position")
# Evaluate items under click, ignore anything that is transparent, and raise an exception if the top item is partially transparent.
# what if the top item has a composition mode that renders it invisible?
# Maybe we just need a global focus similar to the camera module?
# just show one line for the most recently-updated image depth?
# One line per image?
def _targetMoved(self, target):
pos = target.pos()
param = target.param()
param['Position', 'x'] = pos.x()
param['Position', 'y'] = pos.y()
def _paramsChanged(self, root, changes):
for param, change, args in changes:
if change == 'value' and isinstance(param, PointParameter):
target = param.parent().target
with pg.SignalBlock(target.sigDragged, self._targetMoved):
target.setPos(*param.value()[:2])
elif change == 'name' and param in self.params.children():
param.target.setLabel(param.name())
def saveState(self, **kwds):
state = CanvasItem.saveState(self, **kwds)
state['markers'] = [(p.name(), p['Position']) for p in self.params.children()]
return state
def restoreState(self, state):
markers = state.pop('markers')
CanvasItem.restoreState(self, state)
for marker in self.params.children():
self.removeMarker(marker.name())
for name, pos in markers:
self.addMarker(name, pos)
class PointParameterItem(pg.parametertree.ParameterItem):
def __init__(self, param, depth):
pg.parametertree.ParameterItem.__init__(self, param, depth)
self.valueChanged(self.param, self.param.value())
def valueChanged(self, param, val):
strs = tuple([pg.siFormat(x, suffix='m') for x in val])
self.setText(1, '[%s, %s, %s]' % strs)
class PointParameter(pg.parametertree.Parameter):
itemClass = PointParameterItem
def __init__(self, **kwds):
pos = kwds.get('value', (0, 0, 0))
pg.parametertree.Parameter.__init__(self, expanded=False, children=[
{'name': 'x', 'type': 'float', 'value': pos[0], 'suffix': 'm', 'siPrefix': True, 'step': 10e-6},
{'name': 'y', 'type': 'float', 'value': pos[1], 'suffix': 'm', 'siPrefix': True, 'step': 10e-6},
{'name': 'z', 'type': 'float', 'value': pos[2], 'suffix': 'm', 'siPrefix': True, 'step': 10e-6},
], **kwds)
self._updateChildren()
self.sigTreeStateChanged.connect(self._treeStateChanged)
def _updateChildren(self):
with pg.SignalBlock(self.sigTreeStateChanged, self._treeStateChanged):
self['x'], self['y'], self['z'] = self.value()
def _treeStateChanged(self, root, changes):
# child parameter value changed; update this value to match
for param, change, args in changes:
if change != 'value':
continue
if param is self:
self._updateChildren()
else:
with pg.SignalBlock(self.sigTreeStateChanged, self._treeStateChanged):
self.setValue((self['x'], self['y'], self['z']))
registerItemType(MarkersCanvasItem)
class MarkerItemCtrlWidget(Qt.QWidget):
def __init__(self, canvasitem):
Qt.QWidget.__init__(self)
self.canvasitem = weakref.ref(canvasitem)
self.layout = Qt.QGridLayout()
self.layout.setContentsMargins(0, 0, 0, 0)
self.setLayout(self.layout)
self.ptree = pg.parametertree.ParameterTree(showHeader=False)
self.ptree.setParameters(canvasitem.params)
self.layout.addWidget(self.ptree, 0, 0, 1, 2)
self.saveJsonBtn = Qt.QPushButton('Save Json')
self.layout.addWidget(self.saveJsonBtn, 1, 0)
self.saveJsonBtn.clicked.connect(self.saveJson)
self.copyJsonBtn = Qt.QPushButton('Copy Json')
self.layout.addWidget(self.copyJsonBtn, 1, 0)
self.copyJsonBtn.clicked.connect(self.copyJson)
def saveJson(self):
filename = Qt.QFileDialog.getSaveFileName(None, "Save markers", path, "JSON files (*.json)")
if filename == '':
return
if not filename.endswith('.json'):
filename += '.json'
def copyJson(self):
pass
| mit | 7475dc24776c6a003e035eda715f758d | 37.395349 | 149 | 0.615023 | 3.918694 | false | false | false | false |
acq4/acq4 | acq4/devices/Camera/taskGUI.py | 2 | 5099 | # -*- coding: utf-8 -*-
import pyqtgraph as pg
from acq4.devices.DAQGeneric.taskGUI import DAQGenericTaskGui
from acq4.util import Qt
Ui_Form = Qt.importTemplate('.TaskTemplate')
class CameraTaskGui(DAQGenericTaskGui):
def __init__(self, dev, taskRunner):
DAQGenericTaskGui.__init__(self, dev, taskRunner, ownUi=False) ## When initializing superclass, make sure it knows this class is creating the ui.
self.ui = Ui_Form()
self.ui.setupUi(self)
self.stateGroup = pg.WidgetGroup(self) ## create state group before DAQ creates its own interface
self.ui.horizSplitter.setStretchFactor(0, 0)
self.ui.horizSplitter.setStretchFactor(1, 1)
DAQGenericTaskGui.createChannelWidgets(self, self.ui.ctrlSplitter, self.ui.plotSplitter)
self.ui.plotSplitter.setStretchFactor(0, 10)
self.ui.plotSplitter.setStretchFactor(1, 1)
self.ui.plotSplitter.setStretchFactor(2, 1)
self.ui.fixedFrameEnabled.toggled.connect(self._setFixedFrameEnable)
self.ui.minFrames.setOpts(int=True, dec=True, step=0.1, minStep=1, compactHeight=False)
## plots should not be storing more than one trace at a time.
for p in self.plots.values():
p.plotItem.ctrl.maxTracesCheck.setChecked(True)
p.plotItem.ctrl.maxTracesSpin.setValue(1)
p.plotItem.ctrl.forgetTracesCheck.setChecked(True)
tModes = self.dev.listParams('triggerMode')[0]
for m in tModes:
self.ui.triggerModeCombo.addItem(m)
self.vLines = []
if 'trigger' in self.plots:
l = pg.InfiniteLine()
self.vLines.append(l)
self.plots['trigger'].addItem(l)
if 'exposure' in self.plots:
l = pg.InfiniteLine()
self.vLines.append(l)
self.plots['exposure'].addItem(l)
self.frameTicks = pg.VTickGroup()
self.frameTicks.setYRange([0.8, 1.0])
self.ui.imageView.sigTimeChanged.connect(self.timeChanged)
self.taskRunner.sigTaskPaused.connect(self.taskPaused)
def _setFixedFrameEnable(self, enable):
self.ui.minFrames.setEnabled(enable)
def timeChanged(self, i, t):
for l in self.vLines:
l.setValue(t)
def saveState(self):
s = self.currentState()
s['daqState'] = DAQGenericTaskGui.saveState(self)
return s
def restoreState(self, state):
self.stateGroup.setState(state)
if 'daqState' in state:
DAQGenericTaskGui.restoreState(self, state['daqState'])
def generateTask(self, params=None):
daqProt = DAQGenericTaskGui.generateTask(self, params)
if params is None:
params = {}
state = self.currentState()
task = {
'record': state['recordCheck'],
'triggerProtocol': state['triggerCheck'],
'params': {
'triggerMode': state['triggerModeCombo']
}
}
task['channels'] = daqProt
if state['releaseBetweenRadio']:
task['pushState'] = None
task['popState'] = None
if state['fixedFrameEnabled']:
task['minFrames'] = state['minFrames']
return task
def taskSequenceStarted(self):
DAQGenericTaskGui.taskSequenceStarted(self)
if self.ui.releaseAfterRadio.isChecked():
# For now, the task gui only changes triggerMode. If we allow
# other parameters to be changed from here, then they will have to be added
# to the list of parameters to push/pop
self.dev.pushState('cam_proto_state', params=['triggerMode'])
def taskFinished(self):
DAQGenericTaskGui.taskFinished(self)
if self.ui.releaseAfterRadio.isChecked():
self.dev.popState('cam_proto_state')
def taskPaused(self): ## If the task is paused, return the camera to its previous state until we start again
if self.ui.releaseAfterRadio.isChecked():
self.dev.popState('cam_proto_state')
self.dev.pushState('cam_proto_state')
def currentState(self):
return self.stateGroup.state()
def handleResult(self, result, params):
state = self.stateGroup.state()
if state['displayCheck']:
if result is None or len(result.frames()) == 0:
print("No images returned from camera task.")
self.ui.imageView.clear()
else:
frameTimes, precise = result.frameTimes()
if precise:
self.ui.imageView.setImage(result.asMetaArray(), xvals=frameTimes)
self.frameTicks.setXVals(frameTimes)
else:
self.ui.imageView.setImage(result.asMetaArray())
DAQGenericTaskGui.handleResult(self, result.daqResult(), params)
def quit(self):
self.ui.imageView.close()
DAQGenericTaskGui.quit(self)
| mit | fecb54bc5c10fa0633f503d25e9d56e1 | 38.223077 | 154 | 0.608158 | 3.949651 | false | false | false | false |
acq4/acq4 | acq4/analysis/AnalysisHost.py | 3 | 2097 | # -*- coding: utf-8 -*-
from __future__ import print_function
import six
from acq4.util import Qt
from . import modules
import pyqtgraph.dockarea as dockarea
import acq4.Manager
#from acq4.LogWindow import LogButton
from acq4.util.StatusBar import StatusBar
class AnalysisHost(Qt.QMainWindow):
"""Window for hosting analysis widgets.
Provides:
- File / DB access for module
-
"""
def __init__(self, dataManager=None, dataModel=None, module=None):
Qt.QMainWindow.__init__(self)
self.dm = dataManager
self.dataModel = dataModel
self.mod = None
self.dockArea = dockarea.DockArea()
self.setCentralWidget(self.dockArea)
#self.logBtn = LogButton('Log')
#self.statusBar().addPermanentWidget(self.logBtn)
self.setStatusBar(StatusBar())
if module is not None:
self.loadModule(module)
self.show()
def dataManager(self):
return self.dm
def loadModule(self, modName):
if self.mod is not None:
raise Exception("No fair loading extra modules in one host.")
self.mod = modules.load(modName, self)
elems = self.mod.listElements()
for name, el in elems.items():
w = self.mod.getElement(name, create=True)
d = dockarea.Dock(name=name, size=el.size())
if w is not None:
d.addWidget(w)
pos = el.pos()
if pos is None:
pos = ()
#print d, pos
if isinstance(pos, six.string_types):
pos = (pos,)
self.dockArea.addDock(d, *pos)
self.elements = elems
self.setWindowTitle(modName)
acq4.Manager.getManager().declareInterface(modName, 'analysisMod', self.mod)
# ask module for prefered size
self.resize(*self.mod.sizeHint())
def closeEvent(self, ev):
if self.quit():
ev.accept()
def quit(self):
return self.mod.quit()
| mit | 10a8da06f7d9f7726ac8343a00e34643 | 26.96 | 84 | 0.568908 | 3.994286 | false | false | false | false |
acq4/acq4 | acq4/util/imaging/frame_display.py | 3 | 5194 | from __future__ import print_function
import pyqtgraph as pg
from acq4.util import Qt
from acq4.util.cuda import shouldUseCuda, cupy
from acq4.util.debug import printExc
from .bg_subtract_ctrl import BgSubtractCtrl
from .contrast_ctrl import ContrastCtrl
class FrameDisplay(Qt.QObject):
"""Used with live imaging to hold the most recently acquired frame and allow
user control of contrast, gain, and background subtraction.
Provides:
* frame rate limiting
* contrast control widget
* background subtraction control widget
"""
# Allow subclasses to override these:
contrastClass = ContrastCtrl
bgSubtractClass = BgSubtractCtrl
imageUpdated = Qt.Signal(object) # emits frame when the image is redrawn
def __init__(self, maxFPS=30):
Qt.QObject.__init__(self)
self._maxFPS = maxFPS
self._sPerFrame = 1.0 / maxFPS
self._msPerFrame = int(self._sPerFrame * 1000)
self._imageItem = pg.ImageItem() # Implicitly depends on global setConfigOption state
self._imageItem.setAutoDownsample(True)
self.contrastCtrl = self.contrastClass()
self.contrastCtrl.setImageItem(self._imageItem)
self.bgCtrl = self.bgSubtractClass()
self.bgCtrl.needFrameUpdate.connect(self.updateFrame)
self.nextFrame = None
self._updateFrame = False
self.currentFrame = None
self.lastDrawTime = None
self.displayFps = None
self.hasQuit = False
# Check for new frame updates every 16ms
# Some checks may be skipped even if there is a new frame waiting to avoid drawing more than
# 60fps.
self.frameTimer = Qt.QTimer()
self.frameTimer.timeout.connect(self.drawFrame)
self.frameTimer.start(self._msPerFrame) # draw frames no faster than 60Hz
# Qt.QTimer.singleShot(1, self.drawFrame)
# avoiding possible singleShot-induced crashes
def updateFrame(self):
"""Redisplay the current frame.
"""
self._updateFrame = True
self.contrastCtrl.resetAutoGain()
def imageItem(self):
return self._imageItem
def contrastWidget(self):
return self.contrastCtrl
def backgroundWidget(self):
return self.bgCtrl
def backgroundFrame(self):
"""Return the currently active background image or None if background
subtraction is disabled.
"""
return self.bgCtrl.backgroundFrame()
def visibleImage(self):
"""Return a copy of the image as it is currently visible in the scene.
"""
if self.currentFrame is None:
return
return self.currentFrame.getImage()
def newFrame(self, frame):
# lf = None
# if self.nextFrame is not None:
# lf = self.nextFrame
# elif self.currentFrame is not None:
# lf = self.currentFrame
# self.nextFrame gets picked up by drawFrame() at some point
self.nextFrame = frame
self.bgCtrl.newFrame(frame)
def drawFrame(self):
if self.hasQuit:
return
try:
# If we last drew a frame < 1/30s ago, return.
t = pg.ptime.time()
if (self.lastDrawTime is not None) and (t - self.lastDrawTime < self._sPerFrame):
return
# if there is no new frame and no controls have changed, just exit
if not self._updateFrame and self.nextFrame is None:
return
self._updateFrame = False
# If there are no new frames and no previous frames, then there is nothing to draw.
if self.currentFrame is None and self.nextFrame is None:
return
prof = pg.debug.Profiler()
# We will now draw a new frame (even if the frame is unchanged)
if self.lastDrawTime is not None:
fps = 1.0 / (t - self.lastDrawTime)
self.displayFps = fps
self.lastDrawTime = t
prof()
# Handle the next available frame, if there is one.
if self.nextFrame is not None:
self.currentFrame = self.nextFrame
self.nextFrame = None
data = self.currentFrame.getImage()
# if we got a stack of frames, just display the first one. (not sure what else we could do here..)
if data.ndim == 3:
data = data[0]
prof()
# divide the background out of the current frame if needed
data = self.bgCtrl.processImage(data)
prof()
# Set new levels if auto gain is enabled
self.contrastCtrl.processImage(data)
prof()
if shouldUseCuda():
self._imageItem.updateImage(cupy.asarray(data))
else:
self._imageItem.updateImage(data.copy())
prof()
self.imageUpdated.emit(self.currentFrame)
prof()
prof.finish()
except Exception:
printExc("Error while drawing new frames:")
def quit(self):
self._imageItem = None
self.hasQuit = True
| mit | 754190c27f8d007f2964c87c38de8731 | 31.873418 | 110 | 0.608972 | 4.21249 | false | false | false | false |
richrd/suplemon | suplemon/modules/system_clipboard.py | 1 | 2970 | # -*- encoding: utf-8
import subprocess
from suplemon.suplemon_module import Module
class SystemClipboard(Module):
"""Integrates the system clipboard with suplemon."""
def init(self):
self.init_logging(__name__)
if self.has_xsel_support():
self.clipboard_type = "xsel"
elif self.has_pb_support():
self.clipboard_type = "pb"
elif self.has_xclip_support():
self.clipboard_type = "xclip"
else:
self.logger.warning(
"Can't use system clipboard. Install 'xsel' or 'pbcopy' or 'xclip' for system clipboard support.")
return False
self.bind_event_before("insert", self.insert)
self.bind_event_after("copy", self.copy)
self.bind_event_after("cut", self.copy)
def copy(self, event):
lines = self.app.get_editor().get_buffer()
data = "\n".join([str(line) for line in lines])
self.set_clipboard(data)
def insert(self, event):
data = self.get_clipboard()
lines = data.split("\n")
self.app.get_editor().set_buffer(lines)
def get_clipboard(self):
try:
if self.clipboard_type == "xsel":
command = ["xsel", "-b"]
elif self.clipboard_type == "pb":
command = ["pbpaste", "-Prefer", "txt"]
elif self.clipboard_type == "xclip":
command = ["xclip", "-selection", "clipboard", "-out"]
else:
return False
data = subprocess.check_output(command, universal_newlines=True)
return data
except:
return False
def set_clipboard(self, data):
try:
if self.clipboard_type == "xsel":
command = ["xsel", "-i", "-b"]
elif self.clipboard_type == "pb":
command = ["pbcopy"]
elif self.clipboard_type == "xclip":
command = ["xclip", "-selection", "clipboard", "-in"]
else:
return False
p = subprocess.Popen(command, stdin=subprocess.PIPE)
out, err = p.communicate(input=bytes(data, "utf-8"))
return out
except:
return False
def has_pb_support(self):
output = self.get_output(["which", "pbcopy"])
return output
def has_xsel_support(self):
output = self.get_output(["xsel", "--version"])
return output
def has_xclip_support(self):
output = self.get_output(["which", "xclip"]) # xclip -version outputs to stderr
return output
def get_output(self, cmd):
try:
process = subprocess.Popen(cmd, stdout=subprocess.PIPE)
except (OSError, EnvironmentError): # can't use FileNotFoundError in Python 2
return False
out, err = process.communicate()
return out
module = {
"class": SystemClipboard,
"name": "system_clipboard",
}
| mit | b252ab8f442ed4c0a734bad1ee0c3b07 | 31.282609 | 114 | 0.548485 | 3.954727 | false | false | false | false |
acq4/acq4 | acq4/analysis/modules/IVCurve/IVCurve.py | 3 | 89299 | # -*- coding: utf-8 -*-
from __future__ import print_function
from six.moves import range
"""
IVCurve: Analysis module that analyzes current-voltage and firing
relationships from current clamp data.
This is part of Acq4
Paul B. Manis, Ph.D.
2011-2013.
Pep8 compliant (via pep8.py) 10/25/2013
Refactoring begun 3/21/2015
"""
from collections import OrderedDict
import os
import os.path
import itertools
import functools
import numpy as np
import scipy
from acq4.util import Qt
from acq4.analysis.AnalysisModule import AnalysisModule
import pyqtgraph as pg
import acq4.util.matplotlibexporter as matplotlibexporter
import acq4.analysis.tools.Utility as Utility # pbm's utilities...
import acq4.analysis.tools.Fitting as Fitting # pbm's fitting stuff...
import acq4.analysis.tools.ScriptProcessor as ScriptProcessor
import pprint
import time
Ui_Form = Qt.importTemplate('.ctrlTemplate')
# noinspection PyPep8
class IVCurve(AnalysisModule):
"""
IVCurve is an Analysis Module for Acq4.
IVCurve performs analyses of current-voltage relationships in
electrophysiology experiments. The module is interactive, and is primarily
designed to allow a preliminary examination of data collected in current clamp and voltage clamp.
Results analyzed include:
Resting potential (average RMP through the episodes in the protocol).
Input resistance (maximum slope if IV relationship below Vrest)
Cell time constant (single exponential fit)
Ih Sag amplitude and tau
Spike rate as a function of injected current
Interspike interval as a function of time for each current level
RMP as a function of time through the protocol
"""
    def __init__(self, host):
        """Set up the IVCurve analysis module.

        Creates the analysis/flag state (via clear_results), the control
        panel (Ui_Form), the grid of plots (raw data, command, I-V, RMP,
        F-I and FSL/FISI), and wires the GUI signals to the analysis
        routines.

        Parameters
        ----------
        host :
            The acq4 host object, passed through to AnalysisModule.
        """
        AnalysisModule.__init__(self, host)
        self.Clamps = self.dataModel.GetClamps()  # access the "GetClamps" class for reading data
        # Column layout for tabulated/printed output: name -> (field width, format string)
        self.data_template = (
            OrderedDict([('Species', (12, '{:>12s}')), ('Age', (5, '{:>5s}')), ('Sex', (3, '{:>3s}')), ('Weight', (6, '{:>6s}')),
                         ('Temperature', (10, '{:>10s}')), ('ElapsedTime', (11, '{:>11.2f}')),
                         ('RMP', (5, '{:>5.1f}')), ('Rin', (5, '{:>5.1f}')), ('Bridge', (5, '{:>5.1f}')),
                         ('tau', (5, '{:>5.1f}')), ('AdaptRatio', (9, '{:>9.3f}')),
                         ('tauh', (5, '{:>5.1f}')), ('Gh', (6, '{:>6.2f}')),
                         ('FiringRate', (12, '{:>9.1f}')),
                         ('AP1_HalfWidth', (13, '{:>13.2f}')), ('AP1_Latency', (11, '{:>11.1f}')),
                         ('AP2_HalfWidth', (13, '{:>13.2f}')), ('AP2_Latency', (11, '{:>11.1f}')),
                         ('AHP_Depth', (9, '{:9.2f}')),
                         ('Description', (11, '{:s}')),
                         ]))
        # Script processor lets batch scripts drive the same analysis entry points
        self.Script = ScriptProcessor.ScriptProcessor(host)
        self.Script.setAnalysis(analysis=self.updateAnalysis,
                                fileloader = self.loadFileRequested, template = self.data_template,
                                clamps = self.Clamps, printer=self.printAnalysis,
                                dbupdate=self.dbStoreClicked)  # specify the routines to be called and data sets to be used
        self.loaded = None
        self.filename = None
        self.dirsSet = None
        self.lrss_flag = True  # show is default
        self.lrpk_flag = True
        self.rmp_flag = True
        self.bridgeCorrection = None # bridge correction in Mohm.
        self.showFISI = True # show FISI or ISI as a function of spike number (when False)
        self.lrtau_flag = False
        self.regions_exist = False
        # containers for exponential fit results and fitted curves (tauh = Ih sag, tau = membrane)
        self.tauh_fits = {}
        self.tauh_fitted = {}
        self.tau_fits = {}
        self.tau_fitted = {}
        self.regions_exist = False
        self.regions = {}
        self.analysis_summary = {}
        self.tx = None
        self.keep_analysis_count = 0
        self.dataMarkers = []
        self.doUpdates = True
        # color/symbol cycles used when overlaying successive "kept" analyses
        self.colors = ['w', 'g', 'b', 'r', 'y', 'c']
        self.symbols = ['o', 's', 't', 'd', '+']
        self.color_list = itertools.cycle(self.colors)
        self.symbol_list = itertools.cycle(self.symbols)
        self.script_header = False
        self.Clamps.data_mode = 'IC'  # analysis depends on the type of data we have.
        self.clear_results()

        # --------------graphical elements-----------------
        self._sizeHint = (1280, 900)  # try to establish size of window
        self.ctrlWidget = Qt.QWidget()
        self.ctrl = Ui_Form()
        self.ctrl.setupUi(self.ctrlWidget)
        self.main_layout = pg.GraphicsView()  # instead of GraphicsScene?
        # make fixed widget for the module output
        self.widget = Qt.QWidget()
        self.gridLayout = Qt.QGridLayout()
        self.widget.setLayout(self.gridLayout)
        self.gridLayout.setContentsMargins(4, 4, 4, 4)
        self.gridLayout.setSpacing(1)
        # Setup basic GUI
        self._elements_ = OrderedDict([
            ('File Loader',
             {'type': 'fileInput', 'size': (170, 50), 'host': self}),
            ('Parameters',
             {'type': 'ctrl', 'object': self.ctrlWidget, 'host': self,
              'size': (160, 700)}),
            ('Plots',
             {'type': 'ctrl', 'object': self.widget, 'pos': ('right',),
              'size': (400, 700)}),
        ])
        self.initializeElements()
        self.file_loader_instance = self.getElement('File Loader', create=True)
        # grab input form the "Ctrl" window
        self.ctrl.IVCurve_Update.clicked.connect(self.updateAnalysis)
        self.ctrl.IVCurve_PrintResults.clicked.connect(
            functools.partial(self.printAnalysis, printnow=True,
                              script_header=True))
        if not matplotlibexporter.HAVE_MPL:
            self.ctrl.IVCurve_MPLExport.setEnabled = False  # make button inactive
        # self.ctrl.IVCurve_MPLExport.clicked.connect(self.matplotlibExport)
        else:
            self.ctrl.IVCurve_MPLExport.clicked.connect(
                functools.partial(matplotlibexporter.matplotlibExport, gridlayout=self.gridLayout,
                                  title=self.filename))
        self.ctrl.IVCurve_KeepAnalysis.clicked.connect(self.resetKeepAnalysis)
        self.ctrl.IVCurve_getFileInfo.clicked.connect(self.get_file_information)
        # RMP-mode change triggers both RMP re-analysis and spike re-analysis
        [self.ctrl.IVCurve_RMPMode.currentIndexChanged.connect(x)
         for x in [self.update_rmpAnalysis, self.analyzeSpikes]]
        self.ctrl.IVCurve_FISI_ISI_button.clicked.connect(self.displayFISI_ISI)
        self.ctrl.dbStoreBtn.clicked.connect(self.dbStoreClicked)
        self.ctrl.IVCurve_OpenScript_Btn.clicked.connect(self.read_script)
        self.ctrl.IVCurve_RunScript_Btn.clicked.connect(self.rerun_script)
        self.ctrl.IVCurve_PrintScript_Btn.clicked.connect(self.Script.print_script_output)
        #self.scripts_form.PSPReversal_ScriptCopy_Btn.clicked.connect(self.copy_script_output)
        #self.scripts_form.PSPReversal_ScriptFormatted_Btn.clicked.connect(self.print_formatted_script_output)
        self.ctrl.IVCurve_ScriptName.setText('None')
        self.layout = self.getElement('Plots', create=True)

        # instantiate the graphs using a gridLayout (also facilitates matplotlib export; see export routine below)
        self.data_plot = pg.PlotWidget()
        self.gridLayout.addWidget(self.data_plot, 0, 0, 3, 1)
        self.label_up(self.data_plot, 'T (s)', 'V (V)', 'Data')

        self.cmd_plot = pg.PlotWidget()
        self.gridLayout.addWidget(self.cmd_plot, 3, 0, 1, 1)
        self.label_up(self.cmd_plot, 'T (s)', 'I (A)', 'Command')

        self.RMP_plot = pg.PlotWidget()
        self.gridLayout.addWidget(self.RMP_plot, 1, 1, 1, 1)
        self.label_up(self.RMP_plot, 'T (s)', 'V (mV)', 'RMP')

        self.fiPlot = pg.PlotWidget()
        self.gridLayout.addWidget(self.fiPlot, 2, 1, 1, 1)
        self.label_up(self.fiPlot, 'I (pA)', 'Spikes (#)', 'F-I')

        self.fslPlot = pg.PlotWidget()
        self.gridLayout.addWidget(self.fslPlot, 3, 1, 1, 1)
        self.label_up(self.fslPlot, 'I (pA)', 'Fsl/Fisi (ms)', 'FSL/FISI')

        self.IV_plot = pg.PlotWidget()
        self.gridLayout.addWidget(self.IV_plot, 0, 1, 1, 1)
        self.label_up(self.IV_plot, 'I (pA)', 'V (V)', 'I-V')
        # give the raw-data row more vertical space than the analysis rows
        for row, s in enumerate([20, 10, 10, 10]):
            self.gridLayout.setRowStretch(row, s)

        #    self.tailPlot = pg.PlotWidget()
        #    self.gridLayout.addWidget(self.fslPlot, 3, 1, 1, 1)
        #    self.label_up(self.tailPlot, 'V (V)', 'I (A)', 'Tail Current')

        # Add a color scale
        self.color_scale = pg.GradientLegend((20, 150), (-10, -10))
        self.data_plot.scene().addItem(self.color_scale)
        self.ctrl.pushButton.clicked.connect(functools.partial(self.initialize_regions,
                                                               reset=True))
def clear_results(self):
"""
Clear results resets variables.
This is typically needed every time a new data set is loaded.
"""
self.filename = ''
self.r_in = 0.0
self.tau = 0.0
self.adapt_ratio = 0.0
self.spikes_counted = False
self.nospk = []
self.spk = []
self.Sequence = ''
self.ivss = [] # steady-state IV (window 2)
self.ivpk = [] # peak IV (window 1)
self.fsl = [] # first spike latency
self.fisi = [] # first isi
self.rmp = [] # resting membrane potential during sequence
self.analysis_summary = {}
self.script_header = True
    def resetKeepAnalysis(self):
        """Reset the kept-analysis count to zero."""
        self.keep_analysis_count = 0  # reset counter.
def show_or_hide(self, lrregion='', forcestate=None):
"""
Show or hide specific regions in the display
Parameters
----------
lrregion : str, default: ''
name of the region('lrwin0', etc)
forcestate : None or Boolean, default: None
Set True to force the show status, False to Hide.
If forcestate is None, then uses the region's 'shstate' value
to set the state.
Returns
-------
nothing
"""
if lrregion == '':
print('PSPReversal:show_or_hide:: lrregion is {:<s}'.format(lrregion))
return
region = self.regions[lrregion]
if forcestate is not None:
if forcestate:
region['region'].show()
region['state'].setChecked(Qt.Qt.Checked)
region['shstate'] = True
else:
region['region'].hide()
region['state'].setChecked(Qt.Qt.Unchecked)
region['shstate'] = False
else:
if not region['shstate']:
region['region'].show()
region['state'].setChecked(Qt.Qt.Checked)
region['shstate'] = True
else:
region['region'].hide()
region['state'].setChecked(Qt.Qt.Unchecked)
region['shstate'] = False
def displayFISI_ISI(self):
"""
Control display of first interspike interval/first spike latency
versus ISI over time.
"""
if self.showFISI: # currently showin FISI/FSL; switch to ISI over time
self.showFISI = False
else:
self.showFISI = True
self.update_SpikePlots()
    def initialize_regions(self, reset=False):
        """
        initialize_regions sets the linear regions on the displayed data

        Here we create the analysis regions in the plot. However, this should
        NOT happen until the plot has been created.
        Note that the information about each region is held in a dictionary,
        which for each region has a dictionary that accesses the UI and class
        methods for that region. This later simplifies the code and reduces
        repetitive sections.

        Parameters
        ----------
        reset : bool, default: False
            when True, re-apply each region's stored show/hide state even
            though the regions already exist.
        """
        # hold all the linear regions in a dictionary
        # Each entry holds: 'name', the pyqtgraph LinearRegionItem ('region'),
        # the plot it lives on ('plot'), the show/hide checkbox ('state'), an
        # internal copy of the checkbox state ('shstate'), an optional mode
        # widget ('mode'), the start/stop spin boxes, the analysis callback
        # ('updater', may be None) and display units for the spin boxes.
        if not self.regions_exist:
            self.regions['lrleak'] = {'name': 'leak',  # use a "leak" window
                                      'region': pg.LinearRegionItem([0, 1], orientation=pg.LinearRegionItem.Horizontal,
                                                                    brush=pg.mkBrush(255, 255, 0, 50.)),
                                      'plot': self.cmd_plot,
                                      'state': self.ctrl.IVCurve_subLeak,
                                      'shstate': False,  # keep internal copy of the state
                                      'mode': self.ctrl.IVCurve_subLeak.isChecked(),
                                      'start': self.ctrl.IVCurve_LeakMin,
                                      'stop': self.ctrl.IVCurve_LeakMax,
                                      'updater': self.updateAnalysis,
                                      'units': 'pA'}
            self.ctrl.IVCurve_subLeak.region = self.regions['lrleak']['region']  # save region with checkbox
            self.regions['lrwin0'] = {'name': 'win0',  # peak window
                                      'region': pg.LinearRegionItem([0, 1],
                                                                    brush=pg.mkBrush(128, 128, 128, 50.)),
                                      'plot': self.data_plot,
                                      'state': self.ctrl.IVCurve_showHide_lrpk,
                                      'shstate': True,  # keep internal copy of the state
                                      'mode': None,
                                      'start': self.ctrl.IVCurve_pkTStart,
                                      'stop': self.ctrl.IVCurve_pkTStop,
                                      'updater': self.updateAnalysis,
                                      'units': 'ms'}
            self.ctrl.IVCurve_showHide_lrpk.region = self.regions['lrwin0']['region']  # save region with checkbox
            self.regions['lrwin1'] = {'name': 'win2',  # ss window
                                      'region': pg.LinearRegionItem([0, 1],
                                                                    brush=pg.mkBrush(0, 0, 255, 50.)),
                                      'plot': self.data_plot,
                                      'state': self.ctrl.IVCurve_showHide_lrss,
                                      'shstate': True,  # keep internal copy of the state
                                      'mode': None,
                                      'start': self.ctrl.IVCurve_ssTStart,
                                      'stop': self.ctrl.IVCurve_ssTStop,
                                      'updater': self.updateAnalysis,
                                      'units': 'ms'}
            self.ctrl.IVCurve_showHide_lrss.region = self.regions['lrwin1']['region']  # save region with checkbox
            # resting membrane potential window (before the step)
            self.regions['lrrmp'] = {'name': 'rmp',
                                     'region': pg.LinearRegionItem([0, 1],
                                                                   brush=pg.mkBrush
                                                                   (255, 255, 0, 25.)),
                                     'plot': self.data_plot,
                                     'state': self.ctrl.IVCurve_showHide_lrrmp,
                                     'shstate': True,  # keep internal copy of the state
                                     'mode': None,
                                     'start': self.ctrl.IVCurve_rmpTStart,
                                     'stop': self.ctrl.IVCurve_rmpTStop,
                                     'updater': self.update_rmpAnalysis,
                                     'units': 'ms'}
            self.ctrl.IVCurve_showHide_lrrmp.region = self.regions['lrrmp']['region']  # save region with checkbox
            # establish that measurement is on top, exclusion is next, and reference is on bottom
            self.regions['lrtau'] = {'name': 'tau',
                                     'region': pg.LinearRegionItem([0, 1],
                                                                   brush=pg.mkBrush
                                                                   (255, 255, 0, 25.)),
                                     'plot': self.data_plot,
                                     'state': self.ctrl.IVCurve_showHide_lrtau,
                                     'shstate': False,  # keep internal copy of the state
                                     'mode': None,
                                     'start': self.ctrl.IVCurve_tau2TStart,
                                     'stop': self.ctrl.IVCurve_tau2TStop,
                                     'updater': self.update_Tauh,
                                     'units': 'ms'}
            self.ctrl.IVCurve_showHide_lrtau.region = self.regions['lrtau']['region']  # save region with checkbox
            # z-values control stacking order of the overlapping regions
            self.regions['lrwin0']['region'].setZValue(500)
            self.regions['lrwin1']['region'].setZValue(100)
            self.regions['lrtau']['region'].setZValue(1000)
            self.regions['lrrmp']['region'].setZValue(1000)
            self.regions['lrleak']['region'].setZValue(1000)
            for regkey, reg in self.regions.items():  # initialize region states
                self.show_or_hide(lrregion=regkey, forcestate=reg['shstate'])
            # hook each region into its plot, its checkbox and its analysis callback
            for regkey, reg in self.regions.items():
                reg['plot'].addItem(reg['region'])
                reg['state'].clicked.connect(functools.partial(self.show_or_hide,
                                                               lrregion=regkey))
                if reg['updater'] is not None:  # some regions carry no analysis callback
                    reg['region'].sigRegionChangeFinished.connect(
                        functools.partial(reg['updater'], region=reg['name']))
        if reset:
            for regkey, reg in self.regions.items():  # initialize region states
                self.show_or_hide(lrregion=regkey, forcestate=reg['shstate'])
        # annotate the start/stop spin boxes with their units
        for reg in self.regions.values():
            for s in ['start', 'stop']:
                reg[s].setSuffix(' ' + reg['units'])
        self.regions_exist = True
def get_file_information(self, default_dh=None):
"""
get_file_information reads the sequence information from the
currently selected data file
Two-dimensional sequences are supported.
Parameter
---------
default_dh : data handle, default None
the data handle to use to access the file information
Return
------
nothing:
"""
if default_dh is None:
dh = self.file_loader_instance.selectedFiles()
else:
dh = default_dh
if not dh or len(dh) == 0: # when using scripts, the fileloader may not know..
return
dh = dh[0] # only the first file
sequence = self.dataModel.listSequenceParams(dh)
keys = list(sequence.keys())
leftseq = [str(x) for x in sequence[keys[0]]]
if len(keys) > 1:
rightseq = [str(x) for x in sequence[keys[1]]]
else:
rightseq = []
leftseq.insert(0, 'All')
rightseq.insert(0, 'All')
### specific to our program - relocate
self.ctrl.IVCurve_Sequence1.clear()
self.ctrl.IVCurve_Sequence2.clear()
self.ctrl.IVCurve_Sequence1.addItems(leftseq)
self.ctrl.IVCurve_Sequence2.addItems(rightseq)
self.sequence = sequence
def updaterStatus(self, mode='on'):
"""
Change the auto updater status
"""
for regkey, reg in self.regions.items():
if mode in ['on', 'On', True]:
self.doUpdates = True
reg['region'].sigRegionChangeFinished.connect(
functools.partial(reg['updater'], region=reg['name']))
if mode in ['off', 'Off', None, False]:
self.doUpdates = False
try:
reg['region'].sigRegionChangeFinished.disconnect()
except: # may already be disconnected...so fail gracefully
pass
    def loadFileRequested(self, dh, analyze=True, bridge=None):
        """
        loadFileRequested is called by "file loader" when a file is requested.
        FileLoader is provided by the AnalysisModule class.

        This function loads all of the successive records from the specified
        protocol. Ancillary information from the protocol is stored in class
        variables. Extracts information about the commands, sometimes using a
        rather simplified set of assumptions. Much of the work for reading
        the data is performed in the GetClamps class in PatchEPhys.

        :param dh: the directory handle (or list of handles) representing the
            selected entitites from the FileLoader in the Analysis Module
        :param analyze: when True (default), run the full analysis after
            loading; scripts may pass False to defer analysis
        :param bridge: optional bridge-balance resistance; when given it is
            applied to the traces and pushed to the GUI spin box
        :modifies: plots, sequence, data arrays, data mode, etc.
        :return: True if successful, False if no clamp data was found;
            otherwise raises an exception
        """
        self.data_plot.clearPlots()
        self.cmd_plot.clearPlots()
        self.clear_results()
        self.updaterStatus('Off')  # suspend auto-updates while (re)loading
        if len(dh) == 0:
            raise Exception("IVCurve::loadFileRequested: " +
                            "Select an IV protocol directory.")
        if len(dh) != 1:
            raise Exception("IVCurve::loadFileRequested: " +
                            "Can only load one file at a time.")
        self.get_file_information(default_dh=dh)  # Get info from most recent file requested
        dh = dh[0]  # just get the first one
        self.filename = dh.name()
        self.current_dirhandle = dh  # this is critical!
        self.loaded = dh
        self.analysis_summary = self.dataModel.cell_summary(dh)  # get other info as needed for the protocol
        pars = {}  # need to pass some parameters from the GUI
        pars['limits'] = self.ctrl.IVCurve_IVLimits.isChecked()  # checkbox: True if loading limited current range
        pars['cmin'] = self.ctrl.IVCurve_IVLimitMin.value()  # minimum current level to load
        pars['cmax'] = self.ctrl.IVCurve_IVLimitMax.value()  # maximum current level to load
        pars['KeepT'] = self.ctrl.IVCurve_KeepT.isChecked()  # keep timebase
        # sequence selections:
        # pars['sequence'] is a dictionary
        # The dictionary has 'index' (currentIndex()) and 'count' from the GUI
        pars['sequence1'] = {'index': [self.ctrl.IVCurve_Sequence1.currentIndex() - 1]}
        pars['sequence1']['count'] = self.ctrl.IVCurve_Sequence1.count() - 1
        pars['sequence2'] = {'index': [self.ctrl.IVCurve_Sequence2.currentIndex() - 1]}
        pars['sequence2']['count'] = self.ctrl.IVCurve_Sequence2.count() - 1
        ci = self.Clamps.getClampData(dh, pars)
        if ci is None:
            return False
        self.ctrl.IVCurve_dataMode.setText(self.Clamps.data_mode)
        # apply bridge-balance correction: an explicit argument wins over the
        # GUI spin box; a spin box value of 0 means "no correction"
        if bridge is not None:
            self.bridgeCorrection = bridge
            self.ctrl.IVCurve_bridge.setValue(self.bridgeCorrection)
            print('******** Doing bridge correction: ', self.bridgeCorrection)
            self.Clamps.traces = self.Clamps.traces - (self.bridgeCorrection * self.Clamps.cmd_wave)
        else:
            br = self.ctrl.IVCurve_bridge.value()*1e6  # GUI value presumably in MOhm — TODO confirm
            if br != 0.0:
                self.bridgeCorrection = br
                self.Clamps.traces = self.Clamps.traces - (self.bridgeCorrection * self.Clamps.cmd_wave)
            else:
                self.bridgeCorrection = None
        # now plot the data
        self.ctrl.IVCurve_tauh_Commands.clear()
        self.ctrl.IVCurve_tauh_Commands.addItems(ci['cmdList'])
        self.color_scale.setIntColorScale(0, len(ci['dirs']), maxValue=200)
        self.make_map_symbols()
        self.plot_traces()
        self.setup_regions()
        self.get_window_analysisPars()  # prepare the analysis parameters
        self.updaterStatus('on')  # re-enable update status
        if analyze:  # only do this if requested (default). Don't do in script processing ....yet
            self.updateAnalysis()
        return True
    def plot_traces(self, multimode=False):
        """
        Plot the current data traces and the corresponding command waveforms.

        :param multimode: try using "multiline plot routine" to speed up
            plots (no color though); currently a no-op placeholder
        :return: nothing
        """
        if self.ctrl.IVCurve_KeepAnalysis.isChecked():
            self.keep_analysis_count += 1
        else:
            self.keep_analysis_count = 0  # always make sure is reset
        # this is the only way to reset iterators.
        self.color_list = itertools.cycle(self.colors)
        self.symbol_list = itertools.cycle(self.symbols)
        self.clearDecorators()
        self.make_map_symbols()
        self.data_plot.plotItem.clearPlots()
        self.cmd_plot.plotItem.clearPlots()
        ntr = self.Clamps.traces.shape[0]  # number of traces
        self.data_plot.setDownsampling(auto=False, mode='mean')
        self.data_plot.setClipToView(False)  # setting True deletes some points used for decoration of spikes by shape
        self.cmd_plot.setDownsampling(auto=False, mode='mean')
        self.cmd_plot.setClipToView(True)  # can leave this true since we do not put symbols on the plot
        self.data_plot.disableAutoRange()
        self.cmd_plot.disableAutoRange()
        cmdindxs = np.unique(self.Clamps.commandLevels)  # find the unique voltages
        colindxs = [int(np.where(cmdindxs == self.Clamps.commandLevels[i])[0]) for i in range(len(self.Clamps.commandLevels))]  # make a list to use
        if multimode:
            pass
        else:
            # color each trace by its command level so data and command plots match
            for i in range(ntr):
                atrace = self.Clamps.traces[i]
                acmdwave = self.Clamps.cmd_wave[i]
                self.data_plot.plot(x=self.Clamps.time_base, y=atrace, downSample=10, downSampleMethod='mean',
                                    pen=pg.intColor(colindxs[i], len(cmdindxs), maxValue=255))
                self.cmd_plot.plot(x=self.Clamps.time_base, y=acmdwave, downSample=10, downSampleMethod='mean',
                                   pen=pg.intColor(colindxs[i], len(cmdindxs), maxValue=255))
        # axis labels depend on the recording mode
        if self.Clamps.data_mode in self.dataModel.ic_modes:
            self.label_up(self.data_plot, 'T (s)', 'V (V)', 'Data')
            self.label_up(self.cmd_plot, 'T (s)', 'I (%s)' % self.Clamps.command_units, 'Data')
        elif self.Clamps.data_mode in self.dataModel.vc_modes:  # voltage clamp
            self.label_up(self.data_plot, 'T (s)', 'I (A)', 'Data')
            self.label_up(self.cmd_plot, 'T (s)', 'V (%s)' % self.Clamps.command_units, 'Data')
        else:  # mode is not known: plot both as V
            self.label_up(self.data_plot, 'T (s)', 'V (V)', 'Data')
            self.label_up(self.cmd_plot, 'T (s)', 'V (%s)' % self.Clamps.command_units, 'Data')
        self.data_plot.autoRange()
        self.cmd_plot.autoRange()
def setup_regions(self):
"""
Initialize the positions of the lr regions on the display.
We attempt to use a logical set of values based on the timing of command steps
and stimulus events
:return:
"""
self.initialize_regions() # now create the analysis regions, if not already existing
if self.ctrl.IVCurve_KeepT.isChecked() is False: # change regions; otherwise keep...
tstart_pk = self.Clamps.tstart
tdur_pk = self.Clamps.tdur * 0.4 # use first 40% of trace for peak
tstart_ss = self.Clamps.tstart + 0.75 * self.Clamps.tdur
tdur_ss = self.Clamps.tdur * 0.25
tstart_tau = self.Clamps.tstart + 0.1 * self.Clamps.tdur
tdur_tau = 0.9 * self.Clamps.tdur
# tauh window
self.regions['lrtau']['region'].setRegion([tstart_tau,
tstart_tau + tdur_tau])
# peak voltage window
self.regions['lrwin0']['region'].setRegion([tstart_pk,
tstart_pk + tdur_pk])
# steady-state meausurement:
self.regions['lrwin1']['region'].setRegion([tstart_ss,
tstart_ss + tdur_ss])
# rmp measurement
self.regions['lrrmp']['region'].setRegion([0., self.Clamps.tstart * 0.9]) # rmp window
# print 'rmp window region: ', self.Clamps.tstart * 0.9
for r in ['lrtau', 'lrwin0', 'lrwin1', 'lrrmp']:
self.regions[r]['region'].setBounds([0., np.max(self.Clamps.time_base)]) # limit regions to data
def get_window_analysisPars(self):
"""
Retrieve the settings of the lr region windows, and some other general values
in preparation for analysis
:return:
"""
self.analysis_parameters = {} # start out empty so we are not fooled by priors
for region in ['lrleak', 'lrwin0', 'lrwin1', 'lrrmp', 'lrtau']:
rgninfo = self.regions[region]['region'].getRegion() # from the display
self.regions[region]['start'].setValue(rgninfo[0] * 1.0e3) # report values to screen
self.regions[region]['stop'].setValue(rgninfo[1] * 1.0e3)
self.analysis_parameters[region] = {'times': rgninfo}
# for region in ['lrwin0', 'lrwin1', 'lrwin2']:
# if self.regions[region]['mode'] is not None:
# self.analysis_parameters[region]['mode'] = self.regions[region]['mode'].currentText()
# self.get_alternation() # get values into the analysisPars dictionary
# self.get_baseline()
# self.get_junction()
def updateAnalysis(self, presets=None, region=None):
"""updateAnalysis re-reads the time parameters and re-analyzes the spikes"""
# print 'self.Script.script: ', self.Script.script['Cells'].keys()
if presets in [True, False]:
presets = None
# print '\n\n*******\n', traceback.format_stack(limit=7)
if presets is not None and type(presets) == type({}): # copy from dictionary of presets into analysis parameters
for k in presets.keys():
self.analysis_summary[k] = presets[k]
if 'SpikeThreshold' in presets.keys():
self.ctrl.IVCurve_SpikeThreshold.setValue(float(presets['SpikeThreshold']))
#print 'set threshold to %f' % float(presets['SpikeThreshold'])
if 'bridgeCorrection' in presets.keys():
self.bridgeCorrection = presets['bridgeCorrection']
print('####### BRIDGE CORRRECTION #######: ', self.bridgeCorrection)
else:
self.bridgeCorrection = 0.
self.get_window_analysisPars()
# print 'updateanalysis: readparsupdate'
self.readParsUpdate(clearFlag=True, pw=False)
    def readParsUpdate(self, clearFlag=False, pw=False):
        """
        Read the parameter window entries, set the lr regions to the values
        in the window, and do an update on the analysis.

        Spikes are analyzed first because the spike counts determine which
        traces are included in the other measurements.

        Parameters
        ----------
        clearFlag : Boolean, False
            passed through to the rmp/peak analysis routines; note it is
            overwritten below by the value returned from map_symbol()
        pw : Boolean, False
            passed through to the rmp/peak analysis routines
        """
        if not self.doUpdates:
            return
        # analyze spikes first (gets information on which traces to exclude/include for other calculations)
        self.analyzeSpikes()
        self.analysis_summary['tauh'] = np.nan  # define these because they may not get filled...
        self.analysis_summary['Gh'] = np.nan
        (pen, filledbrush, emptybrush, symbol, n, clearFlag) = self.map_symbol()
        # update RMP first as we might need it for the others.
        if self.ctrl.IVCurve_showHide_lrrmp.isChecked():
            rgnx1 = self.ctrl.IVCurve_rmpTStart.value() / 1.0e3  # GUI values are in ms; regions want s
            rgnx2 = self.ctrl.IVCurve_rmpTStop.value() / 1.0e3
            self.regions['lrrmp']['region'].setRegion([rgnx1, rgnx2])
            self.update_rmpAnalysis(clear=clearFlag, pw=pw)
        if self.ctrl.IVCurve_showHide_lrss.isChecked():
            rgnx1 = self.ctrl.IVCurve_ssTStart.value() / 1.0e3
            rgnx2 = self.ctrl.IVCurve_ssTStop.value() / 1.0e3
            self.regions['lrwin1']['region'].setRegion([rgnx1, rgnx2])
            self.update_ssAnalysis()
        if self.ctrl.IVCurve_showHide_lrpk.isChecked():
            rgnx1 = self.ctrl.IVCurve_pkTStart.value() / 1.0e3
            rgnx2 = self.ctrl.IVCurve_pkTStop.value() / 1.0e3
            self.regions['lrwin0']['region'].setRegion([rgnx1, rgnx2])
            self.update_pkAnalysis(clear=clearFlag, pw=pw)
        if self.ctrl.IVCurve_subLeak.isChecked():
            rgnx1 = self.ctrl.IVCurve_LeakMin.value() / 1e3
            rgnx2 = self.ctrl.IVCurve_LeakMax.value() / 1e3
            self.regions['lrleak']['region'].setRegion([rgnx1, rgnx2])
            # leak subtraction affects both steady-state and peak measurements
            self.update_ssAnalysis()
            self.update_pkAnalysis()
        if self.ctrl.IVCurve_showHide_lrtau.isChecked():
            # include tau in the list... if the tool is selected
            rgnx1 = self.ctrl.IVCurve_tau2TStart.value() / 1e3
            rgnx2 = self.ctrl.IVCurve_tau2TStop.value() / 1e3
            self.regions['lrtau']['region'].setRegion([rgnx1, rgnx2])
            self.update_Tauh()
        # NOTE(review): currentIndexChanged is a Qt *signal* object and is
        # always truthy, so this branch always executes — confirm whether a
        # currentIndex() comparison was intended here.
        if self.ctrl.IVCurve_PeakMode.currentIndexChanged:
            self.peakmode = self.ctrl.IVCurve_PeakMode.currentText()
            self.update_pkAnalysis()
        self.analyzeSpikeShape()  # finally do the spike shape
        self.ctrl.IVCurve_bridge.setValue(0.)  # reset bridge value after analysis.
def read_script(self, name=''):
"""
read a script file from disk, and use that information to drive the analysis
:param name:
:return:
"""
self.script_name = self.Script.read_script()
if self.script_name is None:
print('Failed to read script')
self.ctrl.IVCurve_ScriptName.setText('None')
return
self.ctrl.IVCurve_ScriptName.setText(os.path.basename(self.script_name))
self.Script.run_script()
def rerun_script(self):
"""
revalidate and run the current script
:return:
"""
self.Script.run_script()
    def analyzeSpikes(self):
        """
        analyzeSpikes: Using the threshold set in the control panel, count the
        number of spikes in the stimulation window (self.Clamps.tstart, self.Clamps.tend)
        Updates the spike plot(s).

        The following variables are set:
        self.spikecount: a 1-D numpy array of spike counts, aligned with the
            current (command)
        self.adapt_ratio: the adaptation ratio of the spike train
        self.fsl: a numpy array of first spike latency for each command level
        self.fisi: a numpy array of first interspike intervals for each
            command level
        self.nospk: the indices of command levels where no spike was detected
        self.spk: the indices of command levels were at least one spike
            was detected
        """
        if self.keep_analysis_count == 0:
            clearFlag = True  # fresh analysis: clear previous plot contents
        else:
            clearFlag = False
        self.analysis_summary['FI_Curve'] = None
        if self.Clamps.data_mode not in self.dataModel.ic_modes or self.Clamps.time_base is None:
            # NOTE(review): self.tx is referenced here but is not set anywhere
            # in this module's visible code — confirm it exists, otherwise this
            # error path itself raises AttributeError.
            print('IVCurve::analyzeSpikes: Cannot count spikes, ' +
                  'and dataMode is ', self.Clamps.data_mode, 'and ICModes are: ', self.dataModel.ic_modes, 'tx is: ', self.tx)
            self.spikecount = []
            # plot empty series so any stale FI/FSL data is cleared from view
            self.fiPlot.plot(x=[], y=[], clear=clearFlag, pen='w',
                             symbolSize=6, symbolPen='b',
                             symbolBrush=(0, 0, 255, 200), symbol='s')
            self.fslPlot.plot(x=[], y=[], pen='w', clear=clearFlag,
                              symbolSize=6, symbolPen='g',
                              symbolBrush=(0, 255, 0, 200), symbol='t')
            self.fslPlot.plot(x=[], y=[], pen='w', symbolSize=6,
                              symbolPen='y',
                              symbolBrush=(255, 255, 0, 200), symbol='s')
            return
        twin = self.Clamps.tend - self.Clamps.tstart  # measurements window in seconds
        maxspkrate = 50  # max rate to count in adaptation is 50 spikes/second
        minspk = 4
        maxspk = int(maxspkrate*twin)  # scale max dount by range of spike counts
        threshold = self.ctrl.IVCurve_SpikeThreshold.value() * 1e-3  # GUI is in mV; traces are in V
        self.analysis_summary['SpikeThreshold'] = self.ctrl.IVCurve_SpikeThreshold.value()
        ntr = len(self.Clamps.traces)
        self.spikecount = np.zeros(ntr)
        self.fsl = np.zeros(ntr)
        self.fisi = np.zeros(ntr)
        ar = np.zeros(ntr)  # adaptation ratio per trace, 0 where not measured
        self.allisi = {}
        self.spikes = [[] for i in range(ntr)]
        self.spikeIndices = [[] for i in range(ntr)]
        for i in range(ntr):
            (spikes, spkx) = Utility.findspikes(self.Clamps.time_base, self.Clamps.traces[i],
                                                threshold, t0=self.Clamps.tstart,
                                                t1=self.Clamps.tend,
                                                dt=self.Clamps.sample_interval,
                                                mode='peak',  # best to use peak for detection
                                                interpolate=False,
                                                debug=False)
            if len(spikes) == 0:
                continue
            self.spikes[i] = spikes
            # map spike times back onto sample indices for later shape analysis
            self.spikeIndices[i] = [np.argmin(np.fabs(self.Clamps.time_base-t)) for t in spikes]
            self.spikecount[i] = len(spikes)
            self.fsl[i] = (spikes[0] - self.Clamps.tstart)*1e3  # latency in ms
            if len(spikes) > 1:
                self.fisi[i] = (spikes[1] - spikes[0])*1e3
                self.allisi[i] = np.diff(spikes)*1e3
            # for Adaptation ratio analysis: mean of last ISIs over first ISI,
            # restricted to trains with a usable number of spikes
            if minspk <= len(spikes) <= maxspk:
                misi = np.mean(np.diff(spikes[-3:]))*1e3
                ar[i] = misi / self.fisi[i]
        iAR = np.where(ar > 0)
        self.adapt_ratio = np.mean(ar[iAR])  # only where we made the measurement
        self.analysis_summary['AdaptRatio'] = self.adapt_ratio
        self.ctrl.IVCurve_AR.setText(u'%7.3f' % self.adapt_ratio)
        self.nospk = np.where(self.spikecount == 0)
        self.spk = np.where(self.spikecount > 0)[0]
        self.analysis_summary['FI_Curve'] = np.array([self.Clamps.values, self.spikecount])
        self.spikes_counted = True
        self.update_SpikePlots()
def _timeindex(self, t):
return np.argmin(self.Clamps.time_base-t)
    def analyzeSpikeShape(self, printSpikeInfo=False):
        """
        Analyze the shape of each detected spike (threshold, peak, trough,
        half-width), based on Druckmann et al., Cerebral Cortex, 2013.

        Requires analyzeSpikes() to have filled self.spikes and
        self.spikeIndices. Results are stored per trace/per spike in
        self.spikeShape and summarized into self.analysis_summary; the
        spike markers on the data plot are refreshed at the end.

        Parameters
        ----------
        printSpikeInfo : bool, default: False
            when True, pretty-print the per-spike dictionaries.
        """
        begin_dV = 12.0  # V/s or mV/ms; slope criterion locating spike onset
        ntr = len(self.Clamps.traces)
        self.spikeShape = OrderedDict()
        rmp = np.zeros(ntr)
        iHold = np.zeros(ntr)
        for i in range(ntr):
            if len(self.spikes[i]) == 0:
                continue
            trspikes = OrderedDict()
            if printSpikeInfo:
                print(np.array(self.Clamps.values))
                print(len(self.Clamps.traces))
            # baseline voltage and holding current measured before the step
            (rmp[i], r2) = Utility.measure('mean', self.Clamps.time_base, self.Clamps.traces[i],
                                           0.0, self.Clamps.tstart)
            (iHold[i], r2) = Utility.measure('mean', self.Clamps.time_base, self.Clamps.cmd_wave[i],
                                             0.0, self.Clamps.tstart)
            for j in range(len(self.spikes[i])):
                thisspike = {'trace': i, 'AP_number': j, 'AP_beginIndex': None, 'AP_endIndex': None,
                             'peakIndex': None, 'peak_T': None, 'peak_V': None, 'AP_Latency': None,
                             'AP_beginV': None, 'halfwidth': None, 'trough_T': None,
                             'trough_V': None, 'peaktotroughT': None,
                             'current': None, 'iHold': None,
                             'pulseDuration': None, 'tstart': self.Clamps.tstart}  # initialize the structure
                thisspike['current'] = self.Clamps.values[i] - iHold[i]
                thisspike['iHold'] = iHold[i]
                thisspike['pulseDuration'] = self.Clamps.tend - self.Clamps.tstart  # in seconds
                thisspike['peakIndex'] = self.spikeIndices[i][j]
                thisspike['peak_T'] = self.Clamps.time_base[thisspike['peakIndex']]
                thisspike['peak_V'] = self.Clamps.traces[i][thisspike['peakIndex']]  # max voltage of spike
                thisspike['tstart'] = self.Clamps.tstart
                # find the minimum going forward - that is AHP min
                dt = (self.Clamps.time_base[1]-self.Clamps.time_base[0])
                dv = np.diff(self.Clamps.traces[i])/dt
                k = self.spikeIndices[i][j] + 1
                if j < self.spikecount[i] - 1:  # find end of spike (top of next, or end of trace)
                    kend = self.spikeIndices[i][j+1]
                else:
                    kend = len(self.Clamps.traces[i])
                try:
                    km = np.argmin(dv[k:kend])+k  # find fastst falling point, use that for start of detection
                except:
                    continue
                # trough: minimum voltage between fastest-falling point and spike end
                kmin = np.argmin(self.Clamps.traces[i][km:kend])+km
                thisspike['AP_endIndex'] = kmin
                thisspike['trough_T'] = self.Clamps.time_base[thisspike['AP_endIndex']]
                thisspike['trough_V'] = self.Clamps.traces[i][kmin]
                if thisspike['AP_endIndex'] is not None:
                    thisspike['peaktotrough'] = thisspike['trough_T'] - thisspike['peak_T']
                # search backwards for the spike onset
                k = self.spikeIndices[i][j]-1
                if j > 0:
                    kbegin = self.spikeIndices[i][j-1]  # index to previous spike start
                else:
                    kbegin = k - int(0.002/dt)  # for first spike - 4 msec prior only
                    if kbegin*dt <= self.Clamps.tstart:
                        kbegin = kbegin + int(0.0002/dt)  # 1 msec
                # revise k to start at max of rising phase
                try:
                    km = np.argmax(dv[kbegin:k]) + kbegin
                except:
                    continue
                if (km - kbegin < 1):
                    km = kbegin + int((k - kbegin)/2.) + 1
                kthresh = np.argmin(np.fabs(dv[kbegin:km] - begin_dV)) + kbegin  # point where slope is closest to begin
                thisspike['AP_beginIndex'] = kthresh
                thisspike['AP_Latency'] = self.Clamps.time_base[kthresh]
                thisspike['AP_beginV'] = self.Clamps.traces[i][thisspike['AP_beginIndex']]
                # half-width: time between the half-amplitude crossings on the
                # rising and falling flanks
                if thisspike['AP_beginIndex'] is not None and thisspike['AP_endIndex'] is not None:
                    halfv = 0.5*(thisspike['peak_V'] + thisspike['AP_beginV'])
                    kup = np.argmin(np.fabs(self.Clamps.traces[i][thisspike['AP_beginIndex']:thisspike['peakIndex']] - halfv))
                    kup += thisspike['AP_beginIndex']
                    kdown = np.argmin(np.fabs(self.Clamps.traces[i][thisspike['peakIndex']:thisspike['AP_endIndex']] - halfv))
                    kdown += thisspike['peakIndex']
                    if kup is not None and kdown is not None:
                        thisspike['halfwidth'] = self.Clamps.time_base[kdown] - self.Clamps.time_base[kup]
                        thisspike['hw_up'] = self.Clamps.time_base[kup]
                        thisspike['hw_down'] = self.Clamps.time_base[kdown]
                        thisspike['hw_v'] = halfv
                trspikes[j] = thisspike
            self.spikeShape[i] = trspikes
        if printSpikeInfo:
            pp = pprint.PrettyPrinter(indent=4)
            for m in sorted(self.spikeShape.keys()):
                print('----\nTrace: %d has %d APs' % (m, len(list(self.spikeShape[m].keys()))))
                for n in sorted(self.spikeShape[m].keys()):
                    pp.pprint(self.spikeShape[m][n])
        self.analysis_summary['spikes'] = self.spikeShape  # save in the summary dictionary too
        self.analysis_summary['iHold'] = np.mean(iHold)
        self.analysis_summary['pulseDuration'] = self.Clamps.tend - self.Clamps.tstart
        self.getClassifyingInfo()  # build analysis summary here as well.
        self.clearDecorators()
        self.spikeDecorator()
def spikeDecorator(self):
"""
Put markers on the spikes to visually confirm the analysis of thresholds, etc.
"""
# get colors
cmdindxs = np.unique(self.Clamps.commandLevels) # find the unique voltages
colindxs = [int(np.where(cmdindxs == self.Clamps.commandLevels[i])[0]) for i in range(len(self.Clamps.commandLevels))] # make a list to use
alllats = []
allpeakt = []
allpeakv = []
for i, trace in enumerate(self.spikeShape):
aps = []
tps = []
paps = []
ptps = []
taps = []
ttps = []
hwv = []
tups = []
tdps = []
for j, spk in enumerate(self.spikeShape[trace]):
aps.append(self.spikeShape[trace][spk]['AP_beginV'])
alllats.append(self.spikeShape[trace][spk]['AP_Latency'])
tps.append(self.spikeShape[trace][spk]['AP_Latency'])
u =self.data_plot.plot(tps, aps, pen=None, symbol='o', brush=pg.mkBrush('g'), symbolSize=4)
self.dataMarkers.append(u)
for j, spk in enumerate(self.spikeShape[trace]):
paps.append(self.spikeShape[trace][spk]['peak_V'])
ptps.append(self.spikeShape[trace][spk]['peak_T'])
allpeakt.append(self.spikeShape[trace][spk]['peak_T']+0.01)
allpeakv.append(self.spikeShape[trace][spk]['peak_V'])
# u = self.data_plot.plot(allpeakt, allpeakv, pen=None, symbol='o', brush=pg.mkBrush('r'), size=2)
# self.dataMarkers.append(u)
u = self.data_plot.plot(ptps, paps, pen=None, symbol='t', brush=pg.mkBrush('w'), symbolSize=4)
self.dataMarkers.append(u)
for j, spk in enumerate(self.spikeShape[trace]):
taps.append(self.spikeShape[trace][spk]['trough_V'])
ttps.append(self.spikeShape[trace][spk]['trough_T'])
u = self.data_plot.plot(ttps, taps, pen=None, symbol='+', brush=pg.mkBrush('r'), symbolSize=4)
self.dataMarkers.append(u)
for j, spk in enumerate(self.spikeShape[trace]):
tups.append(self.spikeShape[trace][spk]['hw_up'])
tdps.append(self.spikeShape[trace][spk]['hw_down'])
hwv.append(self.spikeShape[trace][spk]['hw_v'])
u =self.data_plot.plot(tups, hwv, pen=None, symbol='d', brush=pg.mkBrush('c'), symbolSize=4)
self.dataMarkers.append(u)
d =self.data_plot.plot(tdps, hwv, pen=None, symbol='s', brush=pg.mkBrush('c'), symbolSize=4)
self.dataMarkers.append(d)
def clearDecorators(self):
if len(self.dataMarkers) > 0:
[self.dataMarkers[k].clear() for k,m in enumerate(self.dataMarkers)]
self.dataMarkers = []
def getIVCurrentThresholds(self):
# figure out "threshold" for spike, get 150% and 300% points.
nsp = []
icmd = []
for m in sorted(self.spikeShape.keys()):
n = len(self.spikeShape[m].keys()) # number of spikes in the trace
if n > 0:
nsp.append(len(self.spikeShape[m].keys()))
icmd.append(self.spikeShape[m][0]['current'])
try:
iamin = np.argmin(icmd)
except:
raise ValueError('IVCurve:getIVCurrentThresholds - icmd seems to be ? : ', icmd)
imin = np.min(icmd)
ia150 = np.argmin(np.abs(1.5*imin-np.array(icmd)))
iacmdthr = np.argmin(np.abs(imin-self.Clamps.values))
ia150cmdthr = np.argmin(np.abs(icmd[ia150] - self.Clamps.values))
#print 'thr indices and values: ', iacmdthr, ia150cmdthr, self.Clamps.values[iacmdthr], self.Clamps.values[ia150cmdthr]
return (iacmdthr, ia150cmdthr) # return threshold indices into self.Clamps.values array at threshold and 150% point
    def getClassifyingInfo(self):
        """
        Adds the classifying information according to Druckmann et al., Cerebral Cortex, 2013
        to the analysis summary.

        Fills: AP1_Latency, AP1_HalfWidth, AP2_Latency, AP2_HalfWidth (np.inf
        when not measurable), FiringRate and AHP_Depth, all taken from the
        trace at ~150% of threshold current.
        """
        (jthr, j150) = self.getIVCurrentThresholds()  # get the indices for the traces we need to pull data from
        if jthr == j150:
            # degenerate case: threshold and 150% trace coincide; step one
            # trace up instead.
            # NOTE(review): this assumes j150+1 is a valid key of spikeShape —
            # confirm it cannot run past the last spiking trace.
            print('\n%s:' % self.filename)
            print('Threshold current T and 1.5T the same: using next up value for j150')
            print('jthr, j150, len(spikeShape): ', jthr, j150, len(self.spikeShape))
            print('1 ', self.spikeShape[jthr][0]['current']*1e12)
            print('2 ', self.spikeShape[j150+1][0]['current']*1e12)
            print(' >> Threshold current: %8.3f   1.5T current: %8.3f, next up: %8.3f' % (self.spikeShape[jthr][0]['current']*1e12,
                                                                                          self.spikeShape[j150][0]['current']*1e12, self.spikeShape[j150+1][0]['current']*1e12))
            j150 = jthr + 1
        # latency and half-width of the first two spikes at the 150% level
        if len(self.spikeShape[j150]) >= 1 and self.spikeShape[j150][0]['halfwidth'] is not None:
            self.analysis_summary['AP1_Latency'] = (self.spikeShape[j150][0]['AP_Latency'] - self.spikeShape[j150][0]['tstart'])*1e3
            self.analysis_summary['AP1_HalfWidth'] = self.spikeShape[j150][0]['halfwidth']*1e3
        else:
            self.analysis_summary['AP1_Latency'] = np.inf
            self.analysis_summary['AP1_HalfWidth'] = np.inf
        if len(self.spikeShape[j150]) >= 2 and self.spikeShape[j150][1]['halfwidth'] is not None:
            self.analysis_summary['AP2_Latency'] = (self.spikeShape[j150][1]['AP_Latency'] - self.spikeShape[j150][1]['tstart'])*1e3
            self.analysis_summary['AP2_HalfWidth'] = self.spikeShape[j150][1]['halfwidth']*1e3
        else:
            self.analysis_summary['AP2_Latency'] = np.inf
            self.analysis_summary['AP2_HalfWidth'] = np.inf
        rate = len(self.spikeShape[j150])/self.spikeShape[j150][0]['pulseDuration']  # spikes per second, normalized for pulse duration
        # first AHP depth: threshold voltage minus trough voltage of first spike
        AHPDepth = self.spikeShape[j150][0]['AP_beginV'] - self.spikeShape[j150][0]['trough_V']
        self.analysis_summary['FiringRate'] = rate
        self.analysis_summary['AHP_Depth'] = AHPDepth*1e3  # convert to mV
    def update_Tau_membrane(self, peak_time=None, printWindow=False, whichTau=1, vrange=[-5., -20.]):
        """
        Compute the membrane time constant (single exponential with a DC
        offset) from the onset of the response, using the lrpk ('lrwin0')
        window, and only for steps that hyperpolarize the cell by 5 to
        20 mV below rest (or as specified by `vrange`).

        Parameters
        ----------
        peak_time : array or None, default: None
            Per-trace peak times intended to cap the fit window.
            NOTE(review): unconditionally reset to None below, so this
            argument currently has no effect -- confirm whether that
            override is intentional.
        printWindow : Boolean, default: False
            If True, print the fit parameters for each accepted trace.
        whichTau : int, default: 1
            NOTE(review): not used anywhere in this method.
        vrange : list of two floats, default: [-5., -20.]
            Voltage window (mV, relative to baseline) a trace must fall in
            to be fitted. NOTE(review): mutable default argument; safe
            only while callers never mutate it.

        Side effects: sets self.taupars, self.tauwin, self.taufunc,
        self.whichdata, self.tau, self.tau_fitted (plot items) and
        self.analysis_summary['tau'] (in ms), then redraws the fits.
        """
        if len(self.Clamps.commandLevels) == 0:  # probably not ready yet to do the update.
            return
        if self.Clamps.data_mode not in self.dataModel.ic_modes:  # only permit in IC
            return
        rgnpk = list(self.regions['lrwin0']['region'].getRegion())
        Func = 'exp1'  # single exponential fit with DC offset.
        Fits = Fitting.Fitting()
        if self.rmp == []:  # need the resting potential as a fit seed
            self.update_rmpAnalysis()
        initpars = [self.rmp*1e-3, 0.010, 0.01]  # seeds: [DC offset (V), amplitude (V), tau (s)]
        peak_time = None  # NOTE(review): discards the caller's peak_time (see docstring)
        icmdneg = np.where(self.Clamps.commandLevels < -20e-12)
        maxcmd = np.min(self.Clamps.commandLevels)
        ineg = np.where(self.Clamps.commandLevels[icmdneg] < 0.0)
        # NOTE(review): dead branch while peak_time is forced to None above;
        # also `ineg != np.array([])` is an element-wise comparison, not an
        # emptiness test.
        if peak_time is not None and ineg != np.array([]):
            rgnpk[1] = np.max(peak_time[ineg[0]])
        dt = self.Clamps.sample_interval
        # mean voltage over the last 5 ms of the fit window, per trace
        rgnindx = [int((rgnpk[1]-0.005)/dt), int((rgnpk[1])/dt)]
        rmps = self.ivbaseline
        vmeans = np.mean(self.Clamps.traces[:, rgnindx[0]:rgnindx[1]].view(np.ndarray), axis=1) - self.ivbaseline
        # keep only traces whose deflection is inside [vrange[1], vrange[0]] mV
        indxs = np.where(np.logical_and((vrange[0]*1e-3 >= vmeans[ineg]),
                                        (vmeans[ineg] >= vrange[1]*1e-3)))
        indxs = list(indxs[0])
        whichdata = ineg[0][indxs]  # restricts to valid values
        itaucmd = self.Clamps.commandLevels[ineg]
        whichaxis = 0
        fpar = []
        names = []
        okdata = []
        # remove fit curves left over from a previous run
        if len(self.tau_fitted.keys()) > 0:
            [self.tau_fitted[k].clear() for k in self.tau_fitted.keys()]
        self.tau_fitted = {}
        for j, k in enumerate(whichdata):
            # re-plot the raw trace, then fit it over the lrpk window
            self.tau_fitted[j] = self.data_plot.plot(self.Clamps.time_base, self.Clamps.traces[k], pen=pg.mkPen('w'))
            (fparx, xf, yf, namesx) = Fits.FitRegion([k], whichaxis,
                                                     self.Clamps.time_base,
                                                     self.Clamps.traces,
                                                     dataType='2d',
                                                     t0=rgnpk[0], t1=rgnpk[1],
                                                     fitFunc=Func,
                                                     fitPars=initpars,
                                                     method='SLSQP',
                                                     bounds=[(-0.1, 0.1), (-0.1, 0.1), (0.005, 0.30)])
            if not fparx:
                raise Exception('IVCurve::update_Tau_membrane: Charging tau fitting failed - see log')
            if fparx[0][1] < 2.5e-3:  # amplitude must be > 2.5 mV to be useful
                continue
            fpar.append(fparx[0])
            names.append(namesx[0])
            okdata.append(k)
        self.taupars = fpar
        self.tauwin = rgnpk
        self.taufunc = Func
        self.whichdata = okdata
        taus = []
        for j in range(len(fpar)):
            outstr = ""
            taus.append(fpar[j][2])  # fpar[j][2] is the fitted time constant
            for i in range(0, len(names[j])):
                outstr += '%s = %f, ' % (names[j][i], fpar[j][i])
            if printWindow:
                print("FIT(%d, %.1f pA): %s " %
                      (whichdata[j], itaucmd[j] * 1e12, outstr))
        meantau = np.mean(taus)
        self.ctrl.IVCurve_Tau.setText(u'%18.1f ms' % (meantau * 1.e3))
        self.tau = meantau
        self.analysis_summary['tau'] = self.tau*1.e3  # stored in ms
        tautext = 'Mean Tau: %8.1f'
        if printWindow:
            print(tautext % (meantau * 1e3))
        self.show_tau_plot()
def show_tau_plot(self):
Fits = Fitting.Fitting()
fitPars = self.taupars
xFit = np.zeros((len(self.taupars), 500))
for i in range(len(self.taupars)):
xFit[i,:] = np.arange(0, self.tauwin[1]-self.tauwin[0], (self.tauwin[1]-self.tauwin[0])/500.)
yFit = np.zeros((len(fitPars), xFit.shape[1]))
fitfunc = Fits.fitfuncmap[self.taufunc]
if len(self.tau_fits.keys()) > 0:
[self.tau_fits[k].clear() for k in self.tau_fits.keys()]
self.tau_fits = {}
for k, whichdata in enumerate(self.whichdata):
yFit[k] = fitfunc[0](fitPars[k], xFit[k], C=None) # +self.ivbaseline[whichdata]
self.tau_fits[k] = self.data_plot.plot(xFit[k]+self.tauwin[0], yFit[k], pen=pg.mkPen('r', width=2, style=Qt.Qt.DashLine))
    def update_Tauh(self, region=None, printWindow=False):
        """Compute tau_h (single exponential) over the lrtau window, for
        the step closest to the current level selected in the GUI.

        Parameters
        ----------
        region : dummy argument, default : None
            unused; kept for interface compatibility
        printWindow : Boolean, default : False
            if True, print the fit parameters

        Also computes the ratio of the sag from the peak (lrwin0 region)
        to the steady state (lrwin1 region), and an estimate of Gh from
        the peak and steady-state chord conductances. Based on analysis
        in Fujino and Oertel, J. Neuroscience 2001, to type cells based
        on different Ih kinetics and magnitude.

        Side effects: sets self.neg_cmd, self.neg_vrmp, self.neg_pk,
        self.neg_ss, self.tau2, self.Gh, analysis_summary['tauh'/'Gh'],
        self.tauh_fitted / self.tauh_fits plot items, and GUI fields.
        """
        # default to NaN so a skipped/failed fit is visible downstream
        self.analysis_summary['tauh'] = np.nan
        self.analysis_summary['Gh'] = np.nan
        if not self.ctrl.IVCurve_showHide_lrtau.isChecked():
            return
        rgn = self.regions['lrtau']['region'].getRegion()
        Func = 'exp1'  # single exponential fit to the whole region
        Fits = Fitting.Fitting()
        initpars = [-80.0 * 1e-3, -10.0 * 1e-3, 50.0 * 1e-3]  # seed guesses for the fit
        # find the current level that is closest to the target current
        s_target = self.ctrl.IVCurve_tauh_Commands.currentIndex()
        itarget = self.Clamps.values[s_target]  # retrieve actual value from commands
        self.neg_cmd = itarget
        idiff = np.abs(np.array(self.Clamps.commandLevels) - itarget)
        amin = np.argmin(idiff)  # amin appears to be the same as s_target
        # target trace (as selected in cmd drop-down list):
        target = self.Clamps.traces[amin]
        # Vrmp approximation: median of the pre-step baseline, in mV
        vrmp = np.median(target['Time': 0.0:self.Clamps.tstart - 0.005]) * 1000.
        self.neg_vrmp = vrmp
        # peak (sag minimum) and steady-state voltages, in mV
        pkRgn = self.regions['lrwin0']['region'].getRegion()
        ssRgn = self.regions['lrwin1']['region'].getRegion()
        vpk = target['Time': pkRgn[0]:pkRgn[1]].min() * 1000
        self.neg_pk = (vpk - vrmp) / 1000.  # deflection converted back to volts
        vss = np.median(target['Time': ssRgn[0]:ssRgn[1]]) * 1000
        self.neg_ss = (vss - vrmp) / 1000.
        whichdata = [int(amin)]
        itaucmd = [self.Clamps.commandLevels[amin]]
        self.ctrl.IVCurve_tau2TStart.setValue(rgn[0] * 1.0e3)
        self.ctrl.IVCurve_tau2TStop.setValue(rgn[1] * 1.0e3)
        fd = self.Clamps.traces['Time': rgn[0]:rgn[1]][whichdata][0]
        # clear raw-trace overlays left from a previous run
        if len(self.tauh_fitted.keys()) > 0:
            [self.tauh_fitted[k].clear() for k in self.tauh_fitted.keys()]
        self.tauh_fitted = {}
        for k, d in enumerate(whichdata):
            self.tauh_fitted[k] = self.data_plot.plot(fd, pen=pg.mkPen('w'))
        # now do the fit
        whichaxis = 0
        (fpar, xf, yf, names) = Fits.FitRegion(whichdata, whichaxis,
                                               self.Clamps.traces.xvals('Time'),
                                               self.Clamps.traces.view(np.ndarray),
                                               dataType='2d',
                                               t0=rgn[0], t1=rgn[1],
                                               fitFunc=Func,
                                               fitPars=initpars)
        if not fpar:
            raise Exception('IVCurve::update_Tauh: tau_h fitting failed - see log')
        # overlay the fitted curve in dashed blue
        bluepen = pg.mkPen('b', width=2.0, style=Qt.Qt.DashLine)
        if len(self.tauh_fits.keys()) > 0:
            [self.tauh_fits[k].clear() for k in self.tauh_fits.keys()]
        self.tauh_fits = {}
        self.tauh_fits[0] = self.data_plot.plot(xf[0]+rgn[0], yf[0], pen=bluepen)
        s = np.shape(fpar)
        taus = []
        for j in range(0, s[0]):
            outstr = ""
            taus.append(fpar[j][2])  # fpar[j][2] is the fitted time constant
            for i in range(0, len(names[j])):
                outstr += '%s = %f, ' % (names[j][i], fpar[j][i])
            if printWindow:
                print("Ih FIT(%d, %.1f pA): %s " %
                      (whichdata[j], itaucmd[j] * 1e12, outstr))
        meantau = np.mean(taus)
        self.ctrl.IVCurve_Tauh.setText(u'%8.1f ms' % (meantau * 1.e3))
        self.tau2 = meantau
        # sag ratio: steady-state deflection relative to peak deflection
        bovera = (vss - vrmp) / (vpk - vrmp)
        self.ctrl.IVCurve_Ih_ba.setText('%8.1f' % (bovera * 100.))
        self.ctrl.IVCurve_ssAmp.setText('%8.2f' % (vss - vrmp))
        self.ctrl.IVCurve_pkAmp.setText('%8.2f' % (vpk - vrmp))
        if bovera < 0.55 and self.tau2 < 0.015:  # Fujino & Oertel classification criteria
            self.ctrl.IVCurve_FOType.setText('D Stellate')
        else:
            self.ctrl.IVCurve_FOType.setText('T Stellate')
        # estimate of Gh: difference of steady-state and peak chord conductances
        Gpk = itarget / self.neg_pk
        Gss = itarget / self.neg_ss
        self.Gh = Gss - Gpk
        self.analysis_summary['tauh'] = self.tau2*1.e3  # ms
        self.analysis_summary['Gh'] = self.Gh
        self.ctrl.IVCurve_Gh.setText('%8.2f nS' % (self.Gh * 1e9))
    def update_ssAnalysis(self):
        """
        Compute the steady-state IV from the selected time window
        (the 'lrwin1' region).

        Returns
        -------
        nothing.

        modifies:
            ivss, yleak, ivss_cmd, cmd.

        The IV curve is only valid when there are no spikes detected in
        the window. The values in the curve are taken as the mean of the
        current and the voltage in the time window, at each command step.
        We also compute the input resistance.
        For voltage clamp data, we can optionally remove the "leak" current.
        The resulting curve is plotted.
        """
        if self.Clamps.traces is None:
            return
        rgnss = self.regions['lrwin1']['region'].getRegion()
        r1 = rgnss[1]
        if rgnss[1] == rgnss[0]:
            print('Steady-state regions have no width; using 100 msec. window for ss ')
            r1 = rgnss[0] + 0.1
        self.ctrl.IVCurve_ssTStart.setValue(rgnss[0] * 1.0e3)
        self.ctrl.IVCurve_ssTStop.setValue(r1 * 1.0e3)
        data1 = self.Clamps.traces['Time': rgnss[0]:r1]
        if data1.shape[1] == 0 or data1.shape[0] == 1:
            return  # skip it - empty window or only one trace
        self.ivss = []
        # check out whether there are spikes in the window that is selected
        # NOTE(review): threshold and ntr are no longer used below; per-trace
        # spike detection was delegated to analyzeSpikes().
        threshold = self.ctrl.IVCurve_SpikeThreshold.value() * 1e-3
        ntr = len(self.Clamps.traces)
        if not self.spikes_counted:
            print('updatess: spikes not counted yet? ')
            self.analyzeSpikes()
        self.ivss = data1.mean(axis=1)  # all traces
        if self.ctrl.IVCurve_SubBaseline.isChecked():
            self.ivss = self.ivss - self.ivbaseline
        if len(self.nospk) >= 1:
            # Steady-state IV where there are no spikes
            self.ivss = self.ivss[self.nospk]
            self.ivss_cmd = self.Clamps.commandLevels[self.nospk]
            # compute Rin from the SS IV:
            # this makes the assumption that:
            # successive trials are in order (as are commands)
            # commands are not repeated...
            if len(self.ivss_cmd) > 0 and len(self.ivss) > 0:
                # Rin taken as the maximum local slope of the SS IV
                self.r_in = np.max(np.diff
                                   (self.ivss) / np.diff(self.ivss_cmd))
                self.ctrl.IVCurve_Rin.setText(u'%9.1f M\u03A9' % (self.r_in * 1.0e-6))
                self.analysis_summary['Rin'] = self.r_in*1.0e-6
            else:
                self.ctrl.IVCurve_Rin.setText(u'No valid points')
        self.yleak = np.zeros(len(self.ivss))
        if self.ctrl.IVCurve_subLeak.isChecked():
            # scale factor converts the GUI leak limits to base units (A or V)
            if self.Clamps.data_mode in self.dataModel.ic_modes:
                sf = 1e-12
            elif self.Clamps.data_mode in self.dataModel.vc_modes:
                sf = 1e-3
            else:
                sf = 1.0
            (x, y) = Utility.clipdata(self.ivss, self.ivss_cmd,
                                      self.ctrl.IVCurve_LeakMin.value() * sf,
                                      self.ctrl.IVCurve_LeakMax.value() * sf)
            try:
                p = np.polyfit(x, y, 1)  # linear fit
                self.yleak = np.polyval(p, self.ivss_cmd)
                self.ivss = self.ivss - self.yleak
            # NOTE(review): bare except; should be narrowed to the exceptions
            # np.polyfit can actually raise
            except:
                raise ValueError('IVCurve Leak subtraction: no valid points to correct')
        # sort by ascending command level so the IV curve plots cleanly
        isort = np.argsort(self.ivss_cmd)
        self.ivss_cmd = self.ivss_cmd[isort]
        self.ivss = self.ivss[isort]
        self.analysis_summary['IV_Curve_ss'] = [self.ivss_cmd, self.ivss]
        self.update_IVPlot()
def update_pkAnalysis(self, clear=False, pw=False):
"""
Compute the peak IV (minimum) from the selected window
mode can be 'min', 'max', or 'abs'
Parameters
----------
clear : Boolean, False
pw : Boolean, False
pw is passed to update_taumembrane to control printing.
"""
if self.Clamps.traces is None:
return
mode = self.ctrl.IVCurve_PeakMode.currentText()
rgnpk = self.regions['lrwin0']['region'].getRegion()
self.ctrl.IVCurve_pkTStart.setValue(rgnpk[0] * 1.0e3)
self.ctrl.IVCurve_pkTStop.setValue(rgnpk[1] * 1.0e3)
data2 = self.Clamps.traces['Time': rgnpk[0]:rgnpk[1]]
if data2.shape[1] == 0:
return # skip it - window missed the data
# check out whether there are spikes in the window that is selected
# but only in current clamp
nospk = []
peak_pos = None
if self.Clamps.data_mode in self.dataModel.ic_modes:
threshold = self.ctrl.IVCurve_SpikeThreshold.value() * 1e-3
ntr = len(self.Clamps.traces)
if not self.spikes_counted:
print('update_pkAnalysis: spikes not counted')
self.analyzeSpikes()
spikecount = np.zeros(ntr)
# for i in range(ntr):
# (spike, spk) = Utility.findspikes(self.Clamps.time_base, self.Clamps.traces[i],
# threshold,
# t0=rgnpk[0], t1=rgnpk[1],
# dt=self.Clamps.sample_interval,
# mode='schmitt',
# interpolate=False, debug=False)
# if len(spike) == 0:
# continue
# spikecount[i] = len(spike)
# nospk = np.where(spikecount == 0)
# nospk = np.array(nospk)[0]
if mode == 'Min':
self.ivpk = data2.min(axis=1)
peak_pos = np.argmin(data2, axis=1)
elif mode == 'Max':
self.ivpk = data2.max(axis=1)
peak_pos = np.argmax(data2, axis=1)
elif mode == 'Abs': # find largest regardless of the sign ('minormax')
x1 = data2.min(axis=1)
peak_pos1 = np.argmin(data2, axis=1)
x2 = data2.max(axis=1)
peak_pos2 = np.argmax(data2, axis=1)
self.ivpk = np.zeros(data2.shape[0])
for i in range(data2.shape[0]):
if -x1[i] > x2[i]:
self.ivpk[i] = x1[i]
peak_pos = peak_pos1
else:
self.ivpk[i] = x2[i]
peak_pos = peak_pos2
# self.ivpk = np.array([np.max(x1[i], x2[i]) for i in range(data2.shape[0]])
#self.ivpk = np.maximum(np.fabs(data2.min(axis=1)), data2.max(axis=1))
if self.ctrl.IVCurve_SubBaseline.isChecked():
self.ivpk = self.ivpk - self.ivbaseline
if len(self.nospk) >= 1:
# Peak (min, max or absmax voltage) IV where there are no spikes
self.ivpk = self.ivpk[self.nospk]
self.ivpk_cmd = self.Clamps.commandLevels[self.nospk]
else:
self.ivpk_cmd = self.Clamps.commandLevels
self.ivpk = self.ivpk.view(np.ndarray)
if self.ctrl.IVCurve_subLeak.isChecked():
self.ivpk = self.ivpk - self.yleak
# now sort data in ascending command levels
isort = np.argsort(self.ivpk_cmd)
self.ivpk_cmd = self.ivpk_cmd[isort]
self.ivpk = self.ivpk[isort]
self.analysis_summary['IV_Curve_pk'] = [self.ivpk_cmd, self.ivpk]
self.update_IVPlot()
peak_time = self.Clamps.time_base[peak_pos]
self.update_Tau_membrane(peak_time=peak_time, printWindow=pw)
def update_rmpAnalysis(self, **kwargs):
"""
Compute the RMP over time/commands from the selected window
"""
if self.Clamps.traces is None:
return
rgnrmp = self.regions['lrrmp']['region'].getRegion()
self.ctrl.IVCurve_rmpTStart.setValue(rgnrmp[0] * 1.0e3)
self.ctrl.IVCurve_rmpTStop.setValue(rgnrmp[1] * 1.0e3)
data1 = self.Clamps.traces['Time': rgnrmp[0]:rgnrmp[1]]
data1 = data1.view(np.ndarray)
self.ivbaseline = data1.mean(axis=1) # all traces
self.ivbaseline_cmd = self.Clamps.commandLevels
self.rmp = np.mean(self.ivbaseline) * 1e3 # convert to mV
self.ctrl.IVCurve_vrmp.setText('%8.2f' % self.rmp)
self.update_RMPPlot()
self.analysis_summary['RMP'] = self.rmp
def make_map_symbols(self):
"""
Given the current state of things, (keeping the analysis, when
superimposing multiple results, for example),
sets self.currentSymDict with a dict of pen, fill color, empty color, a symbol from
our lists, and a clearflag. Used to overplot different data.
"""
n = self.keep_analysis_count
pen = next(self.color_list)
filledbrush = pen
emptybrush = None
symbol = next(self.symbol_list)
if n == 0:
clearFlag = True
else:
clearFlag = False
self.currentSymDict = {'pen': pen, 'filledbrush': filledbrush,
'emptybrush': emptybrush, 'symbol': symbol,
'n': n, 'clearFlag': clearFlag}
def map_symbol(self):
cd = self.currentSymDict
if cd['filledbrush'] == 'w':
cd['filledbrush'] = pg.mkBrush((128, 128, 128))
if cd['pen'] == 'w':
cd['pen'] = pg.mkPen((128, 128, 128))
self.lastSymbol = (cd['pen'], cd['filledbrush'],
cd['emptybrush'], cd['symbol'],
cd['n'], cd['clearFlag'])
return self.lastSymbol
def update_IVPlot(self):
"""
Draw the peak and steady-sate IV to the I-V window
Note: x axis is always I or V, y axis V or I
"""
if self.ctrl.IVCurve_KeepAnalysis.isChecked() is False:
self.IV_plot.clear()
(pen, filledbrush, emptybrush, symbol, n, clearFlag) = \
self.map_symbol()
if self.Clamps.data_mode in self.dataModel.ic_modes:
if (len(self.ivss) > 0 and
self.ctrl.IVCurve_showHide_lrss.isChecked()):
self.IV_plot.plot(self.ivss_cmd * 1e12, self.ivss * 1e3,
symbol=symbol, pen=pen,
symbolSize=6, symbolPen=pen,
symbolBrush=filledbrush)
if (len(self.ivpk) > 0 and
self.ctrl.IVCurve_showHide_lrpk.isChecked()):
self.IV_plot.plot(self.ivpk_cmd * 1e12, self.ivpk * 1e3,
symbol=symbol, pen=pen,
symbolSize=6, symbolPen=pen,
symbolBrush=emptybrush)
self.label_up(self.IV_plot, 'I (pA)', 'V (mV)', 'I-V (CC)')
if self.Clamps.data_mode in self.dataModel.vc_modes:
if (len(self.ivss) > 0 and
self.ctrl.IVCurve_showHide_lrss.isChecked()):
self.IV_plot.plot(self.ivss_cmd * 1e3, self.ivss * 1e9,
symbol=symbol, pen=pen,
symbolSize=6, symbolPen=pen,
symbolBrush=filledbrush)
if (len(self.ivpk) > 0 and
self.ctrl.IVCurve_showHide_lrpk.isChecked()):
self.IV_plot.plot(self.ivpk_cmd * 1e3, self.ivpk * 1e9,
symbol=symbol, pen=pen,
symbolSize=6, symbolPen=pen,
symbolBrush=emptybrush)
self.label_up(self.IV_plot, 'V (mV)', 'I (nA)', 'I-V (VC)')
    def update_RMPPlot(self):
        """
        Draw the per-trace baseline (RMP) to the RMP window.
        The x axis can be trace start time, command level, or spike count,
        selected by the IVCurve_RMPMode combo box.
        """
        if self.ctrl.IVCurve_KeepAnalysis.isChecked() is False:
            self.RMP_plot.clear()  # start fresh unless overlaying analyses
        if len(self.ivbaseline) > 0:
            (pen, filledbrush, emptybrush, symbol, n, clearFlag) = \
                self.map_symbol()
            mode = self.ctrl.IVCurve_RMPMode.currentIndex()
            # y scale: baseline is a voltage in IC (-> mV), otherwise treated
            # as a current (-> pA)
            if self.Clamps.data_mode in self.dataModel.ic_modes:
                sf = 1e3
                self.RMP_plot.setLabel('left', 'V mV')
            else:
                sf = 1e12
                self.RMP_plot.setLabel('left', 'I (pA)')
            if mode == 0:  # x axis = trace start times
                self.RMP_plot.plot(self.Clamps.trace_StartTimes, sf * np.array(self.ivbaseline),
                                   symbol=symbol, pen=pen,
                                   symbolSize=6, symbolPen=pen,
                                   symbolBrush=filledbrush)
                self.RMP_plot.setLabel('bottom', 'T (s)')
            elif mode == 1:  # x axis = command levels
                # NOTE(review): modes 1 and 2 scale y by a hard-coded 1.e3
                # instead of sf, which would mis-scale non-IC data -- confirm
                # whether these modes are reachable outside current clamp.
                self.RMP_plot.plot(self.Clamps.commandLevels,
                                   1.e3 * np.array(self.ivbaseline), symbolSize=6,
                                   symbol=symbol, pen=pen,
                                   symbolPen=pen, symbolBrush=filledbrush)
                self.RMP_plot.setLabel('bottom', 'I (pA)')
            elif mode == 2:  # x axis = spike counts
                self.RMP_plot.plot(self.spikecount,
                                   1.e3 * np.array(self.ivbaseline), symbolSize=6,
                                   symbol=symbol, pen=pen,
                                   symbolPen=pen, symbolBrush=emptybrush)
                self.RMP_plot.setLabel('bottom', 'Spikes')
            else:
                pass  # unknown mode: leave the plot unchanged
    def update_SpikePlots(self):
        """
        Draw the spike counts to the FI (frequency-current) window and the
        first-spike-latency / interspike-interval data to the FSL window.
        The x axis can be time, current, or spike count, selected by the
        IVCurve_RMPMode combo box. In voltage clamp both plots are simply
        cleared, since spikes are only analyzed in current clamp.
        """
        if self.Clamps.data_mode in self.dataModel.vc_modes:
            self.fiPlot.clear()  # no plots of spikes in VC
            self.fslPlot.clear()
            return
        (pen, filledbrush, emptybrush, symbol, n, clearFlag) = self.map_symbol()
        mode = self.ctrl.IVCurve_RMPMode.currentIndex()  # get x axis mode
        self.spcmd = self.Clamps.commandLevels[self.spk]  # get command levels with spikes
        iscale = 1.0e12  # convert to pA
        yfslsc = 1.0  # y scale factor (NOTE(review): comment said "convert to msec" but value is 1.0)
        if mode == 0:  # plot with time as x axis
            xfi = self.Clamps.trace_StartTimes
            xfsl = self.Clamps.trace_StartTimes
            select = range(len(self.Clamps.trace_StartTimes))
            xlabel = 'T (s)'
        elif mode == 1:  # plot with current as x
            select = self.spk
            xfi = self.Clamps.commandLevels * iscale
            xfsl = self.spcmd * iscale
            xlabel = 'I (pA)'
        elif mode == 2:  # plot with spike counts as x
            xfi = self.spikecount
            xfsl = self.spikecount
            select = range(len(self.spikecount))
            xlabel = 'Spikes (N)'
        else:
            return  # mode not in available list
        # FI curve: spike count per trace against the chosen x axis
        self.fiPlot.plot(x=xfi, y=self.spikecount, clear=clearFlag,
                         symbolSize=6,
                         symbol=symbol, pen=pen,
                         symbolPen=pen, symbolBrush=filledbrush)
        fslmax = 0.
        if self.showFISI:
            # first spike latency (filled) and first ISI (hollow) vs x axis
            self.fslPlot.plot(x=xfsl, y=self.fsl[select] * yfslsc, clear=clearFlag,
                              symbolSize=6,
                              symbol=symbol, pen=pen,
                              symbolPen=pen, symbolBrush=filledbrush)
            self.fslPlot.plot(x=xfsl, y=self.fisi[select] * yfslsc, symbolSize=6,
                              symbol=symbol, pen=pen,
                              symbolPen=pen, symbolBrush=emptybrush)
            if len(xfsl) > 0:
                self.fslPlot.setXRange(0.0, np.max(xfsl))
                self.fslPlot.setYRange(0., max(max(self.fsl[select]), max(self.fisi[select])))
            ylabel = 'Fsl/Fisi (ms)'
            xfsllabel = xlabel
            self.fslPlot.setTitle('FSL/FISI')
        else:
            # alternative view: full ISI sequence per trace vs spike number
            maxspk = 0
            maxisi = 0.
            clear = clearFlag  # only clear the plot for the first trace
            for i, k in enumerate(self.allisi.keys()):
                nspk = len(self.allisi[k])
                xisi = np.arange(nspk)
                self.fslPlot.plot(x=xisi, y=self.allisi[k] * yfslsc, clear=clear,
                                  symbolSize=6,
                                  symbol=symbol, pen=pen,
                                  symbolPen=pen, symbolBrush=filledbrush)
                clear = False
                maxspk = max(nspk, maxspk)
                maxisi = max(np.max(self.allisi[k]), maxisi)
            self.fslPlot.setXRange(0.0, maxspk)
            self.fslPlot.setYRange(0.0, maxisi)
            xfsllabel = 'Spike Number'
            ylabel = 'ISI (s)'
            self.fslPlot.setTitle('ISI vs. Spike Number')
        self.fiPlot.setLabel('bottom', xlabel)
        self.fslPlot.setLabel('bottom', xfsllabel)
        self.fslPlot.setLabel('left', ylabel)
    def printAnalysis(self, printnow=True, script_header=True, copytoclipboard=False):
        """
        Print the analysis summary information (Cell, protocol, etc)
        in a nicely formatted version to the terminal.
        The output can be copied to another program (excel, prism) for
        further analysis.

        Parameters
        ----------
        printnow : Boolean, optional
            Set true to print to terminal, default: True
        script_header : Boolean, optional
            Set to print the header line, default: True
        copytoclipboard : Boolean, optional
            copy the text to the system clipboard, default: False

        Return
        ------
        ltxt : string
            The text that would be printed. Might be useful to capture
            for other purposes
        """
        # Select the output template: the instance template for IC (and the
        # literal 'vc' mode), otherwise a local synaptic-analysis template.
        if self.Clamps.data_mode in self.dataModel.ic_modes or self.Clamps.data_mode == 'vc':
            data_template = self.data_template
        else:
            # NOTE(review): the code below indexes data_template[key][0] as a
            # column width and [1] as a format string, which assumes
            # (width, format) entries; the plain format strings in this local
            # template would be indexed character-by-character instead --
            # confirm against the structure of self.data_template.
            data_template = (
                OrderedDict([('ElapsedTime', '{:>8.2f}'), ('HoldV', '{:>5.1f}'), ('JP', '{:>5.1f}'),
                             ('Rs', '{:>6.2f}'), ('Cm', '{:>6.1f}'), ('Ru', '{:>6.2f}'),
                             ('Erev', '{:>6.2f}'),
                             ('gsyn_Erev', '{:>9.2f}'), ('gsyn_60', '{:>7.2f}'), ('gsyn_13', '{:>7.2f}'),
                             ('I_ionic+', '{:>8.3f}'), ('I_ionic-', '{:>8.3f}'), ('ILeak', '{:>7.3f}'),
                             ('win1Start', '{:>9.3f}'), ('win1End', '{:>7.3f}'),
                             ('win2Start', '{:>9.3f}'), ('win2End', '{:>7.3f}'),
                             ('win0Start', '{:>9.3f}'), ('win0End', '{:>7.3f}'),
                             ]))
        # summary table header is written anew for each cell
        htxt = ''
        if script_header:
            htxt = '{:34s}\t{:15s}\t{:24s}\t'.format("Cell", "Genotype", "Protocol")
            for k in data_template.keys():
                cnv = '{:<%ds}' % (data_template[k][0])  # left-justified column of template width
                htxt += (cnv + '\t').format(k)
            script_header = False
            htxt += '\n'
        ltxt = ''
        if 'Genotype' not in self.analysis_summary.keys():
            self.analysis_summary['Genotype'] = 'Unknown'
        ltxt += '{:34s}\t{:15s}\t{:24s}\t'.format(self.analysis_summary['CellID'], self.analysis_summary['Genotype'], self.analysis_summary['Protocol'])
        for a in data_template.keys():
            if a in self.analysis_summary.keys():
                txt = self.analysis_summary[a]
                if a in ['Description', 'Notes']:
                    txt = txt.replace('\n', ' ').replace('\r', '')  # remove line breaks from output, replace \n with space
                ltxt += (data_template[a][1]).format(txt) + ' \t'
            else:
                ltxt += ('{:>%ds}' % (data_template[a][0]) + '\t').format('NaN')  # placeholder for missing values
        ltxt = ltxt.replace('\n', ' ').replace('\r', '')  # remove line breaks
        ltxt = htxt + ltxt
        if printnow:
            print(ltxt)
        if copytoclipboard:
            clipb = Qt.QApplication.clipboard()
            clipb.clear(mode=clipb.Clipboard)
            clipb.setText(ltxt, mode=clipb.Clipboard)
        return ltxt
def dbStoreClicked(self):
"""
Store data into the current database for further analysis
"""
#self.updateAnalysis()
if self.loaded is None:
return
self.dbIdentity = 'IVCurve' # type of data in the database
db = self._host_.dm.currentDatabase()
# print 'dir (db): ', dir(db)
# print 'dir (db.db): ', dir(db.db)
# print 'db.listTables: ', db.listTables()
# print 'db.tables: ', db.tables
#
table = self.dbIdentity
columns = OrderedDict([
# ('ProtocolDir', 'directory:Protocol'),
('AnalysisDate', 'text'),
('ProtocolSequenceDir', 'directory:ProtocolSequence'),
('Dir', 'text'),
('Protocol', 'text'),
('Genotype', 'text'),
('Celltype', 'text'),
('UseData', 'int'),
('RMP', 'real'),
('R_in', 'real'),
('tau_m', 'real'),
('iHold', 'real'),
('PulseDuration', 'real'),
('neg_cmd', 'real'),
('neg_pk', 'real'),
('neg_ss', 'real'),
('h_tau', 'real'),
('h_g', 'real'),
('SpikeThreshold', 'real'),
('AdaptRatio', 'real'),
('FiringRate', 'real'),
('AP1_HalfWidth', 'real'),
('AP1_Latency', 'real'),
('AP2_HalfWidth', 'real'),
('AP2_Latency', 'real'),
('AHP_Depth', 'real'),
('FI_Curve', 'text'),
('IV_Curve_pk', 'text'),
('IV_Curve_ss', 'text'),
])
if table not in db.tables:
db.createTable(table, columns, owner=self.dbIdentity)
try:
z = self.neg_cmd
except:
self.neg_cmd = 0.
self.neg_pk = 0.
self.neg_ss = 0.
self.tau2 = 0.
self.Gh = 0.
if 'Genotype' not in self.analysis_summary:
self.analysis_summary['Genotype'] = 'Unknown'
# print 'genytope: ', self.analysis_summary['Genotype']
if 'Celltype' not in self.Script.analysis_parameters:
self.analysis_summary['Celltype'] = 'Unknown'
data = {
'AnalysisDate': time.strftime("%Y-%m-%d %H:%M:%S"),
'ProtocolSequenceDir': self.loaded,
# 'ProtocolSequenceDir': self.dataModel.getParent(self.loaded, 'ProtocolSequence'),
'Dir': self.loaded.parent().name(),
'Protocol': self.loaded.name(),
'Genotype': self.analysis_summary['Genotype'],
'Celltype': self.Script.analysis_parameters['Celltype'], # uses global info, not per cell info
'UseData' : 1,
'RMP': self.rmp / 1000.,
'R_in': self.r_in,
'tau_m': self.tau,
'iHold': self.analysis_summary['iHold'],
'PulseDuration': self.analysis_summary['pulseDuration'],
'AdaptRatio': self.adapt_ratio,
'neg_cmd': self.neg_cmd,
'neg_pk': self.neg_pk,
'neg_ss': self.neg_ss,
'h_tau': self.analysis_summary['tauh'],
'h_g': self.analysis_summary['Gh'],
'SpikeThreshold': self.analysis_summary['SpikeThreshold'],
'FiringRate': self.analysis_summary['FiringRate'],
'AP1_HalfWidth': self.analysis_summary['AP1_HalfWidth'],
'AP1_Latency': self.analysis_summary['AP1_Latency'],
'AP2_HalfWidth': self.analysis_summary['AP2_HalfWidth'],
'AP2_Latency': self.analysis_summary['AP2_Latency'],
'AHP_Depth': self.analysis_summary['AHP_Depth'],
'FI_Curve': repr(self.analysis_summary['FI_Curve'].tolist()), # convert array to string for storage
'IV_Curve_pk': repr(np.array(self.analysis_summary['IV_Curve_pk']).tolist()),
'IV_Curve_ss': repr(np.array(self.analysis_summary['IV_Curve_ss']).tolist()),
}
## If only one record was given, make it into a list of one record
if isinstance(data, dict):
data = [data]
## Make sure target table exists and has correct columns, links to input file
fields = db.describeData(data)
## override directory fields since describeData can't guess these for us
# fields['ProtocolDir'] = 'directory:Protocol'
fields['ProtocolSequenceDir'] = 'directory:ProtocolSequence'
with db.transaction():
db.checkTable(table, owner=self.dbIdentity, columns=fields, create=True, addUnknownColumns=True, indexes=[['ProtocolSequenceDir'],])
dirtable = db.dirTableName(self.loaded) # set up the DirTable Protocol Sequence directory.
if not db.hasTable(dirtable):
db.createDirTable(self.loaded)
# delete old
for source in set([d['ProtocolSequenceDir'] for d in data]):
db.delete(table, where={'ProtocolSequenceDir': source})
# write new
with pg.ProgressDialog("Storing IV Results..", 0, 100) as dlg:
for n, nmax in db.iterInsert(table, data, chunkSize=30):
dlg.setMaximum(nmax)
dlg.setValue(n)
if dlg.wasCanceled():
raise HelpfulException("Scan store canceled by user.", msgType='status')
#db.close()
#db.open()
print("Updated record for ", self.loaded.name())
# ---- Helpers ----
# Some of these would normally live in a pyqtgraph-related module, but are
# just stuck here to get the job done.
#
@staticmethod
def label_up(plot, xtext, ytext, title):
"""helper to label up the plot"""
plot.setLabel('bottom', xtext)
plot.setLabel('left', ytext)
plot.setTitle(title)
| mit | e869b751c1deb7d84219fb37bb2c2a0f | 47.797268 | 152 | 0.537677 | 3.669721 | false | false | false | false |
formencode/formencode | examples/WebwareExamples/index.py | 3 | 2806 | from __future__ import absolute_import
from __future__ import print_function
from formencode import Invalid, htmlfill, Schema, validators
from WebKit.Page import Page
import six
page_style = '''
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
<html>
<head>
<meta http-equiv="content-type" content="text/html; charset=UTF-8">
<title>Tell me about yourself</title>
<style type="text/css">
.error {background-color: #ffdddd}
.error-message {border: 2px solid #f00}
</style>
</head>
<body>
<h1>Tell me about yourself</h1>
<p><i>A FormEncode example</i></p>
%s
</body></html>'''
form_template = '''
<form action="" method="POST">
<p>Your name:<br>
<form:error name="name">
<input type="text" name="name"></p>
<p>Your age:<br>
<form:error name="age">
<input type="text" name="age"></p>
<p>Your favorite color:<br>
<form:error name="color">
<input type="checkbox" value="red" name="color"> Red<br>
<input type="checkbox" value="blue" name="color"> Blue<br>
<input type="checkbox" value="black" name="color"> Black<br>
<input type="checkbox" value="green" name="color"> Green<br>
<input type="checkbox" value="pink" name="color"> Pink</p>
<input type="submit" name="_action_save" value="Submit">
</form>'''
response_template = '''
<h2>Hello, %(name)s!</h2>
<p>You are %(age)d years old
and your favorite color is %(color)s.</p>'''
class FormSchema(Schema):
name = validators.String(not_empty=True)
age = validators.Int(min=13, max=99)
color = validators.OneOf(['red', 'blue', 'black', 'green'])
filter_extra_fields = True
allow_extra_fields = True
class index(Page):
def awake(self, trans):
Page.awake(self, trans)
self.rendered_form = None
def actions(self):
return ['save']
def save(self):
fields = self.request().fields()
try:
fields = FormSchema.to_python(fields, self)
except Invalid as e:
errors = dict((k, v.encode('utf-8'))
for k, v in six.iteritems(e.unpack_errors()))
print("Errors:", errors)
self.rendered_form = htmlfill.render(form_template,
defaults=fields, errors=errors)
self.writeHTML()
else:
self.doAction(fields)
def doAction(self, fields):
print("Fields:", fields)
self.rendered_form = response_template % fields
self.writeHTML()
def writeHTML(self):
if self.rendered_form is None:
self.rendered_form = htmlfill.render(form_template,
defaults=self.getDefaults())
self.write(page_style % self.rendered_form)
def getDefaults(self):
return dict(age='enter your age', color=['blue'])
def preAction(self, trans):
pass
postAction = preAction
| mit | 29ea48d39447ea6c46cfb621be0a66bf | 25.980769 | 69 | 0.62402 | 3.380723 | false | false | false | false |
formencode/formencode | src/formencode/htmlrename.py | 3 | 2184 | """
Module to rename form fields
"""
from __future__ import absolute_import
from formencode.rewritingparser import RewritingParser
__all__ = ['rename', 'add_prefix']
def rename(form, rename_func):
"""
Rename all the form fields in the form (a string), using rename_func
rename_func will be called with one argument, the name of the
field, and should return a new name.
"""
p = RenamingParser(rename_func)
p.feed(form)
p.close()
return p.text()
def add_prefix(form, prefix, dotted=False):
"""
Add the given prefix to all the fields in the form.
If dotted is true, then add a dot between prefix and the previous
name. Empty fields will use the prefix as the name (with no dot).
"""
def rename_func(field_name):
if dotted:
if field_name:
return prefix + '.' + field_name
else:
return prefix
else:
return prefix + field_name
return rename(form, rename_func)
class RenamingParser(RewritingParser):
def __init__(self, rename_func):
RewritingParser.__init__(self)
self.rename_func = rename_func
def close(self):
self.handle_misc(None)
RewritingParser.close(self)
self._text = self._get_text()
def text(self):
try:
return self._text
except AttributeError:
raise Exception(
"You must .close() a parser instance before getting "
"the text from it")
def handle_starttag(self, tag, attrs, startend=False):
self.write_pos()
if tag in ('input', 'textarea', 'select'):
self.handle_field(tag, attrs, startend)
else:
return
def handle_startendtag(self, tag, attrs):
return self.handle_starttag(tag, attrs, True)
def handle_field(self, tag, attrs, startend):
name = self.get_attr(attrs, 'name', '')
new_name = self.rename_func(name)
if name is None:
self.del_attr(attrs, 'name')
else:
self.set_attr(attrs, 'name', new_name)
self.write_tag(tag, attrs)
self.skip_next = True
| mit | e250252d8fbc408e1cda3eaae3f1dd19 | 26.64557 | 72 | 0.590659 | 3.942238 | false | false | false | false |
shymonk/django-datatable | table/templatetags/table_tags.py | 1 | 1231 | #!/usr/bin/env python
# coding: utf-8
from django import template
from django.template import Context
register = template.Library()
class TableNode(template.Node):
template_name = "table/table.html"
def __init__(self, table):
self.table = template.Variable(table)
def render(self, context):
table = self.table.resolve(context)
t = template.loader.get_template(
table.opts.template_name or self.template_name)
context = {'table': table}
return t.render(context)
class SimpleTableNode(TableNode):
template_name = "table/simple_table.html"
@register.tag
def render_table(parser, token):
try:
tag, table = token.split_contents()
except ValueError:
msg = '%r tag requires a single arguments' % token.split_contents()[0]
raise template.TemplateSyntaxError(msg)
return TableNode(table)
@register.tag
def render_simple_table(parser, token):
try:
tag, table = token.split_contents()
except ValueError:
msg = '%r tag requires a single arguments' % token.split_contents()[0]
raise template.TemplateSyntaxError(msg)
return SimpleTableNode(table)
| mit | 25f1806a5a50a517cd4333f653da5b2c | 25.355556 | 78 | 0.646629 | 3.996753 | false | false | false | false |
formencode/formencode | tests/test_doctests.py | 1 | 3527 | from __future__ import absolute_import
import os
import sys
import doctest
from formencode import compound
from formencode import htmlfill
from formencode import htmlgen
from formencode import national
from formencode import schema
from formencode import validators
import six
import pytest
"""Modules that will have their doctests tested."""
modules = [compound, htmlfill, htmlgen, national, schema, validators]
"""Text files that will have their doctests tested."""
text_files = [
"docs/htmlfill.txt",
"docs/Validator.txt",
"tests/non_empty.txt",
]
"""Used to resolve text files to absolute paths."""
base = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
if six.text_type is str:  # Python 3
    OutputChecker = doctest.OutputChecker

    # On Python 3, normalize expected doctest output that was written for
    # Python 2: strip the u'' string prefix and rewrite set([...]) reprs to
    # the modern {...} / set() literals before comparing.
    class OutputChecker3(OutputChecker):
        def check_output(self, want, got, optionflags):
            if want.startswith("u'"):
                want = want[1:]
            elif want.startswith("set(["):
                want = (
                    want[3:]
                    .replace("([", "{")
                    .replace("])", "}")
                    .replace("{}", "set()")
                )
            return OutputChecker.check_output(self, want, got, optionflags)

    # Monkey-patch so doctest uses the normalizing checker globally.
    doctest.OutputChecker = OutputChecker3
def doctest_file(document, verbose, raise_error):
    """Run the doctests in a text file; optionally assert that at least one
    test ran and that none failed."""
    flags = doctest.ELLIPSIS | doctest.IGNORE_EXCEPTION_DETAIL
    failures, tests = doctest.testfile(
        document,
        module_relative=False,
        optionflags=flags,
        verbose=verbose,
    )
    if raise_error:
        assert tests > 0
        assert failures == 0
def doctest_module(document, verbose, raise_error):
    """Run the doctests of a module; optionally assert that at least one
    test ran and that none failed."""
    flags = doctest.ELLIPSIS | doctest.IGNORE_EXCEPTION_DETAIL
    failures, tests = doctest.testmod(
        document,
        optionflags=flags,
        verbose=verbose,
    )
    if raise_error:
        assert tests > 0
        assert failures == 0
def set_func_description(fn, description):
    """Wrap function and set description attr for pytest to display."""
    def _described(*call_args):
        fn(*call_args)
    _described.description = description
    return _described
def collect_functions():
    """Yield (test function, document, verbose, raise_error) tuples for every
    registered doctest target -- text files (str) and modules alike."""
    verbose = False
    raise_error = True
    for document in text_files + modules:
        if isinstance(document, str):
            description = "Doctests for %s" % (document,)
            if not document.startswith(os.sep):
                document = os.path.join(base, document)
            runner = set_func_description(doctest_file, description)
        else:
            description = "Doctests for %s" % (document.__name__,)
            runner = set_func_description(doctest_module, description)
        yield runner, document, verbose, raise_error
# Parametrize over every collected doctest target so each file/module shows
# up as an individual pytest test case.
@pytest.mark.parametrize(
    "testfn,document,verbose,raise_error", list(collect_functions())
)
def test_doctests(testfn, document, verbose, raise_error):
    """Generate each doctest."""
    testfn(document, verbose, raise_error)
if __name__ == "__main__":
# Call this file directly if you want to test doctests.
args = sys.argv[1:]
verbose = False
if "-v" in args:
args.remove("-v")
verbose = True
if not args:
args = text_files + modules
raise_error = False
for fn in args:
if isinstance(fn, str):
fn = os.path.join(base, fn)
doctest_file(fn, verbose, raise_error)
else:
doctest_module(fn, verbose, raise_error)
| mit | 3a4489f05aae16e52dd02e4ea7d42b38 | 26.771654 | 75 | 0.611284 | 4.101163 | false | true | false | false |
4dn-dcic/tibanna | awsf/aws_upload_output_update_json.py | 1 | 9194 | #!/usr/bin/python
import json
import sys
import boto3
import os
import re
# CLI arguments: old job-description json, workflow execution metadata json,
# the runner log file, the md5sum output file, and the output path for the
# updated json.
json_old = sys.argv[1]
execution_metadata = sys.argv[2]
logfile = sys.argv[3]
md5file = sys.argv[4]
json_new = sys.argv[5]
# Optional 6th argument selects the workflow language (default cwl-draft3).
if len(sys.argv) > 6:
    language = sys.argv[6]
else:
    language = 'cwl-draft3'

# Local directory on the EBS volume where workflow outputs were written.
source_directory = '/data1/out/'
def parse_command(logfile):
    """
    parse commands from the log file and returns the commands as a list
    of command line lists, each corresponding to a step run.
    """
    all_commands = []
    current = []
    collecting = False
    with open(logfile, 'r') as fp:
        for raw_line in fp:
            raw_line = raw_line.strip('\n')
            # A docker invocation line opens a new multi-line command.
            if raw_line.startswith('[job') and raw_line.endswith('docker \\'):
                collecting = True
            if collecting:
                current.append(raw_line.strip('\\'))
                # A line without a trailing backslash closes the command.
                if not raw_line.endswith('\\'):
                    collecting = False
                    all_commands.append(current)
                    current = []
    return all_commands
def upload_to_s3(s3, source, bucket, target):
    """Upload a file or a whole directory tree to S3.

    :param s3: a boto3 S3 client (anything with upload_file(src, bucket, key))
    :param source: local file or directory path
    :param bucket: destination S3 bucket name
    :param target: destination key (or key prefix when source is a directory)
    """
    if os.path.isdir(source):
        print("source " + source + " is a directory")
        source = source.rstrip('/')
        for root, dirs, files in os.walk(source):
            for f in files:
                source_f = os.path.join(root, f)
                if root == source:
                    target_f = os.path.join(target, f)
                else:
                    # BUGFIX: escape `source` so directory names containing
                    # regex metacharacters (e.g. '+', '(') don't break or
                    # corrupt the prefix strip.
                    target_subdir = re.sub('^' + re.escape(source) + '/', '', root)
                    target_f = os.path.join(target, target_subdir, f)
                print("source_f=" + source_f)
                print("target_f=" + target_f)
                s3.upload_file(source_f, bucket, target_f)
    else:
        # BUGFIX: log message said "is a not a directory".
        print("source " + source + " is not a directory")
        s3.upload_file(source, bucket, target)
# read the old job-description json and pull out the output-routing config
with open(json_old, 'r') as json_old_f:
    old_dict = json.load(json_old_f)
    output_target = old_dict.get('Job').get('Output').get('output_target')
    alt_output_argnames = old_dict.get('Job').get('Output').get('alt_cond_output_argnames')
    output_bucket = old_dict.get('Job').get('Output').get('output_bucket_directory')
    secondary_output_target = old_dict.get('Job').get('Output').get('secondary_output_target')
    # normalize secondary output targets: a single string becomes a one-item list
    for u, v in secondary_output_target.items():
        if not isinstance(v, list):
            secondary_output_target[u] = [v]

# Build old_dict['Job']['Output']['Output files'] from the language-specific
# execution metadata. NOTE(review): `.iteritems()` below means this script is
# Python 2 only.
if language == 'wdl':
    # read wdl output json file
    with open(execution_metadata, 'r') as json_out_f:
        wdl_output = json.load(json_out_f)
    old_dict['Job']['Output'].update({'Output files': {}})
    for argname, outfile in wdl_output['outputs'].iteritems():
        if outfile:
            old_dict['Job']['Output']['Output files'].update({argname: {'path': outfile}})
elif language == 'snakemake':
    old_dict['Job']['Output'].update({'Output files': {}})
elif language == 'shell':
    old_dict['Job']['Output'].update({'Output files': {}})
else:
    # read cwl output json file
    with open(execution_metadata, 'r') as json_out_f:
        cwl_output = json.load(json_out_f)
    old_dict['Job']['Output'].update({'Output files': cwl_output})
output_meta = old_dict['Job']['Output']['Output files']

# filling in md5 sums parsed from the md5sum output file (format: "<md5>  <path>")
with open(md5file, 'r') as md5_f:
    md5dict = dict()
    for line in md5_f:
        a = line.split()
        path = a[1]
        md5sum = a[0]
        md5dict[path] = md5sum
for of, ofv in output_meta.iteritems():
    if ofv['path'] in md5dict:
        ofv['md5sum'] = md5dict[ofv['path']]
    if 'secondaryFiles' in ofv:
        for sf in ofv['secondaryFiles']:
            if sf['path'] in md5dict:
                sf['md5sum'] = md5dict[sf['path']]

# sanity check for output target, this skips secondary files
# in case conditional alternative output targets exist, replace the output target key with
# the alternative name
# We don't need to do the same for secondary files because
# conditional alternative names only occur in WDL which does not support secondary files
replace_list = []
for k in output_target:
    if k.startswith('file://'):
        continue
    if k not in output_meta:
        if k in alt_output_argnames:
            key_exists = False  # initialize
            for k_alt in alt_output_argnames[k]:
                if k_alt in output_meta and output_meta[k_alt]['path']:
                    key_exists = True
                    replace_list.append((k, k_alt))
            if not key_exists:
                raise Exception("output target key {} doesn't exist in cwl-runner output".format(k))
        else:
            raise Exception("output target key {} doesn't exist in cwl-runner output".format(k))
# keys cannot be replaced while iterating the dict, hence the deferred swap
for k, k_alt in replace_list:
    output_target[k_alt] = output_target[k]
    del output_target[k]

s3 = boto3.client('s3')

# 'file://' output targets: local paths uploaded verbatim (not workflow outputs)
for k in output_target:
    if k.startswith('file://'):
        source = k.replace('file://', '')
        target = output_target[k]
        bucket = output_bucket  # default
        if target.startswith('s3://'):  # this allows using different output buckets
            output_path = re.sub('^s3://', '', target)
            bucket = output_path.split('/')[0]
            target = re.sub('^' + bucket + '/', '', output_path)
        try:
            print("uploading output file {} upload to {}".format(source, bucket + '/' + target))
            # s3.upload_file(source, bucket, target)
            upload_to_s3(s3, source, bucket, target)
        except Exception as e:
            raise Exception("output file {} upload to {} failed. %s".format(source, bucket + '/' + target) % e)

# legitimate CWL/WDL output targets
for k in output_meta:
    source = output_meta[k].get('path')
    # strip the local output dir prefix to get the relative file name
    source_name = source.replace(source_directory, '')
    bucket = output_bucket # default
    if k in output_target:
        target = output_target[k] # change file name to what's specified in output_target
        if target.startswith('s3://'): # this allows using different output buckets
            output_path = re.sub('^s3://', '', target)
            bucket = output_path.split('/')[0]
            target = re.sub('^' + bucket + '/', '', output_path)
    else:
        target = source_name # do not change file name
    print("uploading output file {} upload to {}".format(source, bucket + '/' + target))
    try:
        # s3.upload_file(source, bucket, target)
        upload_to_s3(s3, source, bucket, target)
    except Exception as e:
        raise Exception("output file {} upload to {} failed. %s".format(source, bucket + '/' + target) % e)
    # record where the file actually went, for the updated json
    try:
        output_meta[k]['target'] = target
    except Exception as e:
        raise Exception("cannot update target info to json %s" % e)
    if 'secondaryFiles' in output_meta[k]:
        n_assigned = 0
        n_target = len(secondary_output_target.get(k, []))
        for i, sf in enumerate(output_meta[k]['secondaryFiles']):
            source = sf.get('path')
            source_name = source.replace(source_directory, '')
            bucket = output_bucket # default
            if k in secondary_output_target:
                # NOTE(review): if no target matches below, `target` keeps its
                # value from a previous iteration (or is unbound on the first)
                # -- the n_assigned != n_target check at the end is the only
                # guard. TODO confirm this is intended.
                if len(secondary_output_target[k]) == 1: # one extra file
                    target = secondary_output_target[k][i]
                    n_assigned = n_assigned + 1
                else:
                    for targ in secondary_output_target[k]:
                        if targ[-3:] == source_name[-3:]: # matching the last three letters
                            target = targ
                            n_assigned = n_assigned + 1
                            break
                if target.startswith('s3://'): # this allows using different output buckets
                    output_path = re.sub('^s3://', '', target)
                    bucket = output_path.split('/')[0]
                    target = re.sub('^' + bucket + '/', '', output_path)
            else:
                target = source_name # do not change file name
            try:
                print("uploading output file {} upload to {}".format(source, bucket + '/' + target))
                s3.upload_file(source, bucket, target)
            except Exception as e:
                raise Exception("output file {} upload to {} failed. %s".format(
                    source, bucket + '/' + target) % e)
            try:
                sf['target'] = target
            except Exception as e:
                raise Exception("cannot update target info to json %s" % e)
        if n_assigned != n_target:
            raise Exception("Error: Not all secondary output targets are uploaded!" +
                            "{} vs {}".format(n_assigned, n_target))

# add commands
old_dict['commands'] = parse_command(logfile)

# add file system info
old_dict['Job']['filesystem'] = os.environ.get('EBS_DEVICE', '')

# write to new json file
with open(json_new, 'w') as json_new_f:
    json.dump(old_dict, json_new_f, indent=4, sort_keys=True)
| mit | 85694d6ee61fb2f1c1c55dd3986e0b8e | 39.148472 | 111 | 0.561344 | 3.769578 | false | false | false | false |
4dn-dcic/tibanna | tibanna/ami.py | 1 | 7588 | import boto3
import time
import os
from datetime import datetime
from tibanna import create_logger
from tibanna.vars import AMI_PER_REGION
logger = create_logger(__name__)
class AMI(object):
    """Builds the Tibanna AMI from a base image, optionally replicating it
    across regions and/or making it public.

    The AMI is built either by directly copying BASE_AMI (when no userdata is
    given) or by launching a small instance that runs the userdata install
    script and then snapshotting it with create_image.
    """

    BASE_AMI = 'ami-0885b1f6bd170450c'  # ubuntu 20.04 for us-east-1
    BASE_REGION = 'us-east-1'
    USERDATA_DIR = os.path.dirname(os.path.abspath(__file__))
    USERDATA_FILE = os.path.join(USERDATA_DIR, 'create_ami_userdata')
    # NOTE: evaluated once at import time, so a long-lived process keeps the
    # same dated name.
    AMI_NAME = 'tibanna-ami-' + datetime.strftime(datetime.today(), '%Y%m%d')  # e.g tibanna-ami-20201113

    def __init__(self, base_ami=None, base_region=None, userdata_file=None, ami_name=None):
        """Override the class-level defaults per instance.

        userdata_file is compared against None (not truthiness) so an empty
        string can be passed to mean "no userdata" (copy-only mode).
        """
        if base_ami:
            self.BASE_AMI = base_ami
        if base_region:
            self.BASE_REGION = base_region
        if userdata_file is not None:
            self.USERDATA_FILE = userdata_file
        if ami_name:
            self.AMI_NAME = ami_name

    @staticmethod
    def launch_instance_for_tibanna_ami(keyname, userdata_file, base_ami):
        """Launch a t3.micro from base_ami (with optional userdata and
        keypair) and return its instance id."""
        launch_args = {'ImageId': base_ami,
                       'InstanceType': 't3.micro',
                       'MaxCount': 1,
                       'MinCount': 1,
                       'TagSpecifications': [{'ResourceType': 'instance',
                                              'Tags': [{"Key": "Name", "Value": "tibanna_ami"}]}]}
        if userdata_file:
            with open(userdata_file, 'r') as f:
                userdata_str = f.read()
            launch_args.update({'UserData': userdata_str})
        if keyname:
            launch_args.update({'KeyName': keyname})
        logger.debug("launch_args=" + str(launch_args))
        ec2 = boto3.client('ec2')
        res = ec2.run_instances(**launch_args)
        logger.debug("response from EC2 run_instances :" + str(res) + '\n\n')
        instance_id = res['Instances'][0]['InstanceId']
        return instance_id

    def create_ami_for_tibanna(self, keyname=None, make_public=False, replicate=False):
        """Create the AMI using this instance's configured base/userdata/name."""
        return self.create_ami(keyname=keyname, userdata_file=self.USERDATA_FILE,
                               base_ami=self.BASE_AMI, ami_name=self.AMI_NAME,
                               make_public=make_public, base_region=self.BASE_REGION, replicate=replicate)

    @staticmethod
    def replicate_ami(*, ami_name, ami_id, source_region='us-east-1',
                      target_regions=None,
                      make_public=True):
        """ Replicates the given ami_id from the source region into the target region(s).

            Params:
                ami_name (str): Name to use with AMI, typically something like tibanna-ami-20211025
                ami_id (str): The AMI ID from AWS for the source image
                source_region (str): The region the source AMI was created in
                target_regions (str[]): List of regions to replicate the source AMI into
                                        Leave as None to use all regions in AMI_PER_REGION
                make_public (bool, default True): whether or not this AMI should be public

            Raises ClientError if a boto3 call fails.
            Returns an AMI_PER_REGION mapping
        """
        if not target_regions:
            target_regions = [r for r in AMI_PER_REGION.keys() if r != source_region]

        # Create sessions in each target region and copy the AMI into it.
        # If this AMI is to be publicly available, sleep for 5 mins to allow
        # the AMI to be registered, then modify attribution to public.
        ami_per_region = {}
        for region in target_regions:
            region_session = boto3.client('ec2', region_name=region)
            response = region_session.copy_image(
                Name=ami_name,
                Description=f'{ami_name} replicated from {source_region}',
                SourceImageId=ami_id,
                SourceRegion=source_region
            )
            new_image_id = response['ImageId']
            logger.info(f'Copied {ami_name} from {source_region} to {region}'
                        f' under new AMI ID {new_image_id}')
            ami_per_region[region] = new_image_id
        if make_public:
            logger.info('Provisioning PUBLIC AMIs - sleeping 5 mins, ctrl-c now if unintended')
            time.sleep(5 * 60)
            for region, image_id in ami_per_region.items():
                region_session = boto3.client('ec2', region_name=region)  # re-create since its been some time
                region_session.modify_image_attribute(ImageId=image_id,
                                                      LaunchPermission={'Add': [{'Group': 'all'}]})
        else:
            logger.info(f'Provisioning private AMIs')
        logger.info(f'Provisioned {ami_per_region}')
        return ami_per_region

    @classmethod
    def create_ami(cls, keyname=None, userdata_file=USERDATA_FILE,
                   base_ami=BASE_AMI,
                   ami_name=AMI_NAME,
                   make_public=False,
                   replicate=False,
                   base_region='us-east-1'):
        """ Helper function that creates the Tibanna AMI from a base image.

        BUGFIX: the four try/excepts below used bare ``except:``, which also
        swallowed KeyboardInterrupt/SystemExit and discarded the underlying
        boto3 error; they are now narrowed to Exception and chained.
        """
        if not userdata_file:
            logger.info("no userdata.. no need to launch an instance.. just copying image")
            ec2 = boto3.client('ec2')
            try:
                res_copy = ec2.copy_image(Name=ami_name, SourceImageId=base_ami, SourceRegion=base_region)
            except Exception as e:
                raise Exception("Failed to copy image") from e

            # I tried 5 min - it's not enough and it fails at the next step.
            logger.info("waiting for 10min for the image to be created..")
            time.sleep(10 * 60)

            new_image_id = res_copy['ImageId']
            if make_public:
                ec2.modify_image_attribute(ImageId=new_image_id,
                                           LaunchPermission={'Add': [{'Group': 'all'}]})
            return new_image_id

        # Launch an instance with base AMI
        try:
            instance_id = AMI.launch_instance_for_tibanna_ami(keyname, userdata_file, base_ami)
            logger.debug("instance_id=" + instance_id)
        except Exception as e:
            raise Exception("Failed to launch an instance") from e

        logger.info("waiting for 10min for the instance to install everything and reboot..")
        time.sleep(10 * 60)

        # Create an image from the instance
        try:
            create_image_args = {'InstanceId': instance_id, 'Name': ami_name}
            ec2 = boto3.client('ec2')
            logger.info("creating an image...")
            res_create = ec2.create_image(**create_image_args)
        except Exception as e:
            raise Exception("Failed to create an image") from e

        logger.info("waiting for 10min for the image to be created..")
        time.sleep(10 * 60)

        # Terminate instance once enough time has passed that we are certain AMI creation
        # should have completed
        try:
            ec2.terminate_instances(InstanceIds=[instance_id])
        except Exception as e:
            raise Exception("Failed to terminate the instance") from e

        new_image_id = res_create['ImageId']

        # Make new base image public
        if make_public:
            ec2.modify_image_attribute(ImageId=new_image_id,
                                       LaunchPermission={'Add': [{'Group': 'all'}]})

        # Replicate the image across regions as desired
        if replicate:
            cls.replicate_ami(ami_name=ami_name, ami_id=new_image_id, source_region=base_region,
                              make_public=make_public)

        return new_image_id
| mit | 4d3db55670ebd30026148377f8612313 | 41.629213 | 110 | 0.569715 | 4.004222 | false | false | false | false |
4dn-dcic/tibanna | tibanna/cw_utils.py | 1 | 55661 | import boto3, os
from . import create_logger
from .utils import (
upload,
read_s3,
put_object_s3
)
from .top import Top
from .vars import (
AWS_REGION,
EBS_MOUNT_POINT,
S3_ENCRYT_KEY_ID
)
from datetime import datetime
from datetime import timedelta
import json, math
logger = create_logger(__name__)
class TibannaResource(object):
    """class handling cloudwatch metrics for cpu / memory /disk space
    and top command metrics for cpu and memory per process.
    """
    # strftime/strptime format shared by report timestamps
    timestamp_format = '%Y-%m-%d %H:%M:%S'
    # title shown at the top of the generated metrics.html report
    report_title = 'Tibanna Metrics'

    @classmethod
    def convert_timestamp_to_datetime(cls, timestamp):
        # Parse a timestamp string in `timestamp_format` into a datetime.
        return datetime.strptime(timestamp, cls.timestamp_format)
def __init__(self, instance_id, filesystem, starttime, endtime=datetime.utcnow(), cost_estimate = 0.0, cost_estimate_type = "NA"):
"""All the Cloudwatch metrics are retrieved and stored at the initialization.
:param instance_id: e.g. 'i-0167a6c2d25ce5822'
:param filesystem: e.g. "/dev/xvdb", "/dev/nvme1n1"
"""
self.instance_id = instance_id
self.filesystem = filesystem
self.client = boto3.client('cloudwatch', region_name=AWS_REGION)
# get resource metrics
nTimeChunks = (endtime - starttime) / timedelta(days=1)
# self.total_minutes = (endtime - starttime) / timedelta(minutes=1)
if round(nTimeChunks) < nTimeChunks:
nTimeChunks = round(nTimeChunks) + 1
else:
nTimeChunks = round(nTimeChunks)
logger.info("Spliting run time into %s chunks" % str(nTimeChunks))
self.starttimes = [starttime + timedelta(days=k) for k in range(0, nTimeChunks)]
self.endtimes = [starttime + timedelta(days=k+1) for k in range(0, nTimeChunks)]
self.start = starttime.replace(microsecond=0) # initial starttime for the window requested
self.end = endtime.replace(microsecond=0) # initial endtime for the window requested
self.nTimeChunks = nTimeChunks
self.list_files = []
self.cost_estimate = cost_estimate
self.cost_estimate_type = cost_estimate_type
self.get_metrics(nTimeChunks)
    def get_metrics(self, nTimeChunks=1):
        """calculate max/min metrics across all time chunks.
        AWS allows only 1440 data points at a time
        which corresponds to 24 hours at 1min interval,
        so we have to split them into chunks.
        """
        max_mem_used_MB_chunks = []
        min_mem_available_MB_chunks = []
        max_cpu_utilization_percent_chunks = []
        max_disk_space_utilization_percent_chunks = []
        max_disk_space_used_GB_chunks = []
        max_ebs_read_chunks = []
        for i in range(0, nTimeChunks):
            # NOTE: the per-chunk window is passed to the underlying
            # CloudWatch queries implicitly via these two instance attributes.
            self.starttime = self.starttimes[i]
            self.endtime = self.endtimes[i]
            max_mem_used_MB_chunks.append(self.max_memory_used())
            min_mem_available_MB_chunks.append(self.min_memory_available())
            max_cpu_utilization_percent_chunks.append(self.max_cpu_utilization())
            max_disk_space_utilization_percent_chunks.append(self.max_disk_space_utilization())
            max_disk_space_used_GB_chunks.append(self.max_disk_space_used())
            max_ebs_read_chunks.append(self.max_ebs_read())
        self.max_mem_used_MB = self.choose_max(max_mem_used_MB_chunks)
        self.min_mem_available_MB = self.choose_min(min_mem_available_MB_chunks)
        if self.max_mem_used_MB:
            # NOTE(review): if max_mem_used_MB is set but min_mem_available_MB
            # is '' (no datapoints), this addition would raise TypeError --
            # confirm both metrics are always reported together by CWAgent.
            self.total_mem_MB = self.max_mem_used_MB + self.min_mem_available_MB
            self.max_mem_utilization_percent = self.max_mem_used_MB / self.total_mem_MB * 100
        else:
            # no memory datapoints at all: leave the summary fields blank
            self.total_mem_MB = ''
            self.max_mem_utilization_percent = ''
        self.max_cpu_utilization_percent = self.choose_max(max_cpu_utilization_percent_chunks)
        self.max_disk_space_utilization_percent = self.choose_max(max_disk_space_utilization_percent_chunks)
        self.max_disk_space_used_GB = self.choose_max(max_disk_space_used_GB_chunks)
        # this following one is used to detect file copying while CPU utilization is near zero
        self.max_ebs_read_bytes = self.choose_max(max_ebs_read_chunks)
    def plot_metrics(self, instance_type, directory='.', top_content=''):
        """plot full metrics across all time chunks.
        AWS allows only 1440 data points at a time
        which corresponds to 24 hours at 1min interval,
        so we have to split them into chunks.
        :param top_content: content of the <job_id>.top in the str format, used for plotting top metrics.
        """
        max_mem_utilization_percent_chunks_all_pts = []
        max_mem_used_MB_chunks_all_pts = []
        min_mem_available_MB_chunks_all_pts = []
        max_cpu_utilization_percent_chunks_all_pts = []
        max_disk_space_utilization_percent_chunks_all_pts = []
        max_disk_space_used_GB_chunks_all_pts = []
        for i in range(0, self.nTimeChunks):
            # per-chunk window consumed implicitly by the *_all_pts queries
            self.starttime = self.starttimes[i]
            self.endtime = self.endtimes[i]
            # saving all points for the chunck
            max_mem_utilization_percent_chunks_all_pts.append(self.max_memory_utilization_all_pts())
            max_mem_used_MB_chunks_all_pts.append(self.max_memory_used_all_pts())
            min_mem_available_MB_chunks_all_pts.append(self.min_memory_available_all_pts())
            max_cpu_utilization_percent_chunks_all_pts.append(self.max_cpu_utilization_all_pts())
            max_disk_space_utilization_percent_chunks_all_pts.append(self.max_disk_space_utilization_all_pts())
            max_disk_space_used_GB_chunks_all_pts.append(self.max_disk_space_used_all_pts())
        # writing values as tsv
        input_dict ={
            'max_mem_used_MB': max_mem_used_MB_chunks_all_pts,
            'min_mem_available_MB': min_mem_available_MB_chunks_all_pts,
            'max_disk_space_used_GB': max_disk_space_used_GB_chunks_all_pts,
            'max_mem_utilization_percent': max_mem_utilization_percent_chunks_all_pts,
            'max_disk_space_utilization_percent': max_disk_space_utilization_percent_chunks_all_pts,
            'max_cpu_utilization_percent': max_cpu_utilization_percent_chunks_all_pts
        }
        # NOTE(review): write_top_tsvs / write_tsv / write_metrics are defined
        # elsewhere in this class (not visible here); they appear to return
        # the created file paths, which are collected for upload().
        self.list_files.extend(self.write_top_tsvs(directory, top_content))
        self.list_files.append(self.write_tsv(directory, **input_dict))
        self.list_files.append(self.write_metrics(instance_type, directory))
        # writing html
        self.list_files.append(self.write_html(instance_type, directory))
def upload(self, bucket, prefix='', lock=True):
logger.debug("list_files: " + str(self.list_files))
for f in self.list_files:
upload(f, bucket, prefix,
encrypt_s3_upload=S3_ENCRYT_KEY_ID is not None,
kms_key_id=S3_ENCRYT_KEY_ID)
if lock:
upload(None, bucket, os.path.join(prefix, 'lock'),
encrypt_s3_upload=S3_ENCRYT_KEY_ID is not None,
kms_key_id=S3_ENCRYT_KEY_ID)
@staticmethod
def choose_max(x):
"""given a list of values that may include None, 0 or an empty string,
chooses a positive nonzero maximum. (e.g. [0,1,2,None,3] => 3)
if no positive nonzero value exists in the list, returns an empty string."""
return TibannaResource.get_max(list(filter(lambda x:x, x)))
@staticmethod
def choose_min(x):
"""given a list of values that may include None, 0 or an empty string,
chooses a nonzero minimum. (e.g. [0,1,2,None,3] => 1)
if no nonzero value exists in the list, returns an empty string."""
return TibannaResource.get_min(list(filter(lambda x:x, x)))
@staticmethod
def get_max(x):
"""given a list of values, returns maximum value,
but if the list is empty, returns an empty string"""
return(max(x) if x else '')
@staticmethod
def get_min(x):
"""given a list of values, returns miminim value,
but if the list is empty, returns an empty string"""
return(min(x) if x else '')
    def as_dict(self):
        """Return a plain dict of the summary metrics: a copy of __dict__ with
        the boto3 client, per-chunk bookkeeping and raw window fields removed,
        so the result is json-serializable."""
        d = self.__dict__.copy()
        logger.debug("original dict: " + str(d))
        del(d['client'])
        del(d['starttimes'])
        del(d['endtimes'])
        del(d['starttime'])
        del(d['endtime'])
        del(d['filesystem'])
        del(d['instance_id'])
        # del(d['total_minutes'])
        del(d['start'])
        del(d['end'])
        del(d['nTimeChunks'])
        del(d['list_files'])
        return(d)

    # def as_table(self):
    #     d = self.as_dict()
    #     return(pd.DataFrame(d.items(), columns=['metric', 'value']))
    # Summary accessors returning a single max/min value over the current
    # chunk window (kept for backward compatibility with older callers).
    def max_memory_utilization(self):
        return(self.get_max(self.max_memory_utilization_all_pts()))

    def max_memory_used(self):
        return(self.get_max(self.max_memory_used_all_pts()))

    def min_memory_available(self):
        return(self.get_min(self.min_memory_available_all_pts()))

    def max_cpu_utilization(self):
        return(self.get_max(self.max_cpu_utilization_all_pts()))

    def max_disk_space_utilization(self):
        return(self.get_max(self.max_disk_space_utilization_all_pts()))

    def max_disk_space_used(self):
        return(self.get_max(self.max_disk_space_used_all_pts()))

    def max_ebs_read(self):
        return(self.get_max(self.max_ebs_read_used_all_pts()))
# functions that returns all points
def max_memory_utilization_all_pts(self):
res = self.client.get_metric_statistics(
Namespace='CWAgent',
MetricName='mem_used_percent',
Dimensions=[{
'Name': 'InstanceId', 'Value': self.instance_id
}],
Period=60,
Statistics=['Maximum'],
StartTime=self.starttime,
EndTime=self.endtime,
Unit='Percent'
)
pts = [(r['Maximum'], r['Timestamp']) for r in res['Datapoints']]
return[p[0] for p in sorted(pts, key=lambda x: x[1])]
def max_memory_used_all_pts(self):
res = self.client.get_metric_statistics(
Namespace='CWAgent',
MetricName='mem_used',
Dimensions=[{
'Name': 'InstanceId', 'Value': self.instance_id
}],
Period=60,
Statistics=['Maximum'],
StartTime=self.starttime,
EndTime=self.endtime,
Unit='Bytes'
)
pts = [(r['Maximum']/math.pow(1024, 2), r['Timestamp']) for r in res['Datapoints']] # get values in MB
return[p[0] for p in sorted(pts, key=lambda x: x[1])]
def min_memory_available_all_pts(self):
res = self.client.get_metric_statistics(
Namespace='CWAgent',
MetricName='mem_available',
Dimensions=[{
'Name': 'InstanceId', 'Value': self.instance_id
}],
Period=60,
Statistics=['Minimum'],
StartTime=self.starttime,
EndTime=self.endtime,
Unit='Bytes'
)
pts = [(r['Minimum']/math.pow(1024, 2), r['Timestamp']) for r in res['Datapoints']] # get values in MB
return[p[0] for p in sorted(pts, key=lambda x: x[1])]
def max_cpu_utilization_all_pts(self):
res = self.client.get_metric_statistics(
Namespace='CWAgent',
MetricName='cpu_usage_system',
Dimensions=[{
'Name': 'InstanceId', 'Value': self.instance_id
}],
Period=60,
Statistics=['Maximum'],
StartTime=self.starttime,
EndTime=self.endtime,
Unit='Percent'
)
pts = [(r['Maximum'], r['Timestamp']) for r in res['Datapoints']]
return[p[0] for p in sorted(pts, key=lambda x: x[1])]
def max_disk_space_utilization_all_pts(self):
res = self.client.get_metric_statistics(
Namespace='CWAgent',
MetricName='disk_used_percent',
Dimensions=[{
'Name': 'InstanceId', 'Value': self.instance_id
}],
Period=60,
Statistics=['Maximum'],
StartTime=self.starttime,
EndTime=self.endtime,
Unit='Percent'
)
pts = [(r['Maximum'], r['Timestamp']) for r in res['Datapoints']]
return[p[0] for p in sorted(pts, key=lambda x: x[1])]
def max_disk_space_used_all_pts(self):
res = self.client.get_metric_statistics(
Namespace='CWAgent',
MetricName='disk_used',
Dimensions=[{
'Name': 'InstanceId', 'Value': self.instance_id
}],
Period=60,
Statistics=['Maximum'],
StartTime=self.starttime,
EndTime=self.endtime,
Unit='Bytes'
)
pts = [(r['Maximum']/math.pow(1024, 3), r['Timestamp']) for r in res['Datapoints']] # we want it in GB
return[p[0] for p in sorted(pts, key=lambda x: x[1])]
def max_ebs_read_used_all_pts(self):
res = self.client.get_metric_statistics(
Namespace='CWAgent',
MetricName='diskio_read_bytes',
Dimensions=[{
'Name': 'InstanceId', 'Value': self.instance_id
}],
Period=60*5,
Statistics=['Average'],
StartTime=self.starttime,
EndTime=self.endtime,
Unit='Bytes'
)
pts = [(r['Average'], r['Timestamp']) for r in res['Datapoints']]
return[p[0] for p in sorted(pts, key=lambda x: x[1])]
@staticmethod
def extract_metrics_data(file_contents):
"""
This function takes a file contents string and parses it column wise.
It returns the header and the data in a way that can be injected into JS code
`data` is a Python dict, that contains all that data for later processing
outside of this function.
"""
columns, data = [], {}
columns_js, data_js = "[]", "[]"
for idx, line in enumerate(file_contents.rstrip().split('\n')):
if idx == 0:
for k,col in enumerate(line.split('\t')):
columns.append(col)
data[col] = []
else:
for k, col in enumerate(line.split('\t')):
try:
float(col) # check if it's a number, still add it as string though
data[columns[k]].append(col)
except ValueError:
logger.info("Cannot convert %s to float in column %s" % (col, columns[k]))
continue
columns.pop(0) # Remove the 'interval' column
if len(columns)>0:
columns_js = "["+(",".join(columns))+"]"
data_js = "["
for col in columns:
data_js += "[" + (",".join(data[col])) + "],"
data_js = data_js[:-1] # remove the last ,
data_js += "]"
return columns_js, columns, data_js, data
@staticmethod
def format_metrics_tsv_data(metrics_data):
max_mem_used_MB = [str(k) for k in metrics_data['max_mem_used_MB']]
max_mem_used_MB_js = "[" + (",".join(max_mem_used_MB)) + "]"
min_mem_available_MB = [str(k) for k in metrics_data['min_mem_available_MB']]
min_mem_available_MB_js = "[" + (",".join(min_mem_available_MB)) + "]"
max_disk_space_used_GB = [str(k) for k in metrics_data['max_disk_space_used_GB']]
max_disk_space_used_GB_js = "[" + (",".join(max_disk_space_used_GB)) + "]"
max_mem_utilization_percent = [str(k) for k in metrics_data['max_mem_utilization_percent']]
max_mem_utilization_percent_js = "[" + (",".join(max_mem_utilization_percent)) + "]"
max_disk_space_utilization_percent = [str(k) for k in metrics_data['max_disk_space_utilization_percent']]
max_disk_space_utilization_percent_js = "[" + (",".join(max_disk_space_utilization_percent)) + "]"
max_cpu_utilization_percent = [str(k) for k in metrics_data['max_cpu_utilization_percent']]
max_cpu_utilization_percent_js = "[" + (",".join(max_cpu_utilization_percent)) + "]"
return (
max_mem_used_MB_js,
min_mem_available_MB_js,
max_disk_space_used_GB_js,
max_mem_utilization_percent_js,
max_disk_space_utilization_percent_js,
max_cpu_utilization_percent_js,
)
# functions to create reports and html
    def write_html(self, instance_type, directory):
        """Assemble metrics.html in `directory` from the previously written
        metrics.tsv / top_cpu.tsv / top_mem.tsv files and the summary values,
        and return the path of the written file.

        NOTE(review): check_mkdir() and create_html() are defined elsewhere in
        this class (not visible here); create_html() presumably returns the
        %-format template string consumed below.
        """
        self.check_mkdir(directory)
        filename = directory + '/' + 'metrics.html'
        # 0.0 means "unknown"; display a placeholder instead
        cost_estimate = '---' if self.cost_estimate == 0.0 else "{:.5f}".format(self.cost_estimate)
        metrics_contents = ""
        with open(directory + '/' + 'metrics.tsv') as f:
            metrics_contents = f.read()
        (
            metrics_col_js,
            metrics_col,
            metrics_data_js,
            metrics_data,
        ) = self.extract_metrics_data(metrics_contents)
        (
            max_mem_used_MB_js,
            min_mem_available_MB_js,
            max_disk_space_used_GB_js,
            max_mem_utilization_percent_js,
            max_disk_space_utilization_percent_js,
            max_cpu_utilization_percent_js,
        ) = self.format_metrics_tsv_data(metrics_data)
        top_cpu_contents = ""
        with open(directory + '/' + 'top_cpu.tsv') as f:
            top_cpu_contents = f.read()
        cpu_columns_js, cpu_columns, cpu_data_js, cpu_data = self.extract_metrics_data(top_cpu_contents)
        top_mem_contents = ""
        with open(directory + '/' + 'top_mem.tsv') as f:
            top_mem_contents = f.read()
        mem_columns_js, mem_columns, mem_data_js, mem_data = self.extract_metrics_data(top_mem_contents)
        # the argument order below must match the %-placeholders in the template
        with open(filename, 'w') as fo:
            fo.write(self.create_html() % (self.report_title, instance_type,
                str(self.max_mem_used_MB), str(self.min_mem_available_MB), str(self.max_disk_space_used_GB),
                str(self.max_mem_utilization_percent), str(self.max_cpu_utilization_percent),
                str(self.max_disk_space_utilization_percent),
                '---', # cost placeholder for now
                cost_estimate, self.cost_estimate_type,
                str(self.start), str(self.end), str(self.end - self.start),
                max_mem_used_MB_js, min_mem_available_MB_js, max_disk_space_used_GB_js,
                max_mem_utilization_percent_js, max_disk_space_utilization_percent_js, max_cpu_utilization_percent_js,
                cpu_columns_js, cpu_data_js,
                mem_columns_js, mem_data_js
                )
            )
        return(filename)
@classmethod
def update_html(cls, bucket, prefix, directory='.', upload_new=True):
    """Rebuild metrics.html from the TSV files stored on S3 and re-upload it.

    :param bucket: S3 bucket holding the run's metrics files
    :param prefix: key prefix under which metrics_report.tsv etc. live
    :param directory: currently unused in this method -- kept for backward
        compatibility of the signature (TODO confirm before removing)
    :param upload_new: currently unused in this method (see above)
    """
    # reading table parameters from metrics_report.tsv
    read_file = read_s3(bucket, os.path.join(prefix, 'metrics_report.tsv'))
    d = {}  # read the values into d
    for line in read_file.rstrip().split('\n'):
        k, v = line.split('\t')
        d.setdefault(k, v)  # everything is string now
    # times into datetime objects
    starttime = cls.convert_timestamp_to_datetime(d['Start_Time'])
    try:
        endtime = cls.convert_timestamp_to_datetime(d['End_Time'])
    except:  # temporary fix for retrocompatibility
        # NOTE(review): bare except also swallows real parse errors;
        # a KeyError catch would be safer -- left as-is here.
        if 'End_time' in d:
            endtime = cls.convert_timestamp_to_datetime(d['End_time'])
        else:
            endtime = cls.convert_timestamp_to_datetime(d['Time_of_Request'])
    # older reports may lack cost fields; fall back to '---' / "NA" markers
    cost = d['Cost'] if 'Cost' in d else '---'
    estimated_cost = (float)(d['Estimated_Cost']) if 'Estimated_Cost' in d else 0.0
    estimated_cost = str(estimated_cost) if estimated_cost > 0.0 else '---'
    cost_estimate_type = d['Estimated_Cost_Type'] if 'Estimated_Cost_Type' in d else "NA"
    instance = d['Instance_Type'] if 'Instance_Type' in d else '---'
    metrics_contents = read_s3(bucket, os.path.join(prefix, 'metrics.tsv'))
    (
        metrics_col_js,
        cpu_metrics_col,
        metrics_data_js,
        metrics_data,
    ) = TibannaResource.extract_metrics_data(metrics_contents)
    (
        max_mem_used_MB_js,
        min_mem_available_MB_js,
        max_disk_space_used_GB_js,
        max_mem_utilization_percent_js,
        max_disk_space_utilization_percent_js,
        max_cpu_utilization_percent_js,
    ) = TibannaResource.format_metrics_tsv_data(metrics_data)
    top_cpu_contents = read_s3(bucket, os.path.join(prefix, 'top_cpu.tsv'))
    cpu_columns_js, cpu_columns, cpu_data_js, cpu_data = TibannaResource.extract_metrics_data(top_cpu_contents)
    top_mem_contents = read_s3(bucket, os.path.join(prefix, 'top_mem.tsv'))
    mem_columns_js, mem_columns, mem_data_js, mem_data = TibannaResource.extract_metrics_data(top_mem_contents)
    # writing -- argument order must match the %s placeholders in create_html()
    html_content = cls.create_html() % (cls.report_title, instance,
        d['Maximum_Memory_Used_Mb'], d['Minimum_Memory_Available_Mb'], d['Maximum_Disk_Used_Gb'],
        d['Maximum_Memory_Utilization'], d['Maximum_CPU_Utilization'], d['Maximum_Disk_Utilization'],
        cost,
        estimated_cost, cost_estimate_type,
        str(starttime), str(endtime), str(endtime-starttime),
        max_mem_used_MB_js, min_mem_available_MB_js, max_disk_space_used_GB_js,
        max_mem_utilization_percent_js, max_disk_space_utilization_percent_js, max_cpu_utilization_percent_js,
        cpu_columns_js, cpu_data_js,
        mem_columns_js, mem_data_js
        )
    s3_key = os.path.join(prefix, 'metrics.html')
    # use server-side KMS encryption when the deployment configures a key id
    if S3_ENCRYT_KEY_ID:
        put_object_s3(content=html_content, key=s3_key, bucket=bucket,
                      encrypt_s3_upload=True, kms_key_id=S3_ENCRYT_KEY_ID)
    else:
        put_object_s3(content=html_content, key=s3_key, bucket=bucket)
@staticmethod
def write_top_tsvs(directory, top_content):
    """Digest raw ``top`` command output and write per-process CPU and
    memory tables to ``<directory>/top_cpu.tsv`` and ``top_mem.tsv``.

    :param directory: output directory (created if missing)
    :param top_content: raw text captured from the ``top`` command
    :return: list of the two written file paths (cpu first, mem second)
    """
    TibannaResource.check_mkdir(directory)
    parsed = Top(top_content)
    parsed.digest()
    filenames = [directory + '/' + name
                 for name in ('top_cpu.tsv', 'top_mem.tsv')]
    for path, metric in zip(filenames, ('cpu', 'mem')):
        parsed.write_to_csv(path, delimiter='\t', metric=metric,
                            colname_for_timestamps='interval', base=1)
    return filenames
def write_tsv(self, directory, **kwargs):  # kwargs, key: (chunks_all_pts, interval), interval is 1 or 5 min
    """Write all metric time series into ``<directory>/metrics.tsv``.

    Each keyword argument is one column: the key is the column name and the
    value is an iterable of per-interval chunks (lists) that are flattened
    into a single series.  Rows carry a 1-based 'interval' index; columns
    shorter than the first column are padded with '-'.

    :param directory: output directory (created if missing)
    :param kwargs: column_name -> iterable of data chunks
        (presumably one chunk per CloudWatch query window -- TODO confirm)
    :return: path of the written TSV file
    """
    self.check_mkdir(directory)
    filename = directory + '/' + 'metrics.tsv'
    # flatten each column's chunks into one series, preserving kwarg order
    # (replaces the former side-effecting list comprehension)
    columns = list(kwargs.keys())
    data_unpacked = [[value for chunk in arg for value in chunk]
                     for arg in kwargs.values()]
    with open(filename, 'w') as fo:
        fo.write('\t'.join(['interval'] + columns) + '\n')
        # the row count follows the first column, as before
        for i in range(len(data_unpacked[0])):
            row = [str(i + 1)]
            for data in data_unpacked:
                # explicit bounds check instead of the former bare `except:`,
                # which would also have swallowed KeyboardInterrupt etc.
                row.append(str(data[i]) if i < len(data) else '-')
            fo.write('\t'.join(row) + '\n')
    return filename
def write_metrics(self, instance_type, directory):
    """Dump the run's summary metrics as a two-column TSV table to
    ``<directory>/metrics_report.tsv``.

    :param instance_type: EC2 instance type string recorded in the report
    :param directory: output directory (created if missing)
    :return: path of the written TSV file
    """
    self.check_mkdir(directory)
    filename = directory + '/' + 'metrics_report.tsv'
    # (name, value) pairs in the exact order expected by update_html()
    rows = [
        ('Metric', 'Value'),
        ('Maximum_Memory_Used_Mb', str(self.max_mem_used_MB)),
        ('Minimum_Memory_Available_Mb', str(self.min_mem_available_MB)),
        ('Maximum_Disk_Used_Gb', str(self.max_disk_space_used_GB)),
        ('Maximum_Memory_Utilization', str(self.max_mem_utilization_percent)),
        ('Maximum_CPU_Utilization', str(self.max_cpu_utilization_percent)),
        ('Maximum_Disk_Utilization', str(self.max_disk_space_utilization_percent)),
        ('Start_Time', str(self.start)),
        ('End_Time', str(self.end)),
        ('Instance_Type', instance_type),
        ('Estimated_Cost', str(self.cost_estimate)),
        ('Estimated_Cost_Type', str(self.cost_estimate_type)),
    ]
    with open(filename, 'w') as fo:
        for name, value in rows:
            fo.write(name + '\t' + value + '\n')
    return filename
@staticmethod
def check_mkdir(directory):
if not os.path.exists(directory):
os.makedirs(directory)
@classmethod
def create_html(cls):
    """Return the metrics-report HTML/D3 page as a %-format template.

    The template contains 24 ``%s`` placeholders, in order: report title,
    instance type, six summary metrics, cost, estimated cost and its type,
    start/end/total time, six metric JS arrays, then CPU columns/data and
    memory columns/data for the bar plots.  Literal percent signs are
    escaped as ``%%`` throughout.  Callers are write_html()/update_html().
    """
    html = """\
<!DOCTYPE html>
<meta charset="utf-8">
<link href="https://fonts.googleapis.com/css?family=Source+Sans+Pro:200,300,400,600,700,900,300i,400i,600i" rel="stylesheet"/>
<style type="text/css">
:root {
font-size: 16px
}
body{ margin: 0; }
/* Basic Styling with CSS */
h1 {
font-family: "Source Sans Pro", sans-serif;
color: #D3DADF;
font-weight: lighter;
font-size: 1.7rem;
padding-left: 50px;
}
h2 {
text-align: center;
font-family: "Source Sans Pro", sans-serif;
font-size: 1rem;
padding: 13px;
color: #ffffff;
font-weight: normal;
}
p {
font-size: .9rem;
font-family: "Source Sans Pro", sans-serif;
}
text {
font-family: "Source Sans Pro", sans-serif;
font-weight: normal;
font-size: .825rem;
}
table {
font-family: "Source Sans Pro", sans-serif;
width: 40%%;
border-collapse: collapse;
}
.right {
text-align: right;
}
.center {
text-align: center;
}
.left {
text-align: left;
}
td {
border-bottom: 1px solid #dddddd;
padding: 11px;
font-size: .925rem;
}
th {
padding: 13px;
font-size: 1rem;
background-color: #2C6088;
color: #ffffff;
font-weight: normal;
}
div {
display: block;
height: 500px;
width: 100%%;
}
.logo {
max-height: 81px;
width: 100%%;
background-color: #20445E;
display: flex;
align-items: center;
}
.header {
margin-left: auto;
margin-right: auto;
height: auto;
width: 85%%;
background-color: #2C6088;
}
.barplot {
height: 300px;
}
.barplot_legend {
height: 350px;
}
/* Style the lines by removing the fill and applying a stroke */
.line {
fill: none;
stroke: #cc0000;
stroke-width: 2;
}
.overlay {
fill: none;
pointer-events: all;
}
/* Legend */
.data-name {
margin: 0 !important;
}
.key-dot {
display: inline-block;
height: 7px;
margin-right: .5em;
width: 7px;
}
.mem { background: #2008FF;}
.cpu { background: #800380;}
.disk { background: #218000;
}
#legend{
overflow:hidden;
}
.legend {
position: relative;
float:left;
height: auto;
width: 100px;
}
.legend-wrapper {
margin-left: 150px;
height: auto;
}
/* Grid */
.grid line {
stroke: #e6f2ff;
stroke-opacity: 0.9;
shape-rendering: crispEdges;
}
.grid path {
stroke-width: 0;
}
</style>
<!-- Body tag is where we will append our SVG and SVG objects-->
<body>
<div class="logo">
<h1>%s</h1>
</div></br></br>
<section>
</br>
<table align="center">
<tr>
<th colspan="2">General Information</th>
</tr>
<tr>
<td class="left">EC2 Instance Type</td>
<td class="center">%s</td>
</tr>
</table>
</br></br>
<table align="center">
<tr>
<th colspan="2">Metrics</th>
</tr>
<tr>
<td class="left">Maximum Memory Used [Mb]</td>
<td class="center">%s</td>
</tr>
<tr>
<td class="left">Minimum Memory Available [Mb]</td>
<td class="center">%s</td>
</tr>
<tr>
<td class="left">Maximum Disk Used (/data1) [Gb]</td>
<td class="center">%s</td>
</tr>
<tr>
<td class="left">Maximum Memory Utilization [%%]</td>
<td class="center">%s</td>
</tr>
<tr>
<td class="left">Maximum CPU Utilization [%%]</td>
<td class="center">%s</td>
</tr>
<tr>
<td class="left">Maximum Disk Utilization (/data1) [%%]</td>
<td class="center">%s</td>
</tr>
<tr>
<td class="left">Cost</td>
<td class="center">%s</td>
</tr>
<tr>
<td class="left">Cost (estimated) (USD)</td>
<td class="center">%s (%s)</td>
</tr>
</table>
</br></br>
<table align="center">
<tr>
<th class="left">Start Time [UTC]</th>
<th class="left">End Time [UTC]</th>
<th class="left">Total Time</th>
</tr>
<tr>
<td class="left">%s</td>
<td class="left">%s</td>
<td class="left">%s</td>
</tr>
</table>
</section>
</br></br>
<section>
<div class="header">
<h2>Resources Utilization</h2>
</div>
<div id="chart_percent">
<div class="legend-wrapper">
<div class="legend"> <p class="data-name"><span class="key-dot cpu"></span>CPU Utilization</p> </div>
<div class="legend"> <p class="data-name"><span class="key-dot mem"></span>Memory Utilization</p> </div>
<div class="legend"> <p class="data-name"><span class="key-dot disk"></span>Disk Utilization (/data1)</p> </div>
</div>
</div></br></br>
<div class="header">
<h2>Memory Usage</h2>
</div>
<div id="chart_max_mem"> </div>
<div class="header">
<h2>Memory Available</h2>
</div>
<div id="chart_min_mem"> </div>
<div class="header">
<h2>Disk Usage (/data1)</h2>
</div>
<div id="chart_disk"> </div>
<div class="header">
<h2>CPU Usage Per Process (from Top command)</h2>
</div>
<div class="barplot" id="bar_chart_cpu"> </div>
<div id="bar_chart_cpu_legend" class="barplot_legend"> </div>
<div class="header">
<h2>Memory Usage Per Process (from Top command)</h2>
</div>
<div class="barplot" id="bar_chart_mem"> </div>
<div id="bar_chart_mem_legend" class="barplot_legend"> </div>
</section>
</body>
<!-- Load in the d3 library -->
<script src="https://d3js.org/d3.v5.min.js"></script>
<script>
//var onResize = _.debounce(function(){
//    svgElem.innerHTML = '';
//    line_plot();
//});
//window.onload = function(){
//    window.addEventListener('resize', onResize);
//}
/* Functions definition */
function make_x_gridlines(x, n) {
var n_l = 0
if (n < 1440) {
n_l = n / 10
} else { // runtime longer than a day
n_l = n / 60
}
return d3.axisBottom(x)
.ticks(n_l)
}
function make_y_gridlines(y, n) {
var n_l = 0
if (n <= 200) {
n_l = n / 10
} else if (n <= 500) {
n_l = n / 50
} else if (n <= 2000) {
n_l = n / 100
} else if (n <= 5000) {
n_l = n / 500
} else if (n <= 20000) {
n_l = n / 1000
} else {
n_l = n / 5000
}
return d3.axisLeft(y)
.ticks(n_l)
}
function percent_plot(data_array, div) { // data_array = [data_mem, data_disk, data_cpu]
// Get div dimensions
var div_width = document.getElementById(div).offsetWidth
, div_height = document.getElementById(div).offsetHeight;
// Use the margin convention practice
var margin = {top: 40, right: 150, bottom: 100, left: 150}
, width = div_width - margin.left - margin.right // Use the window's width
, height = div_height - margin.top - margin.bottom; // Use the window's height
// Dataset as y values
data_mem = data_array[0]
data_disk = data_array[1]
data_cpu = data_array[2]
// The number of datapoints
var n_data = data_mem.length;
var n = 0
if (n_data < 5) {
n = 5
} else {
n = n_data
}
var n_cpu = data_cpu.length;
// X scale will use the index of our data
var xScale = d3.scaleLinear()
.domain([0, n]) // input
.range([0, width]); // output
// X scale for CPU utilization
var xScale_cpu = d3.scaleLinear()
.domain([0, n_cpu]) // input
.range([0, width*(n_cpu)/(n)]); // output
// Y scale will use the randomly generate number
var yScale = d3.scaleLinear()
.domain([0, 100]) // input
.range([height, 0]); // output
// d3's line generator
var line = d3.line()
.x(function(d, i) { return xScale(i) + xScale(1); }) // set the x values for the line generator
.y(function(d) { return yScale(d.y); }) // set the y values for the line generator
//.curve(d3.curveMonotoneX) // apply smoothing to the line
// d3's line generator for CPU utilization
var line_cpu = d3.line()
.x(function(d, i) { return xScale_cpu(i) + xScale(1); }) // set the x values for the line generator
.y(function(d) { return yScale(d.y); }) // set the y values for the line generator
//.curve(d3.curveMonotoneX) // apply smoothing to the line
// An array of objects of length N. Each object has key -> value pair, the key being "y" and the value is a random number
var dataset_mem = d3.range(n_data).map(function(d) { return {"y": data_mem[d] } })
var dataset_disk = d3.range(n_data).map(function(d) { return {"y": data_disk[d] } })
var dataset_cpu = d3.range(n_cpu).map(function(d) { return {"y": data_cpu[d] } })
// Add the SVG to the page
var svg = d3.select("#" + div).append("svg")
.attr("width", width + margin.left + margin.right)
.attr("height", height + margin.top + margin.bottom)
.append("g")
.attr("transform", "translate(" + margin.left + "," + margin.top + ")");
// Add the X gridlines
svg.append("g")
.attr("class", "grid")
.attr("transform", "translate(0," + height + ")")
.call(make_x_gridlines(xScale, n)
.tickSize(-height)
.tickFormat("")
)
// Add the Y gridlines
svg.append("g")
.attr("class", "grid")
.call(make_y_gridlines(yScale, 100)
.tickSize(-width)
.tickFormat("")
)
// Call the x axis in a group tag
svg.append("g")
.attr("class", "x axis")
.attr("transform", "translate(0," + height + ")")
.call(d3.axisBottom(xScale)); // Create an axis component with d3.axisBottom
// Call the y axis in a group tag
svg.append("g")
.attr("class", "y axis")
.call(d3.axisLeft(yScale)); // Create an axis component with d3.axisLeft
// Append the path, bind the data, and call the line generator
svg.append("path")
.datum(dataset_mem) // Binds data to the line
.attr("class", "line") // Assign a class for styling
.style("stroke", "blue")
.attr("d", line); // Calls the line generator
// Append the path, bind the data, and call the line generator
svg.append("path")
.datum(dataset_disk) // Binds data to the line
.attr("class", "line") // Assign a class for styling
.style("stroke", "green")
.attr("d", line); // Calls the line generator
// Append the path, bind the data, and call the line generator
svg.append("path")
.datum(dataset_cpu) // Binds data to the line
.attr("class", "line") // Assign a class for styling
.style("stroke", "purple")
.attr("d", line_cpu); // Calls the line generator
svg.append("text")
.attr("transform", "translate(" + (width / 2) + " ," + (height + margin.bottom - margin.bottom / 2) + ")")
.style("text-anchor", "middle")
.text("Time [min]");
svg.append("text")
.attr("transform", "rotate(-90)")
.attr("y", 0 - margin.left + margin.left / 2)
.attr("x",0 - (height / 2))
.attr("dy", "1em")
.style("text-anchor", "middle")
.text('Percentage [%%]');
}
function line_plot(data, div, axis_label) {
// Get div dimensions
var div_width = document.getElementById(div).offsetWidth
, div_height = document.getElementById(div).offsetHeight;
// Use the margin convention practice
var margin = {top: 20, right: 150, bottom: 100, left: 150}
, width = div_width - margin.left - margin.right // Use the window's width
, height = div_height - margin.top - margin.bottom; // Use the window's height
// The number of datapoints
var n_data = data.length;
var n = 0
if (n_data < 5) {
n = 5
} else {
n = n_data
}
// X scale will use the index of our data
var xScale = d3.scaleLinear()
.domain([0, n]) // input
.range([0, width]); // output
// Y scale will use the randomly generate number
var yScale = d3.scaleLinear()
.domain([0, d3.max(data)]) // input
.range([height, 0]); // output
// d3's line generator
var line = d3.line()
.x(function(d, i) { return xScale(i) + xScale(1); }) // set the x values for the line generator
.y(function(d) { return yScale(d.y); }) // set the y values for the line generator
//.curve(d3.curveMonotoneX) // apply smoothing to the line
// An array of objects of length N. Each object has key -> value pair, the key being "y" and the value is a random number
var dataset = d3.range(n_data).map(function(d) { return {"y": data[d] } })
// Add the SVG to the page
var svg = d3.select("#" + div).append("svg")
.attr("width", width + margin.left + margin.right)
.attr("height", height + margin.top + margin.bottom)
.append("g")
.attr("transform", "translate(" + margin.left + "," + margin.top + ")");
// Add the X gridlines
svg.append("g")
.attr("class", "grid")
.attr("transform", "translate(0," + height + ")")
.call(make_x_gridlines(xScale, n)
.tickSize(-height)
.tickFormat("")
)
// Add the Y gridlines
svg.append("g")
.attr("class", "grid")
.call(make_y_gridlines(yScale, d3.max(data))
.tickSize(-width)
.tickFormat("")
)
// Call the x axis in a group tag
svg.append("g")
.attr("class", "x axis")
.attr("transform", "translate(0," + height + ")")
.call(d3.axisBottom(xScale)); // Create an axis component with d3.axisBottom
// Call the y axis in a group tag
svg.append("g")
.attr("class", "y axis")
.call(d3.axisLeft(yScale)); // Create an axis component with d3.axisLeft
// Append the path, bind the data, and call the line generator
svg.append("path")
.datum(dataset) // Binds data to the line
.attr("class", "line") // Assign a class for styling
.attr("d", line); // Calls the line generator
svg.append("text")
.attr("transform", "translate(" + (width / 2) + " ," + (height + margin.bottom - margin.bottom / 2) + ")")
.style("text-anchor", "middle")
.text("Time [min]");
svg.append("text")
.attr("transform", "rotate(-90)")
.attr("y", 0 - margin.left + margin.left / 2)
.attr("x",0 - (height / 2))
.attr("dy", "1em")
.style("text-anchor", "middle")
.text(axis_label);
}
var barplot_colors = ['black', 'red', 'green', 'blue', 'magenta', 'yellow', 'cyan',
'pink', 'mediumslateblue', 'maroon', 'orange',
'gray', 'palegreen', 'mediumvioletred', 'deepskyblue',
'rosybrown', 'lightgrey', 'indigo', 'cornflowerblue']
function bar_plot(data_array, div, axis_label) {
// Get div dimensions
var div_width = document.getElementById(div).offsetWidth
, div_height = document.getElementById(div).offsetHeight;
// Use the margin convention practice
var margin = {top: 20, right: 150, bottom: 100, left: 150}
, width = div_width - margin.left - margin.right // Use the window's width
, height = div_height - margin.top - margin.bottom; // Use the window's height
// number of different colors (also number of columns to visualize together)
var n_cols = data_array.length
// The number of datapoints
var n_data = data_array[0].length;
var n = 0
if (n_data < 5) {
n = 5
} else {
n = n_data
}
// sum for each timepoint, to calculate y scale
sum_array = d3.range(n_data).map(function(d) {
var sum = 0
for( col=0; col<n_cols; col++) sum += data_array[col][d]
return sum
})
// X scale will use the index of our data
var xScale = d3.scaleLinear()
.domain([0, n]) // input
.range([0, width]) // output
// Y scale will use the randomly generate number
var yScale = d3.scaleLinear()
.domain([0, d3.max(sum_array)]) // input
.range([height, 0]); // output
// An array of objects of length N. Each object has key -> value pair, the key being "y" and the value is a random number
// Add the SVG to the page
var svg = d3.select("#" + div).append("svg")
.attr("width", width + margin.left + margin.right)
.attr("height", height + margin.top + margin.bottom)
.append("g")
.attr("transform", "translate(" + margin.left + "," + margin.top + ")");
// Add the X gridlines
svg.append("g")
.attr("class", "grid")
.attr("transform", "translate(0," + height + ")")
.call(make_x_gridlines(xScale, n)
.tickSize(-height)
.tickFormat("")
)
// Add the Y gridlines
svg.append("g")
.attr("class", "grid")
.call(make_y_gridlines(yScale, d3.max(sum_array))
.tickSize(-width)
.tickFormat("")
)
// Call the x axis in a group tag
svg.append("g")
.attr("class", "x axis")
.attr("transform", "translate(0," + height + ")")
.call(d3.axisBottom(xScale)); // Create an axis component with d3.axisBottom
// Call the y axis in a group tag
svg.append("g")
.attr("class", "y axis")
.call(d3.axisLeft(yScale)); // Create an axis component with d3.axisLeft
// Add rectangles, bind the data
var data_array_cum = data_array // dimension and index 0 should be the same
for( var col=0; col<n_cols; col++) {
if(col == 0) {
data_array_cum[col] = d3.range(n_data).map(function(d) { return data_array[col][d] })
var dataset = d3.range(n_data).map(function(d) { return {"prev_y": 0, "y": data_array_cum[col][d]} })
}
if(col > 0) {
data_array_cum[col] = d3.range(n_data).map(function(d) { return data_array_cum[col-1][d] + data_array[col][d] })
var dataset = d3.range(n_data).map(function(d) { return {"prev_y": data_array_cum[col-1][d], "y": data_array_cum[col][d]} })
}
//var dataset = d3.range(n_data).map(function(d) { return {"dy": data_array[col][d], "y": data_array_cum[col][d]} })
svg.selectAll(".bar")
.data(dataset)
.enter()
.append('rect')
.attr("class", "bar" + col)
.attr("fill", barplot_colors[col])
.attr('x', function(d, i) { return xScale(i) + xScale(1) - xScale(0.5); })
.attr('y', function(d) { return yScale(d.y); })
.attr('height', function(d) { return yScale(d.prev_y) - yScale(d.y); })
.attr('width', xScale(1));
}
svg.append("text")
.attr("transform", "translate(" + (width / 2) + " ," + (height + margin.bottom - margin.bottom / 2) + ")")
.style("text-anchor", "middle")
.text("Time [min]");
svg.append("text")
.attr("transform", "rotate(-90)")
.attr("y", 0 - margin.left + margin.left / 2)
.attr("x",0 - (height / 2))
.attr("dy", "1em")
.style("text-anchor", "middle")
.text(axis_label);
}
function bar_plot_legend(legend_text, div) {
// Get div dimensions
var div_width = document.getElementById(div).offsetWidth
, div_height = document.getElementById(div).offsetHeight;
// Use the margin convention practice
var margin = {top: 20, right: 150, bottom: 100, left: 150}
, width = div_width - margin.left - margin.right // Use the window's width
, height = div_height - margin.top - margin.bottom; // Use the window's height
// number of different colors (also number of columns to visualize together)
var n_cols = legend_text.length
// Add the SVG to the page
var svg = d3.select("#" + div).append("svg")
.attr("width", width + margin.left + margin.right)
.attr("height", height + margin.top + margin.bottom)
.append("g")
.attr("transform", "translate(" + margin.left + "," + margin.top + ")");
for( var col=0; col<n_cols; col++) {
var legend_y = 20 * col
var legend_symbol_radius = 5
var legend_x = 2 * legend_symbol_radius + 10
// legend text
svg.append("text")
.attr("transform", "translate(" + legend_x + " ," + legend_y + ")")
.attr("text-anchor", "left")
.text(legend_text[col])
// legend circles with colors
svg.append("circle")
.attr("cy", legend_y - legend_symbol_radius)
.attr("cx", legend_symbol_radius)
.attr("r", legend_symbol_radius)
.style("fill", barplot_colors[col])
}
}
/* Reading data and Plotting */
line_plot(%s, 'chart_max_mem', 'Memory used [Mb]');
line_plot(%s, 'chart_min_mem', 'Memory available [Mb]');
line_plot(%s, 'chart_disk', 'Disk space used [Gb]');
var resources_utilization = [%s, %s, %s];
percent_plot(resources_utilization, 'chart_percent');
var cpu_columns = %s;
var cpu_data = %s;
bar_plot(cpu_data, 'bar_chart_cpu', 'Total CPU (%%) [100%% = 1 CPU]');
bar_plot_legend(cpu_columns, 'bar_chart_cpu_legend');
var mem_columns = %s;
var mem_data = %s;
bar_plot(mem_data, 'bar_chart_mem', 'Total Mem (%% total available memory)');
bar_plot_legend(mem_columns, 'bar_chart_mem_legend');
</script>\
"""
    return(html)
| mit | f1294366204e41978463fbf4e6a17e37 | 46.050719 | 150 | 0.475809 | 4.076236 | false | false | false | false |
snap-stanford/ogb | ogb/lsc/mag240m.py | 1 | 7115 | from typing import Optional, Union, Dict
import os
import shutil
import os.path as osp
import torch
import numpy as np
from ogb.utils.url import decide_download, download_url, extract_zip, makedirs
from ogb.lsc.utils import split_test
class MAG240MDataset(object):
    """Loader for the MAG240M-LSC dataset (OGB large-scale challenge).

    Downloads/extracts the archive on first use and exposes node counts,
    official splits, per-paper feature/label/year arrays (optionally
    memory-mapped) and heterogeneous edge indexes.
    """

    version = 1

    # Old url hosted at Stanford
    # md5sum: bd61c9446f557fbe4430d9a7ce108b34
    # url = 'http://ogb-data.stanford.edu/data/lsc/mag240m_kddcup2021.zip'
    # New url hosted by DGL team at AWS--much faster to download
    url = 'https://dgl-data.s3-accelerate.amazonaws.com/dataset/OGB-LSC/mag240m_kddcup2021.zip'

    # default relation name per (src, dst) node-type pair
    __rels__ = {
        ('author', 'paper'): 'writes',
        ('author', 'institution'): 'affiliated_with',
        ('paper', 'paper'): 'cites',
    }

    def __init__(self, root: str = 'dataset'):
        if isinstance(root, str):
            root = osp.expanduser(osp.normpath(root))

        self.root = root
        self.dir = osp.join(root, 'mag240m_kddcup2021')

        # a stale on-disk copy is detected by a missing release marker file
        release_marker = osp.join(self.dir, f'RELEASE_v{self.version}.txt')
        if osp.isdir(self.dir) and not osp.exists(release_marker):
            print('MAG240M dataset has been updated.')
            if input('Will you update the dataset now? (y/N)\n') == 'y':
                shutil.rmtree(osp.join(self.dir))

        self.download()
        self.__meta__ = torch.load(osp.join(self.dir, 'meta.pt'))
        self.__split__ = torch.load(osp.join(self.dir, 'split_dict.pt'))
        split_test(self.__split__)

    def download(self):
        # no-op when the extracted directory is already present
        if osp.exists(self.dir):
            return
        if not decide_download(self.url):
            print('Stop download.')
            exit(-1)
        path = download_url(self.url, self.root)
        extract_zip(path, self.root)
        os.unlink(path)

    @property
    def num_papers(self) -> int:
        return self.__meta__['paper']

    @property
    def num_authors(self) -> int:
        return self.__meta__['author']

    @property
    def num_institutions(self) -> int:
        return self.__meta__['institution']

    @property
    def num_paper_features(self) -> int:
        # RoBERTa embedding dimensionality is fixed for this dataset
        return 768

    @property
    def num_classes(self) -> int:
        return self.__meta__['num_classes']

    def get_idx_split(
        self, split: Optional[str] = None
    ) -> Union[Dict[str, np.ndarray], np.ndarray]:
        """Return the full split dict, or a single split's indices."""
        if split is None:
            return self.__split__
        return self.__split__[split]

    def _load_paper_array(self, fname: str, mmap: bool) -> np.ndarray:
        # internal: load a per-paper numpy array, optionally memory-mapped
        path = osp.join(self.dir, 'processed', 'paper', fname)
        return np.load(path, mmap_mode='r' if mmap else None)

    @property
    def paper_feat(self) -> np.ndarray:
        return self._load_paper_array('node_feat.npy', mmap=True)

    @property
    def all_paper_feat(self) -> np.ndarray:
        return self._load_paper_array('node_feat.npy', mmap=False)

    @property
    def paper_label(self) -> np.ndarray:
        return self._load_paper_array('node_label.npy', mmap=True)

    @property
    def all_paper_label(self) -> np.ndarray:
        return self._load_paper_array('node_label.npy', mmap=False)

    @property
    def paper_year(self) -> np.ndarray:
        return self._load_paper_array('node_year.npy', mmap=True)

    @property
    def all_paper_year(self) -> np.ndarray:
        return self._load_paper_array('node_year.npy', mmap=False)

    def edge_index(self, id1: str, id2: str,
                   id3: Optional[str] = None) -> np.ndarray:
        """Load the edge index for (src, rel, dst); the relation name may be
        omitted (two-argument form), in which case it is looked up in
        ``__rels__``."""
        src = id1
        if id3 is None:
            rel, dst = None, id2
        else:
            rel, dst = id2, id3
        if rel is None:
            rel = self.__rels__[(src, dst)]
        name = f'{src}___{rel}___{dst}'
        return np.load(osp.join(self.dir, 'processed', name, 'edge_index.npy'))

    def __repr__(self) -> str:
        return f'{type(self).__name__}()'
class MAG240MEvaluator:
    """Accuracy evaluator and test-submission writer for MAG240M."""

    def eval(self, input_dict):
        """Return ``{'acc': ...}`` for 1-D prediction/label arrays or tensors."""
        assert 'y_pred' in input_dict and 'y_true' in input_dict
        y_pred = input_dict['y_pred']
        y_true = input_dict['y_true']

        # normalize numpy inputs to torch tensors
        if not isinstance(y_pred, torch.Tensor):
            y_pred = torch.from_numpy(y_pred)
        if not isinstance(y_true, torch.Tensor):
            y_true = torch.from_numpy(y_true)

        assert y_true.numel() == y_pred.numel()
        assert y_true.dim() == y_pred.dim() == 1

        correct = int((y_true == y_pred).sum())
        return {'acc': correct / y_true.numel()}

    def save_test_submission(self, input_dict: Dict, dir_path: str, mode: str):
        """Write predictions for a hidden test split as a compressed .npz."""
        assert 'y_pred' in input_dict
        assert mode in ['test-whole', 'test-dev', 'test-challenge']
        y_pred = input_dict['y_pred']

        # expected prediction count and output basename per split
        spec = {
            'test-whole': (146818, 'y_pred_mag240m'),
            'test-dev': (88092, 'y_pred_mag240m_test-dev'),
            'test-challenge': (58726, 'y_pred_mag240m_test-challenge'),
        }
        num_rows, basename = spec[mode]
        assert y_pred.shape == (num_rows, )
        filename = osp.join(dir_path, basename)

        makedirs(dir_path)
        if isinstance(y_pred, torch.Tensor):
            y_pred = y_pred.cpu().numpy()
        # short (int16) keeps the submission file small
        np.savez_compressed(filename, y_pred=y_pred.astype(np.short))
if __name__ == '__main__':
    # Ad-hoc smoke test / usage demo; requires the (very large) dataset on
    # disk or an interactive download on first run.
    dataset = MAG240MDataset()
    print(dataset)
    print(dataset.num_papers)
    print(dataset.num_authors)
    print(dataset.num_institutions)
    print(dataset.num_classes)
    split_dict = dataset.get_idx_split()
    print(split_dict['train'].shape)
    print(split_dict['valid'].shape)
    print('-----------------')
    print(split_dict['test-dev'].shape)
    print(split_dict['test-whole'].shape)
    print(split_dict['test-challenge'].shape)

    # write dummy random predictions for both hidden test splits
    evaluator = MAG240MEvaluator()
    evaluator.save_test_submission(
        input_dict = {
            'y_pred': np.random.randint(100, size = split_dict['test-dev'].shape),
        },
        dir_path = 'results',
        mode = 'test-dev'
    )
    evaluator.save_test_submission(
        input_dict = {
            'y_pred': np.random.randint(100, size = split_dict['test-challenge'].shape),
        },
        dir_path = 'results',
        mode = 'test-challenge'
    )

    exit(-1)  # NOTE: everything below is intentionally unreachable debug code

    print(dataset.paper_feat.shape)
    print(dataset.paper_year.shape)
    print(dataset.paper_year[:100])
    print(dataset.edge_index('author', 'paper').shape)
    print(dataset.edge_index('author', 'writes', 'paper').shape)
    print(dataset.edge_index('author', 'writes', 'paper')[:, :10])
    print('-----------------')

    train_idx = dataset.get_idx_split('train')
    val_idx = dataset.get_idx_split('valid')
    test_idx = dataset.get_idx_split('test-whole')
    print(len(train_idx) + len(val_idx) + len(test_idx))
    print(dataset.paper_label[train_idx][:10])
    print(dataset.paper_label[val_idx][:10])
    print(dataset.paper_label[test_idx][:10])
    print(dataset.paper_year[train_idx][:10])
    print(dataset.paper_year[val_idx][:10])
    print(dataset.paper_year[test_idx][:10])
| mit | 8736d38c626283e10620175555719a5e | 31.788018 | 95 | 0.581448 | 3.271264 | false | true | false | false |
snap-stanford/ogb | examples/graphproppred/mol/conv.py | 1 | 8741 | import torch
from torch_geometric.nn import MessagePassing
import torch.nn.functional as F
from torch_geometric.nn import global_mean_pool, global_add_pool
from ogb.graphproppred.mol_encoder import AtomEncoder,BondEncoder
from torch_geometric.utils import degree
import math
### GIN convolution along the graph structure
class GINConv(MessagePassing):
    """GIN layer with bond-feature edge encoding and sum aggregation."""

    def __init__(self, emb_dim):
        '''
            emb_dim (int): node embedding dimensionality
        '''
        super(GINConv, self).__init__(aggr = "add")

        hidden_dim = 2 * emb_dim
        self.mlp = torch.nn.Sequential(
            torch.nn.Linear(emb_dim, hidden_dim),
            torch.nn.BatchNorm1d(hidden_dim),
            torch.nn.ReLU(),
            torch.nn.Linear(hidden_dim, emb_dim),
        )
        # learnable epsilon weighting the central node, initialized to 0
        self.eps = torch.nn.Parameter(torch.Tensor([0]))
        self.bond_encoder = BondEncoder(emb_dim = emb_dim)

    def forward(self, x, edge_index, edge_attr):
        edge_embedding = self.bond_encoder(edge_attr)
        aggregated = self.propagate(edge_index, x=x, edge_attr=edge_embedding)
        return self.mlp((1 + self.eps) * x + aggregated)

    def message(self, x_j, edge_attr):
        # combine neighbor feature with edge embedding before aggregation
        return F.relu(x_j + edge_attr)

    def update(self, aggr_out):
        return aggr_out
### GCN convolution along the graph structure
class GCNConv(MessagePassing):
def __init__(self, emb_dim):
super(GCNConv, self).__init__(aggr='add')
self.linear = torch.nn.Linear(emb_dim, emb_dim)
self.root_emb = torch.nn.Embedding(1, emb_dim)
self.bond_encoder = BondEncoder(emb_dim = emb_dim)
def forward(self, x, edge_index, edge_attr):
x = self.linear(x)
edge_embedding = self.bond_encoder(edge_attr)
row, col = edge_index
#edge_weight = torch.ones((edge_index.size(1), ), device=edge_index.device)
deg = degree(row, x.size(0), dtype = x.dtype) + 1
deg_inv_sqrt = deg.pow(-0.5)
deg_inv_sqrt[deg_inv_sqrt == float('inf')] = 0
norm = deg_inv_sqrt[row] * deg_inv_sqrt[col]
return self.propagate(edge_index, x=x, edge_attr = edge_embedding, norm=norm) + F.relu(x + self.root_emb.weight) * 1./deg.view(-1,1)
def message(self, x_j, edge_attr, norm):
return norm.view(-1, 1) * F.relu(x_j + edge_attr)
def update(self, aggr_out):
return aggr_out
### GNN to generate node embedding
class GNN_node(torch.nn.Module):
    """
    Stack of GIN/GCN message-passing layers over a molecular graph.

    Output:
        node representations
    """
    def __init__(self, num_layer, emb_dim, drop_ratio = 0.5, JK = "last", residual = False, gnn_type = 'gin'):
        '''
            emb_dim (int): node embedding dimensionality
            num_layer (int): number of GNN message passing layers
            drop_ratio (float): dropout probability applied after each layer
            JK (str): jumping-knowledge mode, "last" (final layer only)
                or "sum" (sum over all layers including the input embedding)
            residual (bool): whether to add residual connections
            gnn_type (str): 'gin' or 'gcn'
        '''
        super(GNN_node, self).__init__()
        self.num_layer = num_layer
        self.drop_ratio = drop_ratio
        self.JK = JK
        ### add residual connection or not
        self.residual = residual

        if self.num_layer < 2:
            raise ValueError("Number of GNN layers must be greater than 1.")

        self.atom_encoder = AtomEncoder(emb_dim)

        ###List of GNNs
        self.convs = torch.nn.ModuleList()
        self.batch_norms = torch.nn.ModuleList()

        for layer in range(num_layer):
            if gnn_type == 'gin':
                self.convs.append(GINConv(emb_dim))
            elif gnn_type == 'gcn':
                self.convs.append(GCNConv(emb_dim))
            else:
                raise ValueError('Undefined GNN type called {}'.format(gnn_type))

            self.batch_norms.append(torch.nn.BatchNorm1d(emb_dim))

    def forward(self, batched_data):
        # batched_data: a PyG Batch with x, edge_index, edge_attr, batch
        x, edge_index, edge_attr, batch = batched_data.x, batched_data.edge_index, batched_data.edge_attr, batched_data.batch

        ### computing input node embedding
        h_list = [self.atom_encoder(x)]
        for layer in range(self.num_layer):

            h = self.convs[layer](h_list[layer], edge_index, edge_attr)
            h = self.batch_norms[layer](h)

            if layer == self.num_layer - 1:
                #remove relu for the last layer
                h = F.dropout(h, self.drop_ratio, training = self.training)
            else:
                h = F.dropout(F.relu(h), self.drop_ratio, training = self.training)

            if self.residual:
                h += h_list[layer]

            h_list.append(h)

        ### Different implementations of Jk-concat
        # NOTE: any JK value other than "last"/"sum" leaves
        # node_representation unassigned and raises NameError below.
        if self.JK == "last":
            node_representation = h_list[-1]
        elif self.JK == "sum":
            node_representation = 0
            for layer in range(self.num_layer + 1):
                node_representation += h_list[layer]

        return node_representation
### Virtual GNN to generate node embedding
class GNN_node_Virtualnode(torch.nn.Module):
    """
    Stack of GNN layers augmented with a per-graph virtual node that
    exchanges messages with all graph nodes at every layer.

    Output:
        node representations
    """
    def __init__(self, num_layer, emb_dim, drop_ratio = 0.5, JK = "last", residual = False, gnn_type = 'gin'):
        '''
            emb_dim (int): node embedding dimensionality
            num_layer (int): number of GNN message passing layers
            drop_ratio (float): dropout probability applied after every layer
            JK (str): jumping-knowledge aggregation, "last" or "sum"
            residual (bool): whether to add residual connections
            gnn_type (str): 'gin' or 'gcn'
        '''
        super(GNN_node_Virtualnode, self).__init__()
        self.num_layer = num_layer
        self.drop_ratio = drop_ratio
        self.JK = JK
        ### add residual connection or not
        self.residual = residual
        if self.num_layer < 2:
            raise ValueError("Number of GNN layers must be greater than 1.")
        self.atom_encoder = AtomEncoder(emb_dim)
        ### set the initial virtual node embedding to 0.
        self.virtualnode_embedding = torch.nn.Embedding(1, emb_dim)
        torch.nn.init.constant_(self.virtualnode_embedding.weight.data, 0)
        ### List of GNNs
        self.convs = torch.nn.ModuleList()
        ### batch norms applied to node embeddings
        self.batch_norms = torch.nn.ModuleList()
        ### List of MLPs to transform virtual node at every layer
        self.mlp_virtualnode_list = torch.nn.ModuleList()
        for layer in range(num_layer):
            if gnn_type == 'gin':
                self.convs.append(GINConv(emb_dim))
            elif gnn_type == 'gcn':
                self.convs.append(GCNConv(emb_dim))
            else:
                raise ValueError('Undefined GNN type called {}'.format(gnn_type))
            self.batch_norms.append(torch.nn.BatchNorm1d(emb_dim))
        # One virtual-node MLP per layer transition (hence num_layer - 1).
        for layer in range(num_layer - 1):
            self.mlp_virtualnode_list.append(torch.nn.Sequential(torch.nn.Linear(emb_dim, 2*emb_dim), torch.nn.BatchNorm1d(2*emb_dim), torch.nn.ReLU(), \
                                                    torch.nn.Linear(2*emb_dim, emb_dim), torch.nn.BatchNorm1d(emb_dim), torch.nn.ReLU()))

    def forward(self, batched_data):
        x, edge_index, edge_attr, batch = batched_data.x, batched_data.edge_index, batched_data.edge_attr, batched_data.batch
        ### virtual node embeddings for graphs
        virtualnode_embedding = self.virtualnode_embedding(torch.zeros(batch[-1].item() + 1).to(edge_index.dtype).to(edge_index.device))
        h_list = [self.atom_encoder(x)]
        for layer in range(self.num_layer):
            ### add message from virtual nodes to graph nodes
            h_list[layer] = h_list[layer] + virtualnode_embedding[batch]
            ### Message passing among graph nodes
            h = self.convs[layer](h_list[layer], edge_index, edge_attr)
            h = self.batch_norms[layer](h)
            if layer == self.num_layer - 1:
                #remove relu for the last layer
                h = F.dropout(h, self.drop_ratio, training = self.training)
            else:
                h = F.dropout(F.relu(h), self.drop_ratio, training = self.training)
            if self.residual:
                h = h + h_list[layer]
            h_list.append(h)
            ### update the virtual nodes
            if layer < self.num_layer - 1:
                ### add message from graph nodes to virtual nodes
                virtualnode_embedding_temp = global_add_pool(h_list[layer], batch) + virtualnode_embedding
                ### transform virtual nodes using MLP
                if self.residual:
                    virtualnode_embedding = virtualnode_embedding + F.dropout(self.mlp_virtualnode_list[layer](virtualnode_embedding_temp), self.drop_ratio, training = self.training)
                else:
                    virtualnode_embedding = F.dropout(self.mlp_virtualnode_list[layer](virtualnode_embedding_temp), self.drop_ratio, training = self.training)
        ### Different implementations of Jk-concat
        if self.JK == "last":
            node_representation = h_list[-1]
        elif self.JK == "sum":
            node_representation = 0
            for layer in range(self.num_layer + 1):
                node_representation += h_list[layer]
        else:
            # Bug fix: an unsupported JK previously fell through and crashed
            # with a NameError on `node_representation`; fail fast instead.
            raise ValueError('Undefined JK type called {}'.format(self.JK))
        return node_representation
if __name__ == "__main__":
    # Module is meant to be imported; no standalone entry point.
    pass
| mit | 2bdb077ecee44361baeadba4f35867e7 | 35.573222 | 182 | 0.59055 | 3.626971 | false | false | false | false |
snap-stanford/ogb | examples/lsc/wikikg90m/dgl-ke-ogb-lsc/python/dglke/models/ke_model.py | 1 | 40563 | # -*- coding: utf-8 -*-
#
# ke_model.py
#
# Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Knowledge Graph Embedding Model
1. TransE_l1
2. TransE_l2
3. TransR
4. RESCAL
5. DistMult
6. ComplEx
7. RotatE
"""
import os
from abc import abstractmethod, ABCMeta
import numpy as np
import dgl
import torch as th
from .pytorch.tensor_models import logsigmoid
from .pytorch.tensor_models import none
from .pytorch.tensor_models import get_device
from .pytorch.tensor_models import norm
from .pytorch.tensor_models import get_scalar
from .pytorch.tensor_models import reshape
from .pytorch.tensor_models import cuda
from .pytorch.tensor_models import ExternalEmbedding
from .pytorch.tensor_models import InferEmbedding
from .pytorch.score_fun import *
from .pytorch.ke_tensor import KGEmbedding
from .pytorch.tensor_models import cosine_dist
from .pytorch.tensor_models import l2_dist
from .pytorch.tensor_models import l1_dist
from .pytorch.tensor_models import dot_dist
from .pytorch.tensor_models import extended_jaccard_dist
from .pytorch.tensor_models import floor_divide
EMB_INIT_EPS = 2.0
DEFAULT_INFER_BATCHSIZE = 1024
class BasicGEModel(object):
    """ Basic Graph Embedding Model

    Holds entity/relation embedding tables and a scoring function, and
    provides inference utilities (link prediction and embedding similarity).
    """
    def __init__(self, device, model_name, score_func):
        self._g = None
        self._model_name = model_name
        self._device = device
        self._entity_emb = KGEmbedding(device)
        self._relation_emb = KGEmbedding(device)
        self._score_func = score_func

    def attach_graph(self, g, etid_field='tid', ntid_filed='ntid'):
        """ Attach dataset into Graph Embedding Model

        Parameter
        ----------
        g: DGLGraph
            Input data for knowledge graph
        etid_field: str
            Edge feature name storing the edge type id
        ntid_filed: str
            Node feature name storing the node type id

        Note
        ----
        If the input graph is DGLGraph, we assume that it uses a homogeneous graph
        to represent the heterogeneous graph. The edge type id is stored in etid_field
        and the node type id is stored in ntid_filed.
        """
        self._etid_field = etid_field
        self._ntid_filed = ntid_filed
        assert isinstance(g, dgl.DGLGraph)
        self._g = g

    def load(self, model_path):
        """ Load Graph Embedding Model from model_path.

        The default entity embedding file is entity.npy.
        The default relation embedding file is relation.npy.

        Parameter
        ---------
        model_path : str
            Path to store the model information
        """
        pass

    def save(self, model_path):
        """ Save Graph Embedding Model into model_path.

        All model related data are saved under model_path.
        The default entity embedding file is entity.npy.
        The default relation embedding file is relation.npy.

        Parameter
        ---------
        model_path : str
            Path to store the model information
        """
        assert False, 'Not support training now'

    def fit(self):
        """ Start training
        """
        assert False, 'Not support training now'

    def eval(self):
        """ Start evaluation
        """
        assert False, 'Not support evaluation now'

    def _infer_score_func(self, head, rel, tail, triplet_wise=False, batch_size=DEFAULT_INFER_BATCHSIZE):
        """ Compute raw scores for (head, rel, tail) id tensors.

        If triplet_wise is True, head/rel/tail have the same length N and the
        score is computed element-wise, giving a (N,) tensor. Otherwise the
        full cross product is scored, giving a (num_head, num_rel, num_tail)
        tensor. Work is done in mini-batches to avoid GPU OOM.
        """
        head_emb = self.entity_embed[head]
        rel_emb = self.relation_embed[rel]
        tail_emb = self.entity_embed[tail]
        num_head = head.shape[0]
        num_rel = rel.shape[0]
        num_tail = tail.shape[0]
        score = []
        if triplet_wise:
            # triplet wise score: head, relation and tail tensor have the same length N,
            # for i in range(N):
            #     result.append(score(head[i],rel[i],tail[i]))
            class FakeEdge(object):
                # Mimics a DGL edge batch so the score function's edge_func
                # can be reused outside of a real graph.
                def __init__(self, head_emb, rel_emb, tail_emb, device=-1):
                    self._hobj = {}
                    self._robj = {}
                    self._tobj = {}
                    self._hobj['emb'] = head_emb.to(device)
                    self._robj['emb'] = rel_emb.to(device)
                    self._tobj['emb'] = tail_emb.to(device)

                @property
                def src(self):
                    return self._hobj

                @property
                def dst(self):
                    return self._tobj

                @property
                def data(self):
                    return self._robj

            # calculate scores in mini-batches
            # so we can use GPU to accelerate the speed with avoiding GPU OOM
            for i in range((num_head + batch_size - 1) // batch_size):
                sh_emb = head_emb[i * batch_size : (i + 1) * batch_size \
                                  if (i + 1) * batch_size < num_head \
                                  else num_head]
                sr_emb = rel_emb[i * batch_size : (i + 1) * batch_size \
                                 if (i + 1) * batch_size < num_head \
                                 else num_head]
                st_emb = tail_emb[i * batch_size : (i + 1) * batch_size \
                                  if (i + 1) * batch_size < num_head \
                                  else num_head]
                edata = FakeEdge(sh_emb, sr_emb, st_emb, self._device)
                score.append(self._score_func.edge_func(edata)['score'].to(th.device('cpu')))
            score = th.cat(score, dim=0)
            return score
        else:
            # head, relation and tail tensors has different size
            # for h_i in range(head):
            #     for r_j in range(relation):
            #         for t_k in range(tail):
            #             result.append(score(h_i, r_j, t_k))
            # The result will have shape (len(head), len(relation), len(tail))
            rel_emb = rel_emb.to(self._device)

            # calculating scores using mini-batch, the default batchsize is 1024
            # This can avoid OOM when using GPU
            for i in range((num_head + batch_size - 1) // batch_size):
                sh_emb = head_emb[i * batch_size : (i + 1) * batch_size \
                                  if (i + 1) * batch_size < num_head \
                                  else num_head]
                s_score = []
                sh_emb = sh_emb.to(self._device)
                for j in range((num_tail + batch_size - 1) // batch_size):
                    st_emb = tail_emb[j * batch_size : (j + 1) * batch_size \
                                      if (j + 1) * batch_size < num_tail \
                                      else num_tail]
                    st_emb = st_emb.to(self._device)
                    s_score.append(self._score_func.infer(sh_emb, rel_emb, st_emb).to(th.device('cpu')))
                score.append(th.cat(s_score, dim=2))
            score = th.cat(score, dim=0)
            return th.reshape(score, (num_head, num_rel, num_tail))

    def _exclude_pos(self, sidx, score, idx, head, rel, tail, topk, exec_mode, exclude_mode):
        """ Post-process ranked candidates against the attached graph.

        With exclude_mode == 'exclude', candidates that already exist as edges
        in the graph are dropped until topk remain. With 'mask', the topk
        candidates are returned along with a boolean mask flagging existing
        edges. Otherwise the topk candidates are returned with mask == None.
        """
        g = self.graph
        num_triples = idx.shape[0]
        num_head = 1 if exec_mode == 'batch_head' else head.shape[0]
        num_rel = 1 if exec_mode == 'batch_rel' else rel.shape[0]
        num_tail = 1 if exec_mode == 'batch_tail' else tail.shape[0]

        res_head = []
        res_rel = []
        res_tail = []
        res_score = []
        result = []
        if exclude_mode == 'exclude':
            # exclude existing edges
            cur_k = 0
            batch_size = topk
            while (cur_k < num_triples):
                cur_sidx = sidx[cur_k:cur_k + batch_size if cur_k + batch_size < num_triples else num_triples]
                cur_score = score[cur_k:cur_k + batch_size if cur_k + batch_size < num_triples else num_triples]
                cur_idx = idx[cur_sidx]

                # Decode flat candidate indices back into (head, rel, tail) ids,
                # depending on how the score tensor was laid out.
                if exec_mode == 'triplet_wise':
                    cur_head = head[cur_idx]
                    cur_rel = rel[cur_idx]
                    cur_tail = tail[cur_idx]
                elif exec_mode == 'all':
                    tail_idx = cur_idx % num_tail
                    cur_idx = floor_divide(cur_idx, num_tail)
                    rel_idx = cur_idx % num_rel
                    cur_idx = floor_divide(cur_idx, num_rel)
                    head_idx = cur_idx % num_head
                    cur_head = head[head_idx]
                    cur_rel = rel[rel_idx]
                    cur_tail = tail[tail_idx]
                elif exec_mode == 'batch_head':
                    tail_idx = cur_idx % num_tail
                    cur_idx = floor_divide(cur_idx, num_tail)
                    rel_idx = cur_idx % num_rel
                    cur_head = th.full((cur_sidx.shape[0],), head, dtype=head.dtype)
                    cur_rel = rel[rel_idx]
                    cur_tail = tail[tail_idx]
                elif exec_mode == 'batch_rel':
                    tail_idx = cur_idx % num_tail
                    cur_idx = floor_divide(cur_idx, num_tail)
                    head_idx = cur_idx % num_head
                    cur_head = head[head_idx]
                    cur_rel = th.full((cur_sidx.shape[0],), rel, dtype=rel.dtype)
                    cur_tail = tail[tail_idx]
                elif exec_mode == 'batch_tail':
                    rel_idx = cur_idx % num_rel
                    cur_idx = floor_divide(cur_idx, num_rel)
                    head_idx = cur_idx % num_head
                    cur_head = head[head_idx]
                    cur_rel = rel[rel_idx]
                    cur_tail = th.full((cur_sidx.shape[0],), tail, dtype=tail.dtype)

                # Find existing edges
                # It is expected that the existing edges are much less than triples
                # The idea is: 1) we get existing edges using g.edge_ids
                #              2) sort edges according to source node id (O(nlog(n)), n is number of edges)
                #              3) sort candidate triples according to cur_head (O(mlog(m)), m is number of cur_head nodes)
                #              4) go over all candidate triples and compare with existing edges,
                #                 as both edges and candidate triples are sorted. filtering edges out
                #                 will take only O(n+m)
                #              5) sort the score again it taks O(klog(k))
                uid, vid, eid = g.edge_ids(cur_head, cur_tail, return_uv=True)
                rid = g.edata[self._etid_field][eid]

                for i in range(cur_head.shape[0]):
                    h = cur_head[i]
                    r = cur_rel[i]
                    t = cur_tail[i]

                    h_where = uid == h
                    t_where = vid[h_where] == t
                    r_where = rid[h_where][t_where]
                    edge_exist = False
                    if r_where.shape[0] > 0:
                        for c_r in r_where:
                            if c_r == r:
                                edge_exist = True
                                break

                    if edge_exist is False:
                        res_head.append(h)
                        res_rel.append(r)
                        res_tail.append(t)
                        res_score.append(cur_score[i])

                    if len(res_head) >= topk:
                        break

                cur_k += batch_size
                batch_size = topk - len(res_head) # check more edges
                batch_size = 16 if batch_size < 16 else batch_size # avoid tailing issue

            res_head = th.tensor(res_head)
            res_rel = th.tensor(res_rel)
            res_tail = th.tensor(res_tail)
            res_score = th.tensor(res_score)
            sidx = th.argsort(res_score, dim=0, descending=True)
            sidx = sidx[:topk] if topk < sidx.shape[0] else sidx
            result.append((res_head[sidx],
                           res_rel[sidx],
                           res_tail[sidx],
                           res_score[sidx],
                           None))
        else:
            # including the existing edges in the result
            topk = topk if topk < num_triples else num_triples
            sidx = sidx[:topk]
            idx = idx[sidx]

            # Decode flat candidate indices back into (head, rel, tail) ids.
            if exec_mode == 'triplet_wise':
                head = head[idx]
                rel = rel[idx]
                tail = tail[idx]
            elif exec_mode == 'all':
                tail_idx = idx % num_tail
                idx = floor_divide(idx, num_tail)
                rel_idx = idx % num_rel
                idx = floor_divide(idx, num_rel)
                head_idx = idx % num_head
                head = head[head_idx]
                rel = rel[rel_idx]
                tail = tail[tail_idx]
            elif exec_mode == 'batch_head':
                tail_idx = idx % num_tail
                idx = floor_divide(idx, num_tail)
                rel_idx = idx % num_rel
                head = th.full((topk,), head, dtype=head.dtype)
                rel = rel[rel_idx]
                tail = tail[tail_idx]
            elif exec_mode == 'batch_rel':
                tail_idx = idx % num_tail
                idx = floor_divide(idx, num_tail)
                head_idx = idx % num_head
                head = head[head_idx]
                rel = th.full((topk,), rel, dtype=rel.dtype)
                tail = tail[tail_idx]
            elif exec_mode == 'batch_tail':
                rel_idx = idx % num_rel
                idx = floor_divide(idx, num_rel)
                head_idx = idx % num_head
                head = head[head_idx]
                rel = rel[rel_idx]
                tail = th.full((topk,), tail, dtype=tail.dtype)

            if exclude_mode == 'mask':
                # Find existing edges
                # It is expected that the existing edges are much less than triples
                # The idea is: 1) we get existing edges using g.edge_ids
                #              2) sort edges according to source node id (O(nlog(n)), n is number of edges)
                #              3) sort candidate triples according to cur_head (O(mlog(m)), m is number of cur_head nodes)
                #              4) go over all candidate triples and compare with existing edges and mask them,
                #                 as both edges and candidate triples are sorted. filtering edges out
                #                 will take only O(n+m)
                uid, vid, eid = g.edge_ids(head, tail, return_uv=True)
                rid = g.edata[self._etid_field][eid]
                mask = th.full((head.shape[0],), False, dtype=th.bool)

                if len(uid) > 0:
                    for i in range(head.shape[0]):
                        h = head[i]
                        r = rel[i]
                        t = tail[i]

                        h_where = uid == h
                        t_where = vid[h_where] == t
                        r_where = rid[h_where][t_where]
                        if r_where.shape[0] > 0:
                            for c_r in r_where:
                                if c_r == r:
                                    mask[i] = True
                                    break

                result.append((head, rel, tail, score, mask))
            else:
                result.append((head, rel, tail, score, None))

        return result

    def _topk_exclude_pos(self, score, idx, head, rel, tail, topk, exec_mode, exclude_mode):
        """ Generate topk most relevant triplets and corresponding scores.

        It takes following steps:

          1) find topk elements
          2) sort topk elements in descending order
          3) call _exclude_pos if figure out existing edges
        """
        if exclude_mode == 'exclude':
            if idx.shape[0] < topk * 4: # TODO(xiangsx): Find a better value of topk * n
                topk_score, topk_sidx = th.topk(score, k=idx.shape[0], dim=0)
                sidx = th.argsort(topk_score, dim=0, descending=True)
                sidx = topk_sidx[sidx]
                result = self._exclude_pos(sidx=sidx,
                                           score=topk_score,
                                           idx=idx,
                                           head=head,
                                           rel=rel,
                                           tail=tail,
                                           topk=topk,
                                           exec_mode=exec_mode,
                                           exclude_mode=exclude_mode)
            else:
                # Over-fetch 4x topk candidates so that filtering out existing
                # edges usually still leaves at least topk results.
                topk_score, topk_sidx = th.topk(score, k= topk * 4, dim=0)
                sidx = th.argsort(topk_score, dim=0, descending=True)
                sidx = topk_sidx[sidx]
                result = self._exclude_pos(sidx=sidx,
                                           score=topk_score,
                                           idx=idx,
                                           head=head,
                                           rel=rel,
                                           tail=tail,
                                           topk=topk,
                                           exec_mode=exec_mode,
                                           exclude_mode=exclude_mode)
                if len(result) < topk:
                    # Not enough left after filtering; redo over all candidates.
                    sidx = th.argsort(score, dim=0, descending=True)
                    result = self._exclude_pos(sidx=sidx,
                                               score=score[sidx],
                                               idx=idx,
                                               head=head,
                                               rel=rel,
                                               tail=tail,
                                               topk=topk,
                                               exec_mode=exec_mode,
                                               exclude_mode=exclude_mode)
        else:
            topk = idx.shape[0] if idx.shape[0] < topk else topk
            topk_score, topk_sidx = th.topk(score, k=topk, dim=0)
            sidx = th.argsort(topk_score, dim=0, descending=True)
            sidx = topk_sidx[sidx]
            result = self._exclude_pos(sidx=sidx,
                                       score=topk_score,
                                       idx=idx,
                                       head=head,
                                       rel=rel,
                                       tail=tail,
                                       topk=topk,
                                       exec_mode=exec_mode,
                                       exclude_mode=exclude_mode)
        return result

    def link_predict(self, head=None, rel=None, tail=None, exec_mode='all', sfunc='none', topk=10, exclude_mode=None, batch_size=DEFAULT_INFER_BATCHSIZE):
        """ Predicts missing entities or relations in a triplet.

        Given head_id, relation_id and tail_id, return topk most relevant triplet.

        Parameters
        ----------
        head: th.Tensor
            A tensor of head entity id.

        rel: th.Tensor
            A tensor of relation id.

        tail: th.Tensor
            A tensor of tail entity id.

        exec_mode: str
            How to calculate scores for triplets and calculate topK:

              * triplet_wise: head, relation and tail lists have the same length N,
                and we calculate the similarity triplet by triplet:
                ``result = topK([score(h_i, r_i, t_i) for i in N])``,
                the result shape will be (K,)

              * all: three lists of head, relation and tail ids are provided as H, R and T,
                and we calculate all possible combinations of all triplets (h_i, r_j, t_k):
                ``result = topK([[[score(h_i, r_j, t_k) for each h_i in H] for each r_j in R] for each t_k in T])``,
                the result shape will be (K,)

              * batch_head: three lists of head, relation and tail ids are provided as H, R and T
                and we calculate topK for each element in head:
                ``result = topK([[score(h_i, r_j, t_k) for each r_j in R] for each t_k in T]) for each h_i in H``
                the result shape will be (sizeof(H), K)

              * batch_rel: three lists of head, relation and tail ids are provided as H, R and T,
                and we calculate topK for each element in relation:
                ``result = topK([[score(h_i, r_j, t_k) for each h_i in H] for each t_k in T]) for each r_j in R``,
                the result shape will be (sizeof(R), K)

              * batch_tail: three lists of head, relation and tail ids are provided as H, R and T,
                and we calculate topK for each element in tail:
                ``result = topK([[score(h_i, r_j, t_k) for each h_i in H] for each r_j in R]) for each t_k in T``,
                the result shape will be (sizeof(T), K)

        sfunc: str
            What kind of score is used in ranking and will be output:

              * none: $score = x$
              * logsigmoid: $score = log(sigmoid(x))

        topk: int
            Return top k results

        exclude_mode: str
            Whether to exclude positive edges:

              * None: Do not exclude positive edges.

              * 'mask': Return topk edges and a mask indicating which one is positive edge.

              * 'exclude': Exclude positive edges, the returned k edges will be missing edges in the graph.

        Return
        ------
        A list of (head_idx, rel_idx, tail_idx, score)
        """
        # Default to the full entity/relation ranges when ids are not given.
        if head is None:
            head = th.arange(0, self.num_entity)
        else:
            head = th.tensor(head)
        if rel is None:
            rel = th.arange(0, self.num_rel)
        else:
            rel = th.tensor(rel)
        if tail is None:
            tail = th.arange(0, self.num_entity)
        else:
            tail = th.tensor(tail)

        num_head = head.shape[0]
        num_rel = rel.shape[0]
        num_tail = tail.shape[0]

        if sfunc == 'none':
            sfunc = none
        else:
            sfunc = logsigmoid

        # if exclude_mode is not None, we need a graph to do the edge filtering
        assert (self._g is not None) or (exclude_mode is None), \
            'If exclude_mode is not None, please use load_graph() to initialize ' \
            'a graph for edge filtering.'
        if exec_mode == 'triplet_wise':
            assert num_head == num_rel, \
                'For triplet wise exection mode, head, relation and tail lists should have same length'
            assert num_head == num_tail, \
                'For triplet wise exection mode, head, relation and tail lists should have same length'

            with th.no_grad():
                raw_score = self._infer_score_func(head, rel, tail, triplet_wise=True, batch_size=batch_size)
                score = sfunc(raw_score)
                idx = th.arange(0, num_head)

            result = self._topk_exclude_pos(score=score,
                                            idx=idx,
                                            head=head,
                                            rel=rel,
                                            tail=tail,
                                            topk=topk,
                                            exec_mode=exec_mode,
                                            exclude_mode=exclude_mode)
        elif exec_mode == 'all':
            result = []
            with th.no_grad():
                raw_score = self._infer_score_func(head, rel, tail)
            raw_score = th.reshape(raw_score, (head.shape[0]*rel.shape[0]*tail.shape[0],))
            score = sfunc(raw_score)
            idx = th.arange(0, num_head * num_rel * num_tail)

            result = self._topk_exclude_pos(score=score,
                                            idx=idx,
                                            head=head,
                                            rel=rel,
                                            tail=tail,
                                            topk=topk,
                                            exec_mode=exec_mode,
                                            exclude_mode=exclude_mode)
        elif exec_mode == 'batch_head':
            result = []
            with th.no_grad():
                raw_score = self._infer_score_func(head, rel, tail)

            for i in range(num_head):
                score = sfunc(th.reshape(raw_score[i,:,:], (rel.shape[0]*tail.shape[0],)))
                idx = th.arange(0, num_rel * num_tail)

                res = self._topk_exclude_pos(score=score,
                                             idx=idx,
                                             head=head[i],
                                             rel=rel,
                                             tail=tail,
                                             topk=topk,
                                             exec_mode=exec_mode,
                                             exclude_mode=exclude_mode)

                result.append(res[0])
        elif exec_mode == 'batch_rel':
            result = []
            with th.no_grad():
                raw_score = self._infer_score_func(head, rel, tail)

            for i in range(num_rel):
                score = sfunc(th.reshape(raw_score[:,i,:], (head.shape[0]*tail.shape[0],)))
                idx = th.arange(0, num_head * num_tail)

                res = self._topk_exclude_pos(score=score,
                                             idx=idx,
                                             head=head,
                                             rel=rel[i],
                                             tail=tail,
                                             topk=topk,
                                             exec_mode=exec_mode,
                                             exclude_mode=exclude_mode)

                result.append(res[0])
        elif exec_mode == 'batch_tail':
            result = []
            with th.no_grad():
                raw_score = self._infer_score_func(head, rel, tail)

            for i in range(num_tail):
                score = sfunc(th.reshape(raw_score[:,:,i], (head.shape[0]*rel.shape[0],)))
                idx = th.arange(0, num_head * num_rel)

                res = self._topk_exclude_pos(score=score,
                                             idx=idx,
                                             head=head,
                                             rel=rel,
                                             tail=tail[i],
                                             topk=topk,
                                             exec_mode=exec_mode,
                                             exclude_mode=exclude_mode)

                result.append(res[0])
        else:
            assert False, 'unknow execution mode type {}'.format(exec_mode)

        return result

    def _embed_sim(self, head, tail, emb, sfunc='cosine', bcast=False, pair_ws=False, topk=10):
        """ Compute embedding similarity between head and tail id sets.

        See embed_sim() for the meaning of sfunc, bcast and pair_ws.
        """
        batch_size=DEFAULT_INFER_BATCHSIZE
        if head is None:
            head = th.arange(0, emb.shape[0])
        else:
            head = th.tensor(head)
        if tail is None:
            tail = th.arange(0, emb.shape[0])
        else:
            tail = th.tensor(tail)
        head_emb = emb[head]
        tail_emb = emb[tail]

        if sfunc == 'cosine':
            sim_func = cosine_dist
        elif sfunc == 'l2':
            sim_func = l2_dist
        elif sfunc == 'l1':
            sim_func = l1_dist
        elif sfunc == 'dot':
            sim_func = dot_dist
        elif sfunc == 'ext_jaccard':
            sim_func = extended_jaccard_dist

        if pair_ws is True:
            result = []
            # chunked cal score
            score = []
            num_head = head.shape[0]
            num_tail = tail.shape[0]

            # calculating scores using mini-batch, the default batchsize is 1024
            # This can avoid OOM when using GPU
            for i in range((num_head + batch_size - 1) // batch_size):
                sh_emb = head_emb[i * batch_size : (i + 1) * batch_size \
                                  if (i + 1) * batch_size < num_head \
                                  else num_head]
                sh_emb = sh_emb.to(self._device)
                st_emb = tail_emb[i * batch_size : (i + 1) * batch_size \
                                  if (i + 1) * batch_size < num_head \
                                  else num_head]
                st_emb = st_emb.to(self._device)
                score.append(sim_func(sh_emb, st_emb, pw=True).to(th.device('cpu')))
            score = th.cat(score, dim=0)

            topk_score, topk_sidx = th.topk(score,
                                            k=topk if score.shape[0] > topk else score.shape[0],
                                            dim=0)
            sidx = th.argsort(topk_score, dim=0, descending=True)
            sidx = topk_sidx[sidx]
            score = score[sidx]
            result.append((head[sidx],
                           tail[sidx],
                           score))
        else:
            num_head = head.shape[0]
            num_tail = tail.shape[0]

            # calculating scores using mini-batch, the default batchsize is 1024
            # This can avoid OOM when using GPU
            score = []
            for i in range((num_head + batch_size - 1) // batch_size):
                sh_emb = head_emb[i * batch_size : (i + 1) * batch_size \
                                  if (i + 1) * batch_size < num_head \
                                  else num_head]
                sh_emb = sh_emb.to(self._device)
                s_score = []
                for j in range((num_tail + batch_size - 1) // batch_size):
                    st_emb = tail_emb[j * batch_size : (j + 1) * batch_size \
                                      if (j + 1) * batch_size < num_tail \
                                      else num_tail]
                    st_emb = st_emb.to(self._device)
                    s_score.append(sim_func(sh_emb, st_emb).to(th.device('cpu')))
                score.append(th.cat(s_score, dim=1))
            score = th.cat(score, dim=0)

            if bcast is False:
                result = []
                idx = th.arange(0, num_head * num_tail)
                score = th.reshape(score, (num_head * num_tail, ))

                topk_score, topk_sidx = th.topk(score,
                                                k=topk if score.shape[0] > topk else score.shape[0],
                                                dim=0)
                sidx = th.argsort(topk_score, dim=0, descending=True)
                score = topk_score[sidx]
                sidx = topk_sidx[sidx]
                idx = idx[sidx]
                tail_idx = idx % num_tail
                idx = floor_divide(idx, num_tail)
                head_idx = idx % num_head

                result.append((head[head_idx],
                               tail[tail_idx],
                               score))
            else: # bcast at head
                result = []
                for i in range(num_head):
                    i_score = score[i]

                    topk_score, topk_sidx = th.topk(i_score,
                                                    k=topk if i_score.shape[0] > topk else i_score.shape[0],
                                                    dim=0)
                    sidx = th.argsort(topk_score, dim=0, descending=True)
                    i_score = topk_score[sidx]
                    idx = topk_sidx[sidx]

                    result.append((th.full((topk,), head[i], dtype=head[i].dtype),
                                   tail[idx],
                                   i_score))

        return result

    def embed_sim(self, left=None, right=None, embed_type='entity', sfunc='cosine', bcast=False, pair_ws=False, topk=10):
        """ Finds the most similar entity/relation embeddings for
        some pre-defined similarity functions given a set of
        entities or relations.

        Parameters
        ----------
        left: th.Tensor
            A tensor of left object id.

        right: th.Tensor
            A tensor of right object id.

        embed_type: str
            Whether it is using entity embedding or relation embedding.
            If `entity`, it is entity embedding.
            If 'relation', it is relation embedding.

        sfunc: str
            What kind of similarity function is used in ranking and will be output:

              * cosine: use cosine similarity, score = $\frac{x \cdot y}{||x||_2||y||_2}$'

              * l2: use l2 similarity, score = -$||x - y||_2$

              * l1: use l1 similarity, score = -$||x - y||_1$

              * dot: use dot product similarity, score = $x \cdot y$

              * ext_jaccard: use extended jaccard similarity, score = $\frac{x \cdot y}{||x||_{2}^{2} + ||y||_{2}^{2} - x \cdot y}$

        bcast: bool
            If True, both left and right objects are provided as L and R,, and we calculate topK for each element in L:

              * 'result = topK([score(l_i, r_j) for r_j in R]) for l_j in L, the result shape will be (sizeof(L), K)

            Default: False

        pair_ws: bool
            If True, both left and right objects are provided with the same length N, and we will calculate the similarity pair by pair:

              * result = topK([score(l_i, r_i)]) for i in N, the result shape will be (K,)

            Default: False

        topk: int
            Return top k results

        Note
        ----
        If both bcast and pair_ws is False, both left and right objects are provided as L and R,
        and we calculate all possible combinations of (l_i, r_j):
        ``result = topK([[score(l_i, rj) for l_i in L] for r_j in R])``,
        the result shape will be (K,)

        Return
        ------
        A list of (left_idx, right_idx, sim_score)
        """
        if embed_type == 'entity':
            emb = self.entity_embed
        elif embed_type == 'relation':
            emb = self.relation_embed
        else:
            assert False, 'emb should entity or relation'

        return self._embed_sim(head=left,
                               tail=right,
                               emb=emb,
                               sfunc=sfunc,
                               bcast=bcast,
                               pair_ws=pair_ws,
                               topk=topk)

    @property
    def model_name(self):
        return self._model_name

    @property
    def entity_embed(self):
        return self._entity_emb.emb

    @property
    def relation_embed(self):
        return self._relation_emb.emb

    @property
    def num_entity(self):
        # -1 signals that no embeddings have been loaded yet.
        return -1 if self.entity_embed is None else self.entity_embed.shape[0]

    @property
    def num_rel(self):
        # -1 signals that no embeddings have been loaded yet.
        return -1 if self.relation_embed is None else self.relation_embed.shape[0]

    @property
    def graph(self):
        return self._g
class KGEModel(BasicGEModel):
    """Shared behaviour for classic knowledge graph embedding models."""
    def __init__(self, device, model_name, score_func):
        super(KGEModel, self).__init__(device, model_name, score_func)

    def load(self, model_path):
        """Load entity/relation embeddings and score-function state from model_path."""
        self._entity_emb.load(model_path, 'entity.npy')
        self._relation_emb.load(model_path, 'relation.npy')
        self._score_func.load(model_path, self.model_name)
class TransEModel(KGEModel):
    """TransE model (l2 distance variant)."""
    def __init__(self, device, gamma):
        self._gamma = gamma
        super(TransEModel, self).__init__(device, 'TransE', TransEScore(gamma, 'l2'))
class TransE_l2Model(KGEModel):
    """TransE model scored with the l2 norm."""
    def __init__(self, device, gamma):
        self._gamma = gamma
        super(TransE_l2Model, self).__init__(device, 'TransE_l2', TransEScore(gamma, 'l2'))
class TransE_l1Model(KGEModel):
    """TransE model scored with the l1 norm."""
    def __init__(self, device, gamma):
        self._gamma = gamma
        super(TransE_l1Model, self).__init__(device, 'TransE_l1', TransEScore(gamma, 'l1'))
class TransRModel(KGEModel):
    """TransR model with a per-relation projection embedding."""
    def __init__(self, device, gamma):
        # The projection embedding's true shape is only known after load(),
        # hence the -1 placeholder dimensions (TransR score init finishes there).
        self._gamma = gamma
        proj_emb = KGEmbedding(device)
        super(TransRModel, self).__init__(device, 'TransR', TransRScore(gamma, proj_emb, -1, -1))

    def load(self, model_path):
        super(TransRModel, self).load(model_path)
        # Recover entity/relation dimensionality from the loaded tables.
        self._score_func.relation_dim = self._relation_emb.emb.shape[1]
        self._score_func.entity_dim = self._entity_emb.emb.shape[1]
class DistMultModel(KGEModel):
    """DistMult model (bilinear-diagonal score)."""
    def __init__(self, device):
        super(DistMultModel, self).__init__(device, 'DistMult', DistMultScore())
class ComplExModel(KGEModel):
    """ComplEx model (complex-valued bilinear score)."""
    def __init__(self, device):
        super(ComplExModel, self).__init__(device, 'ComplEx', ComplExScore())
class RESCALModel(KGEModel):
    """RESCAL model (full bilinear score with a matrix per relation)."""
    def __init__(self, device):
        # Dimensions are unknown until load(), hence the -1 placeholders.
        super(RESCALModel, self).__init__(device, 'RESCAL', RESCALScore(-1, -1))

    def load(self, model_path):
        super(RESCALModel, self).load(model_path)
        # Each relation embedding flattens an (entity_dim x relation_dim) matrix.
        self._score_func.entity_dim = self._entity_emb.emb.shape[1]
        self._score_func.relation_dim = self._relation_emb.emb.shape[1] // self._score_func.entity_dim
class RotatEModel(KGEModel):
    """RotatE model (rotation in the complex plane)."""
    def __init__(self, device, gamma):
        self._gamma = gamma
        super(RotatEModel, self).__init__(device, 'RotatE', RotatEScore(gamma, 0))

    def load(self, model_path):
        super(RotatEModel, self).load(model_path)
        # Retrieve emb_init, which the scoring function uses. Entity embeddings
        # store (real, imag) pairs, so the hidden dim is half the entity dim.
        hidden_dim = self._entity_emb.emb.shape[1] // 2
        self._score_func.emb_init = (self._gamma + EMB_INIT_EPS) / hidden_dim
class GNNModel(BasicGEModel):
    """ Basic GNN Model

    Wraps externally-trained GNN entity/relation embeddings with a KGE
    scoring function. Only parameter-free score functions are supported:
    TransE_l1, TransE_l2 (alias 'TransE') and DistMult.
    """
    def __init__(self, device, model_name, gamma=0):
        if model_name == 'TransE' or model_name == 'TransE_l2':
            score_func = TransEScore(gamma, 'l2')
        elif model_name == 'TransE_l1':
            score_func = TransEScore(gamma, 'l1')
        elif model_name == 'DistMult':
            score_func = DistMultScore()
        else:
            # Bug fix: the two adjacent string literals previously rendered as
            # "...TransE_l2DistMult..."; add the missing separator.
            assert model_name in ['TransE', 'TransE_l2', 'TransE_l1', 'DistMult'], \
                "For general purpose Scoring function for GNN, we only support TransE_l1, TransE_l2, " \
                "DistMult, but {} is given.".format(model_name)

        super(GNNModel, self).__init__(device, model_name, score_func)

    def load(self, model_path):
        """Load pretrained entity and relation embeddings from model_path."""
        entity_emb_file = 'entity.npy'
        relation_emb_file = 'relation.npy'
        self._entity_emb.load(model_path, entity_emb_file)
        self._relation_emb.load(model_path, relation_emb_file)
| mit | f640aa54e2a4d9cacec1ffccbaa5a18d | 40.47546 | 154 | 0.476148 | 4.079143 | false | false | false | false |
snap-stanford/ogb | examples/lsc/wikikg90m/dgl-ke-ogb-lsc/python/dglke/models/pytorch/ke_tensor.py | 1 | 8309 | # -*- coding: utf-8 -*-
#
# tensor_models.py
#
# Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
KG Sparse embedding
"""
import os
import numpy as np
import torch as th
import torch.nn as nn
import torch.nn.functional as functional
import torch.nn.init as INIT
import torch.multiprocessing as mp
from torch.multiprocessing import Queue
from _thread import start_new_thread
import traceback
from functools import wraps
from .. import *
from .tensor_models import thread_wrapped_func
class KGEmbedding:
    """Sparse Embedding for Knowledge Graph

    It is used to store both entity embeddings and relation embeddings.
    Rows touched by a mini-batch are updated sparsely with an
    Adagrad-style per-row adaptive learning rate (see update()).

    Parameters
    ----------
    num : int
        Number of embeddings.
    dim : int
        Embedding dimention size.
    device : th.device
        Device to store the embedding.
    """
    def __init__(self, device):
        self.device = device
        # The embedding tensor is created lazily by init()/load()/load_emb().
        self.emb = None
        self.is_train = False

    def init(self, emb_init, lr, async_threads, num=-1, dim=-1):
        """Initializing the embeddings for training.

        Parameters
        ----------
        emb_init : float
            The intial embedding range should be [-emb_init, emb_init].
        lr : float
            Learning rate used later by update().
        async_threads : int
            Accepted for API compatibility; not used in this method.
        num : int
            Row count to allocate when no embedding exists yet.
        dim : int
            Embedding dimension to allocate when no embedding exists yet.
        """
        if self.emb is None:
            # Fresh (num, dim) table on the configured device.
            self.emb = th.empty(num, dim, dtype=th.float32, device=self.device)
        else:
            # NOTE(review): self.num / self.dim are only recorded on this
            # branch (embedding already loaded) — confirm that is intended.
            self.num = self.emb.shape[0]
            self.dim = self.emb.shape[1]
        # Per-row running sum of squared gradients (Adagrad state).
        self.state_sum = self.emb.new().resize_(self.emb.size(0)).zero_()
        # Trace of (indices, detached-grad-tensor) pairs recorded by __call__.
        self.trace = []
        self.state_step = 0
        self.has_cross_rel = False
        self.lr = lr
        INIT.uniform_(self.emb, -emb_init, emb_init)
        INIT.zeros_(self.state_sum)

    def load(self, path, name):
        """Load embeddings.

        Parameters
        ----------
        path : str
            Directory to load the embedding.
        name : str
            Embedding name.
        """
        file_name = os.path.join(path, name)
        self.emb = th.Tensor(np.load(file_name))

    def load_emb(self, emb_array):
        """Load embeddings from numpy array.

        Parameters
        ----------
        emb_array : numpy.array or torch.tensor
            Embedding array in numpy array or torch.tensor
        """
        if isinstance(emb_array, np.ndarray):
            self.emb = th.Tensor(emb_array)
        else:
            self.emb = emb_array

    def save(self, path, name):
        """Save embeddings.

        Parameters
        ----------
        path : str
            Directory to save the embedding.
        name : str
            Embedding name.
        """
        file_name = os.path.join(path, name)
        np.save(file_name, self.emb.cpu().detach().numpy())

    def train(self):
        # Enables gradient tracing in __call__.
        self.is_train = True

    def eval(self):
        # Disables gradient tracing; __call__ returns plain slices.
        self.is_train = False

    def setup_cross_rels(self, cross_rels, global_emb):
        """Mark relations shared across partitions; they live in global_emb on CPU."""
        cpu_bitmap = th.zeros((self.num,), dtype=th.bool)
        for i, rel in enumerate(cross_rels):
            cpu_bitmap[rel] = 1
        self.cpu_bitmap = cpu_bitmap
        self.has_cross_rel = True
        self.global_emb = global_emb

    def get_noncross_idx(self, idx):
        """Return the subset of idx that is not a cross-partition relation."""
        cpu_mask = self.cpu_bitmap[idx]
        gpu_mask = ~cpu_mask
        return idx[gpu_mask]

    def share_memory(self):
        """Use torch.tensor.share_memory_() to allow cross process tensor access
        """
        self.emb.share_memory_()
        self.state_sum.share_memory_()

    def __call__(self, idx, gpu_id=-1, trace=True):
        """ Return sliced tensor.

        Parameters
        ----------
        idx : th.tensor
            Slicing index
        gpu_id : int
            Which gpu to put sliced data in.
        trace : bool
            If True, trace the computation. This is required in training.
            If False, do not trace the computation.
            Default: True
        """
        # for inference or evaluation
        if self.is_train is False:
            return self.emb[idx].to(gpu_id)
        if self.has_cross_rel:
            # Refresh any cross-partition rows from the CPU-side global table
            # before slicing, so the local copy is up to date.
            cpu_idx = idx.cpu()
            cpu_mask = self.cpu_bitmap[cpu_idx]
            cpu_idx = cpu_idx[cpu_mask]
            cpu_idx = th.unique(cpu_idx)
            if cpu_idx.shape[0] != 0:
                cpu_emb = self.global_emb.emb[cpu_idx]
                self.emb[cpu_idx] = cpu_emb.cuda(gpu_id)
        s = self.emb[idx]
        if gpu_id >= 0:
            s = s.cuda(gpu_id)
        # During the training, we need to trace the computation.
        # In this case, we need to record the computation path and compute the gradients.
        if trace:
            data = s.clone().detach().requires_grad_(True)
            self.trace.append((idx, data))
        else:
            data = s
        return data

    def update(self, gpu_id=-1):
        """ Update embeddings in a sparse manner
        Sparse embeddings are updated in mini batches. we maintains gradient states for
        each embedding so they can be updated separately.

        Parameters
        ----------
        gpu_id : int
            Which gpu to accelerate the calculation. if -1 is provided, cpu is used.
        """
        self.state_step += 1
        with th.no_grad():
            for idx, data in self.trace:
                grad = data.grad.data
                clr = self.lr
                #clr = self.lr / (1 + (self.state_step - 1) * group['lr_decay'])
                # the update is non-linear so indices must be unique
                grad_indices = idx
                grad_values = grad
                # NOTE(review): self.async_q is never assigned in this class;
                # presumably an async-update setup method elsewhere sets it.
                # If it is not set, this line raises AttributeError — confirm.
                if self.async_q is not None:
                    # Hand the (indices, grads) off to a background updater.
                    grad_indices.share_memory_()
                    grad_values.share_memory_()
                    self.async_q.put((grad_indices, grad_values, gpu_id))
                else:
                    # Adagrad: accumulate mean squared gradient per row.
                    grad_sum = (grad_values * grad_values).mean(1)
                    device = self.state_sum.device
                    if device != grad_indices.device:
                        grad_indices = grad_indices.to(device)
                    if device != grad_sum.device:
                        grad_sum = grad_sum.to(device)
                    if self.has_cross_rel:
                        # Cross-partition rows are updated in the shared
                        # CPU-side global embedding as well.
                        cpu_mask = self.cpu_bitmap[grad_indices]
                        cpu_idx = grad_indices[cpu_mask]
                        if cpu_idx.shape[0] > 0:
                            cpu_grad = grad_values[cpu_mask]
                            cpu_sum = grad_sum[cpu_mask].cpu()
                            cpu_idx = cpu_idx.cpu()
                            self.global_emb.state_sum.index_add_(0, cpu_idx, cpu_sum)
                            std = self.global_emb.state_sum[cpu_idx]
                            if gpu_id >= 0:
                                std = std.cuda(gpu_id)
                            std_values = std.sqrt_().add_(1e-10).unsqueeze(1)
                            tmp = (-clr * cpu_grad / std_values)
                            tmp = tmp.cpu()
                            self.global_emb.emb.index_add_(0, cpu_idx, tmp)
                    self.state_sum.index_add_(0, grad_indices, grad_sum)
                    std = self.state_sum[grad_indices]  # _sparse_mask
                    if gpu_id >= 0:
                        std = std.cuda(gpu_id)
                    # Row-wise step size: -lr / sqrt(accumulated squared grads).
                    std_values = std.sqrt_().add_(1e-10).unsqueeze(1)
                    tmp = (-clr * grad_values / std_values)
                    if tmp.device != device:
                        tmp = tmp.to(device)
                    # TODO(zhengda) the overhead is here.
                    self.emb.index_add_(0, grad_indices, tmp)
        self.trace = []

    def curr_emb(self):
        """Return embeddings in trace.
        """
        data = [data for _, data in self.trace]
        return th.cat(data, 0)
| mit | 552ecf0adfdfb93b84e112ffef3089e9 | 32.504032 | 89 | 0.531472 | 4.029583 | false | false | false | false |
snap-stanford/ogb | ogb/nodeproppred/dataset_dgl.py | 1 | 10270 | import pandas as pd
import shutil, os
import os.path as osp
import torch
import numpy as np
from dgl.data.utils import load_graphs, save_graphs, Subset
import dgl
from ogb.utils.url import decide_download, download_url, extract_zip
from ogb.io.read_graph_dgl import read_graph_dgl, read_heterograph_dgl
from ogb.io.read_graph_raw import read_node_label_hetero, read_nodesplitidx_split_hetero
class DglNodePropPredDataset(object):
    """OGB node-property-prediction dataset wrapped as a single DGL graph.

    Downloads (if needed), preprocesses, caches and serves one graph plus
    its node labels and the official train/valid/test split.
    """
    def __init__(self, name, root = 'dataset', meta_dict = None):
        '''
        - name (str): name of the dataset
        - root (str): root directory to store the dataset folder
        - meta_dict: dictionary that stores all the meta-information about data. Default is None,
                     but when something is passed, it uses its information. Useful for debugging for external contributers.
        '''
        self.name = name ## original name, e.g., ogbn-proteins
        if meta_dict is None:
            self.dir_name = '_'.join(name.split('-'))
            # check if previously-downloaded folder exists.
            # If so, use that one.
            if osp.exists(osp.join(root, self.dir_name + '_dgl')):
                self.dir_name = self.dir_name + '_dgl'
            self.original_root = root
            self.root = osp.join(root, self.dir_name)
            # master.csv (shipped with ogb) holds per-dataset metadata.
            master = pd.read_csv(os.path.join(os.path.dirname(__file__), 'master.csv'), index_col = 0)
            if not self.name in master:
                error_mssg = 'Invalid dataset name {}.\n'.format(self.name)
                error_mssg += 'Available datasets are as follows:\n'
                error_mssg += '\n'.join(master.keys())
                raise ValueError(error_mssg)
            self.meta_info = master[self.name]
        else:
            # Caller-supplied metadata (mainly for debugging/external data).
            self.dir_name = meta_dict['dir_path']
            self.original_root = ''
            self.root = meta_dict['dir_path']
            self.meta_info = meta_dict
        # check version
        # First check whether the dataset has been already downloaded or not.
        # If so, check whether the dataset version is the newest or not.
        # If the dataset is not the newest version, notify this to the user.
        if osp.isdir(self.root) and (not osp.exists(osp.join(self.root, 'RELEASE_v' + str(self.meta_info['version']) + '.txt'))):
            print(self.name + ' has been updated.')
            if input('Will you update the dataset now? (y/N)\n').lower() == 'y':
                shutil.rmtree(self.root)
        self.download_name = self.meta_info['download_name'] ## name of downloaded file, e.g., tox21
        self.num_tasks = int(self.meta_info['num tasks'])
        self.task_type = self.meta_info['task type']
        self.eval_metric = self.meta_info['eval metric']
        self.num_classes = int(self.meta_info['num classes'])
        self.is_hetero = self.meta_info['is hetero'] == 'True'
        self.binary = self.meta_info['binary'] == 'True'
        super(DglNodePropPredDataset, self).__init__()
        self.pre_process()

    def pre_process(self):
        """Load the cached DGL graph, or download + convert the raw data once."""
        processed_dir = osp.join(self.root, 'processed')
        pre_processed_file_path = osp.join(processed_dir, 'dgl_data_processed')
        if osp.exists(pre_processed_file_path):
            # Fast path: previously processed graph on disk.
            self.graph, label_dict = load_graphs(pre_processed_file_path)
            if self.is_hetero:
                self.labels = label_dict
            else:
                self.labels = label_dict['labels']
        else:
            ### check if the downloaded file exists
            if self.binary:
                # npz format
                has_necessary_file_simple = osp.exists(osp.join(self.root, 'raw', 'data.npz')) and (not self.is_hetero)
                has_necessary_file_hetero = osp.exists(osp.join(self.root, 'raw', 'edge_index_dict.npz')) and self.is_hetero
            else:
                # csv file
                has_necessary_file_simple = osp.exists(osp.join(self.root, 'raw', 'edge.csv.gz')) and (not self.is_hetero)
                has_necessary_file_hetero = osp.exists(osp.join(self.root, 'raw', 'triplet-type-list.csv.gz')) and self.is_hetero
            has_necessary_file = has_necessary_file_simple or has_necessary_file_hetero
            if not has_necessary_file:
                url = self.meta_info['url']
                if decide_download(url):
                    path = download_url(url, self.original_root)
                    extract_zip(path, self.original_root)
                    os.unlink(path)
                    # delete folder if there exists
                    try:
                        shutil.rmtree(self.root)
                    except:
                        pass
                    shutil.move(osp.join(self.original_root, self.download_name), self.root)
                else:
                    print('Stop download.')
                    exit(-1)
            raw_dir = osp.join(self.root, 'raw')
            ### pre-process and save
            add_inverse_edge = self.meta_info['add_inverse_edge'] == 'True'
            if self.meta_info['additional node files'] == 'None':
                additional_node_files = []
            else:
                additional_node_files = self.meta_info['additional node files'].split(',')
            if self.meta_info['additional edge files'] == 'None':
                additional_edge_files = []
            else:
                additional_edge_files = self.meta_info['additional edge files'].split(',')
            if self.is_hetero:
                graph = read_heterograph_dgl(raw_dir, add_inverse_edge = add_inverse_edge, additional_node_files = additional_node_files, additional_edge_files = additional_edge_files, binary=self.binary)[0]
                if self.binary:
                    tmp = np.load(osp.join(raw_dir, 'node-label.npz'))
                    label_dict = {}
                    for key in list(tmp.keys()):
                        label_dict[key] = tmp[key]
                    del tmp
                else:
                    label_dict = read_node_label_hetero(raw_dir)
                # convert into torch tensor
                if 'classification' in self.task_type:
                    for nodetype in label_dict.keys():
                        # detect if there is any nan
                        node_label = label_dict[nodetype]
                        if np.isnan(node_label).any():
                            # NaNs can't live in an integer tensor; keep float.
                            label_dict[nodetype] = torch.from_numpy(node_label).to(torch.float32)
                        else:
                            label_dict[nodetype] = torch.from_numpy(node_label).to(torch.long)
                else:
                    for nodetype in label_dict.keys():
                        node_label = label_dict[nodetype]
                        label_dict[nodetype] = torch.from_numpy(node_label).to(torch.float32)
            else:
                graph = read_graph_dgl(raw_dir, add_inverse_edge = add_inverse_edge, additional_node_files = additional_node_files, additional_edge_files = additional_edge_files, binary=self.binary)[0]
                ### adding prediction target
                if self.binary:
                    node_label = np.load(osp.join(raw_dir, 'node-label.npz'))['node_label']
                else:
                    node_label = pd.read_csv(osp.join(raw_dir, 'node-label.csv.gz'), compression='gzip', header = None).values
                if 'classification' in self.task_type:
                    # detect if there is any nan
                    if np.isnan(node_label).any():
                        node_label = torch.from_numpy(node_label).to(torch.float32)
                    else:
                        node_label = torch.from_numpy(node_label).to(torch.long)
                else:
                    node_label = torch.from_numpy(node_label).to(torch.float32)
                label_dict = {'labels': node_label}
            print('Saving...')
            save_graphs(pre_processed_file_path, graph, label_dict)
            # Re-load from disk so in-memory state matches the cached copy.
            self.graph, label_dict = load_graphs(pre_processed_file_path)
            if self.is_hetero:
                self.labels = label_dict
            else:
                self.labels = label_dict['labels']

    def get_idx_split(self, split_type = None):
        """Return the official {train, valid, test} node index split."""
        if split_type is None:
            split_type = self.meta_info['split']
        path = osp.join(self.root, 'split', split_type)
        # short-cut if split_dict.pt exists
        if os.path.isfile(os.path.join(path, 'split_dict.pt')):
            return torch.load(os.path.join(path, 'split_dict.pt'))
        if self.is_hetero:
            train_idx_dict, valid_idx_dict, test_idx_dict = read_nodesplitidx_split_hetero(path)
            for nodetype in train_idx_dict.keys():
                train_idx_dict[nodetype] = torch.from_numpy(train_idx_dict[nodetype]).to(torch.long)
                valid_idx_dict[nodetype] = torch.from_numpy(valid_idx_dict[nodetype]).to(torch.long)
                test_idx_dict[nodetype] = torch.from_numpy(test_idx_dict[nodetype]).to(torch.long)
            return {'train': train_idx_dict, 'valid': valid_idx_dict, 'test': test_idx_dict}
        else:
            train_idx = torch.from_numpy(pd.read_csv(osp.join(path, 'train.csv.gz'), compression='gzip', header = None).values.T[0]).to(torch.long)
            valid_idx = torch.from_numpy(pd.read_csv(osp.join(path, 'valid.csv.gz'), compression='gzip', header = None).values.T[0]).to(torch.long)
            test_idx = torch.from_numpy(pd.read_csv(osp.join(path, 'test.csv.gz'), compression='gzip', header = None).values.T[0]).to(torch.long)
            return {'train': train_idx, 'valid': valid_idx, 'test': test_idx}

    def __getitem__(self, idx):
        # The dataset is a single graph; only index 0 is valid.
        assert idx == 0, 'This dataset has only one graph'
        return self.graph[idx], self.labels

    def __len__(self):
        return 1

    def __repr__(self):  # pragma: no cover
        return '{}({})'.format(self.__class__.__name__, len(self))
if __name__ == '__main__':
    # Smoke test: load ogbn-proteins and print basic dataset facts.
    dataset = DglNodePropPredDataset(name='ogbn-proteins')
    print(dataset.num_classes)
    splits = dataset.get_idx_split()
    print(dataset[0])
    print(splits)
| mit | e85928d4c1c5cd3409194e51fde50632 | 44.644444 | 207 | 0.555404 | 3.786873 | false | false | false | false |
adamchainz/django-mysql | src/django_mysql/models/fields/fixedchar.py | 1 | 1327 | from __future__ import annotations
from typing import Any
from typing import cast
from django.core import checks
from django.db.backends.base.base import BaseDatabaseWrapper
from django.db.models import CharField
from django_mysql.typing import DeconstructResult
class FixedCharField(CharField):
    """A CharField backed by a fixed-width MySQL ``CHAR`` column."""

    def check(self, **kwargs: Any) -> list[checks.CheckMessage]:
        messages = super().check(**kwargs)
        max_length = self.max_length
        # CHAR columns only support widths in [0, 255].
        if isinstance(max_length, int) and not (0 <= max_length <= 255):
            messages.append(
                checks.Error(
                    "'max_length' must be between 0 and 255.",
                    hint=None,
                    obj=self,
                    id="django_mysql.E015",
                )
            )
        return messages

    def deconstruct(self) -> DeconstructResult:
        name, path, args, kwargs = cast(DeconstructResult, super().deconstruct())
        # Normalise historical import paths to the canonical public one.
        if path in (
            "django_mysql.models.fields.fixedchar.FixedCharField",
            "django_mysql.models.fields.FixedCharField",
        ):
            path = "django_mysql.models.FixedCharField"
        return name, path, args, kwargs

    def db_type(self, connection: BaseDatabaseWrapper) -> str:
        return f"CHAR({self.max_length})"
adamchainz/django-mysql | tests/testapp/utils.py | 1 | 3411 | from __future__ import annotations
import sys
from contextlib import contextmanager
from types import TracebackType
from typing import Any
from typing import Generator
import pytest
from django.db import connection
from django.db import connections
from django.db import DEFAULT_DB_ALIAS
from django.db.backends.base.base import BaseDatabaseWrapper
from django.db.backends.mysql.base import DatabaseWrapper as MySQLDatabaseWrapper
from django.db.backends.utils import CursorWrapper
from django.test.utils import CaptureQueriesContext
if sys.version_info >= (3, 10):
from typing import TypeGuard
else:
from typing_extensions import TypeGuard
def conn_is_mysql(connection: BaseDatabaseWrapper) -> TypeGuard[MySQLDatabaseWrapper]:
    """True when the given connection talks to a MySQL/MariaDB backend."""
    is_mysql: bool = connection.vendor == "mysql"
    return is_mysql
@contextmanager
def skip_if_mysql_8_plus() -> Generator[None, None, None]:
    """Skip the surrounding test unless running on MySQL < 8 or MariaDB."""
    on_old_mysql_or_mariadb = conn_is_mysql(connection) and (
        connection.mysql_is_mariadb or connection.mysql_version < (8,)
    )
    if not on_old_mysql_or_mariadb:
        pytest.skip("Requires MySQL<8 or MariaDB")
    yield
def column_type(table_name: str, column_name: str) -> str:
    """Return the INFORMATION_SCHEMA DATA_TYPE of a column in the current DB."""
    with connection.cursor() as cursor:
        cursor.execute(
            """SELECT DATA_TYPE FROM INFORMATION_SCHEMA.COLUMNS
            WHERE TABLE_SCHEMA = DATABASE() AND TABLE_NAME = %s AND
            COLUMN_NAME = %s""",
            (table_name, column_name),
        )
        row = cursor.fetchone()
    data_type: str = row[0]
    return data_type
class CaptureLastQuery:
    """Context manager exposing the most recent SQL statement run on a connection."""

    def __init__(self, conn: BaseDatabaseWrapper | None = None) -> None:
        self.conn: BaseDatabaseWrapper = connection if conn is None else conn

    def __enter__(self) -> CaptureLastQuery:
        self.capturer = CaptureQueriesContext(self.conn)
        self.capturer.__enter__()
        return self

    def __exit__(
        self,
        exc_type: type[BaseException] | None,
        exc_val: BaseException | None,
        exc_tb: TracebackType | None,
    ) -> None:
        self.capturer.__exit__(exc_type, exc_val, exc_tb)

    @property
    def query(self) -> str:
        # The last entry in the capture buffer is the most recent query.
        return self.capturer.captured_queries[-1]["sql"]
class print_all_queries:
    """Context manager that prints every SQL query executed within its block."""

    def __init__(self, conn: BaseDatabaseWrapper | None = None) -> None:
        self.conn: BaseDatabaseWrapper = connection if conn is None else conn

    def __enter__(self) -> print_all_queries:
        self.capturer = CaptureQueriesContext(self.conn)
        self.capturer.__enter__()
        return self

    def __exit__(
        self,
        exc_type: type[BaseException] | None,
        exc_val: BaseException | None,
        exc_tb: TracebackType | None,
    ) -> None:
        self.capturer.__exit__(exc_type, exc_val, exc_tb)
        for captured in self.capturer.captured_queries:
            print(captured["sql"])
def used_indexes(query: str, using: str = DEFAULT_DB_ALIAS) -> set[str]:
    """
    Given SQL 'query', run EXPLAIN and return the names of the indexes used
    """
    conn = connections[using]
    with conn.cursor() as cursor:
        cursor.execute("EXPLAIN " + query)
        rows = fetchall_dicts(cursor)
    return {row["key"] for row in rows if row["key"] is not None}
def fetchall_dicts(cursor: CursorWrapper) -> list[dict[str, Any]]:
    """Fetch every remaining row from *cursor* as a column-name -> value dict."""
    names = [entry[0] for entry in cursor.description]
    return [dict(zip(names, values)) for values in cursor.fetchall()]
| mit | 274776f47005ef996bf44f64b0b40b52 | 30.583333 | 87 | 0.650249 | 3.934256 | false | false | false | false |
adamchainz/django-mysql | src/django_mysql/management/commands/dbparams.py | 1 | 4223 | from __future__ import annotations
import argparse
from typing import Any
from django.core.management import BaseCommand
from django.core.management import CommandError
from django.db import connections
from django.db import DEFAULT_DB_ALIAS
from django.db.utils import ConnectionDoesNotExist
from django_mysql.utils import settings_to_cmd_args
class Command(BaseCommand):
    """Management command printing connection parameters for external tools."""

    args = "<optional connection alias>"
    help = (
        "Outputs shell parameters representing database connection "
        "suitable for inclusion in various tools' commandlines. The "
        "connection alias should be a name from DATABASES - defaults to "
        "'{default}'."
    ).format(default=DEFAULT_DB_ALIAS)
    # No system checks needed: the command only reads settings.
    requires_system_checks: list[str] = []

    def add_arguments(self, parser: argparse.ArgumentParser) -> None:
        """Register the alias positional and the mutually-exclusive output flags."""
        parser.add_argument(
            "alias",
            metavar="alias",
            nargs="?",
            default=DEFAULT_DB_ALIAS,
            help="Specify the database connection alias to output " "parameters for.",
        )
        parser.add_argument(
            "--mysql",
            action="store_true",
            dest="show_mysql",
            default=False,
            help="Outputs flags for tools that take parameters in the "
            "same format as the mysql client, e.g. mysql "
            "$(./manage.py dbparams --mysql)",
        )
        parser.add_argument(
            "--dsn",
            action="store_true",
            dest="show_dsn",
            default=False,
            help="Output a DSN for e.g. percona tools, e.g. "
            "pt-online-schema-change $(./manage.py dbparams --dsn)",
        )

    def handle(
        self, *args: Any, alias: str, show_mysql: bool, show_dsn: bool, **options: Any
    ) -> None:
        """Validate the alias/flags and dispatch to the chosen output format."""
        try:
            connection = connections[alias]
        except ConnectionDoesNotExist:
            raise CommandError(f"Connection '{alias}' does not exist")
        if connection.vendor != "mysql":
            raise CommandError(f"{alias} is not a MySQL database connection")
        if show_mysql and show_dsn:
            raise CommandError("Pass only one of --mysql and --dsn")
        elif not show_mysql and not show_dsn:
            # Default output format when neither flag is given.
            show_mysql = True
        settings_dict: dict[str, Any] = connection.settings_dict
        if show_mysql:
            self.output_for_mysql(settings_dict)
        elif show_dsn:
            self.output_for_dsn(settings_dict)
        else:  # pragma: no cover
            raise AssertionError("Impossible")

    def output_for_mysql(self, settings_dict: dict[str, Any]) -> None:
        """Write mysql-client style flags (no trailing newline) to stdout."""
        args = settings_to_cmd_args(settings_dict)
        args = args[1:]  # Delete the 'mysql' at the start
        self.stdout.write(" ".join(args), ending="")

    def output_for_dsn(self, settings_dict: dict[str, Any]) -> None:
        """Write a Percona-toolkit DSN (key=value pairs joined by commas)."""
        cert = settings_dict["OPTIONS"].get("ssl", {}).get("ca")
        if cert:
            # DSNs cannot carry SSL parameters; warn rather than drop silently.
            self.stderr.write(
                "Warning: SSL params can't be passed in the DSN syntax; you "
                "must pass them in your my.cnf. See: "
                "https://www.percona.com/blog/2014/10/16/percona-toolkit-for-"
                "mysql-with-mysql-ssl-connections/"
            )
        # OPTIONS entries take precedence over the top-level settings values.
        db = settings_dict["OPTIONS"].get("db", settings_dict["NAME"])
        user = settings_dict["OPTIONS"].get("user", settings_dict["USER"])
        passwd = settings_dict["OPTIONS"].get("passwd", settings_dict["PASSWORD"])
        host = settings_dict["OPTIONS"].get("host", settings_dict["HOST"])
        port = settings_dict["OPTIONS"].get("port", settings_dict["PORT"])
        defaults_file = settings_dict["OPTIONS"].get("read_default_file")
        args = []
        if defaults_file:
            args.append(f"F={defaults_file}")
        if user:
            args.append(f"u={user}")
        if passwd:
            args.append(f"p={passwd}")
        if host:
            # A '/' in host means a Unix socket path (S=), otherwise TCP (h=).
            if "/" in host:
                args.append(f"S={host}")
            else:
                args.append(f"h={host}")
        if port:
            args.append(f"P={port}")
        if db:
            args.append(f"D={db}")
        dsn = ",".join(args)
        self.stdout.write(dsn, ending="")
adamchainz/django-mysql | src/django_mysql/operations.py | 1 | 5386 | from __future__ import annotations
from django.db.backends.base.schema import BaseDatabaseSchemaEditor
from django.db.migrations.operations.base import Operation
from django.db.migrations.state import ModelState
from django.utils.functional import cached_property
class InstallPlugin(Operation):
    """Migration operation installing a MySQL/MariaDB plugin by name + SONAME."""

    # Needs a live query (plugin_installed) so it can't reduce to plain SQL.
    reduces_to_sql = False
    reversible = True

    def __init__(self, name: str, soname: str) -> None:
        self.name = name
        self.soname = soname

    def state_forwards(self, app_label: str, state: ModelState) -> None:
        # Plugins live in the server, not in Django's model state.
        pass  # pragma: no cover

    def database_forwards(
        self,
        app_label: str,
        schema_editor: BaseDatabaseSchemaEditor,
        from_st: ModelState,
        to_st: ModelState,
    ) -> None:
        # Idempotent: only install when the plugin is not already present.
        if not self.plugin_installed(schema_editor):
            schema_editor.execute(
                f"INSTALL PLUGIN {self.name} SONAME %s", (self.soname,)
            )

    def database_backwards(
        self,
        app_label: str,
        schema_editor: BaseDatabaseSchemaEditor,
        from_st: ModelState,
        to_st: ModelState,
    ) -> None:
        if self.plugin_installed(schema_editor):
            schema_editor.execute("UNINSTALL PLUGIN %s" % self.name)

    def plugin_installed(self, schema_editor: BaseDatabaseSchemaEditor) -> bool:
        """Return True if a plugin with this name is registered on the server."""
        with schema_editor.connection.cursor() as cursor:
            cursor.execute(
                """SELECT COUNT(*)
                   FROM INFORMATION_SCHEMA.PLUGINS
                   WHERE PLUGIN_NAME LIKE %s""",
                (self.name,),
            )
            count = cursor.fetchone()[0]
        return count > 0

    def describe(self) -> str:
        return f"Installs plugin {self.name} from {self.soname}"
class InstallSOName(Operation):
    """Migration operation installing a MariaDB plugin library by SONAME."""

    reduces_to_sql = True
    reversible = True

    def __init__(self, soname: str) -> None:
        self.soname = soname

    def state_forwards(self, app_label: str, state: ModelState) -> None:
        # Server-side only; no Django model state to mutate.
        pass  # pragma: no cover

    def database_forwards(
        self,
        app_label: str,
        schema_editor: BaseDatabaseSchemaEditor,
        from_st: ModelState,
        to_st: ModelState,
    ) -> None:
        schema_editor.execute("INSTALL SONAME %s", (self.soname,))

    def database_backwards(
        self,
        app_label: str,
        schema_editor: BaseDatabaseSchemaEditor,
        from_st: ModelState,
        to_st: ModelState,
    ) -> None:
        schema_editor.execute("UNINSTALL SONAME %s", (self.soname,))

    def describe(self) -> str:
        return f"Installs library {self.soname}"
class AlterStorageEngine(Operation):
    """Migration operation switching a model's table to another storage engine."""

    def __init__(
        self, name: str, to_engine: str, from_engine: str | None = None
    ) -> None:
        self.name = name
        self.engine = to_engine
        # Needed only to make the operation reversible.
        self.from_engine = from_engine

    @property
    def reversible(self) -> bool:
        return self.from_engine is not None

    def state_forwards(self, app_label: str, state: ModelState) -> None:
        # Storage engine is not tracked in Django's model state.
        pass

    def database_forwards(
        self,
        app_label: str,
        schema_editor: BaseDatabaseSchemaEditor,
        from_state: ModelState,
        to_state: ModelState,
    ) -> None:
        self._change_engine(app_label, schema_editor, to_state, engine=self.engine)

    def database_backwards(
        self,
        app_label: str,
        schema_editor: BaseDatabaseSchemaEditor,
        from_state: ModelState,
        to_state: ModelState,
    ) -> None:
        if self.from_engine is None:
            raise NotImplementedError("You cannot reverse this operation")
        self._change_engine(app_label, schema_editor, to_state, engine=self.from_engine)

    def _change_engine(
        self,
        app_label: str,
        schema_editor: BaseDatabaseSchemaEditor,
        to_state: ModelState,
        engine: str,
    ) -> None:
        """Run ALTER TABLE ... ENGINE=..., skipping if already on that engine."""
        new_model = to_state.apps.get_model(app_label, self.name)
        qn = schema_editor.connection.ops.quote_name
        if self.allow_migrate_model(  # pragma: no branch
            schema_editor.connection.alias, new_model
        ):
            # Check first: re-running ALTER on the same engine would needlessly
            # rebuild the table.
            with schema_editor.connection.cursor() as cursor:
                cursor.execute(
                    """SELECT COUNT(*) FROM INFORMATION_SCHEMA.TABLES
                       WHERE TABLE_SCHEMA=DATABASE() AND
                             TABLE_NAME = %s AND
                             ENGINE = %s""",
                    (new_model._meta.db_table, engine),
                )
                uses_engine_already = cursor.fetchone()[0] > 0
            if uses_engine_already:
                return
            schema_editor.execute(
                "ALTER TABLE {table} ENGINE={engine}".format(
                    table=qn(new_model._meta.db_table),
                    engine=engine,
                )
            )

    @cached_property
    def name_lower(self) -> str:
        return self.name.lower()

    def references_model(self, name: str, app_label: str | None = None) -> bool:
        # Case-insensitive match, like Django's own operations.
        return name.lower() == self.name_lower

    def describe(self) -> str:
        if self.from_engine:
            from_clause = f" from {self.from_engine}"
        else:
            from_clause = ""
        return "Alter storage engine for {model}{from_clause} to {engine}".format(
            model=self.name, from_clause=from_clause, engine=self.engine
        )
| mit | ffc53c8e5828d35b89ccb4163145c53a | 29.777143 | 88 | 0.578723 | 4.04051 | false | false | false | false |
adamchainz/django-mysql | src/django_mysql/models/functions.py | 1 | 14448 | from __future__ import annotations
import datetime as dt
import json
import warnings
from typing import Any
from typing import Union
from django.db import connections
from django.db import DEFAULT_DB_ALIAS
from django.db.backends.base.base import BaseDatabaseWrapper
from django.db.models import CharField
from django.db.models import Expression
from django.db.models import Field as DjangoField
from django.db.models import Func
from django.db.models import IntegerField
from django.db.models import JSONField
from django.db.models import TextField
from django.db.models import Value
from django.db.models.sql.compiler import SQLCompiler
# Anything accepted where the ORM expects an expression: a resolved
# Expression instance, or a string column reference (handled by Django).
ExpressionArgument = Union[
    Expression,
    str,  # column reference handled by Django
]
class SingleArgFunc(Func):
    """Base class for one-argument SQL functions with a fixed output field type."""

    # Subclasses set this to the Field class used for the result.
    output_field_class: type[DjangoField]

    def __init__(self, expression: ExpressionArgument) -> None:
        output_field = self.output_field_class()
        super().__init__(expression, output_field=output_field)
# Control Flow Functions
class If(Func):
    """SQL IF(condition, true, false) control-flow function."""
    function = "IF"

    def __init__(
        self,
        condition: ExpressionArgument,
        true: ExpressionArgument,
        false: ExpressionArgument | None = None,
        output_field: DjangoField | None = None,
    ) -> None:
        if output_field is None:
            # Workaround for some ORM weirdness: a bare Field lets Django
            # infer the result type instead of forcing one.
            output_field = DjangoField()
        super().__init__(condition, true, false, output_field=output_field)
# Numeric Functions
class CRC32(SingleArgFunc):
    """SQL CRC32(expr): cyclic-redundancy-check value as an integer."""
    function = "CRC32"
    output_field_class = IntegerField
class Sign(SingleArgFunc):
    """Deprecated: SQL SIGN(expr). Use django.db.models.functions.Sign instead."""
    function = "SIGN"
    output_field_class = IntegerField

    def __init__(self, *args: Any, **kwargs: Any) -> None:
        # Warn at construction time; stacklevel=2 points the warning at the caller.
        warnings.warn(
            (
                "This function is deprecated. Use "
                + "django.db.models.functions.Sign instead."
            ),
            DeprecationWarning,
            stacklevel=2,
        )
        super().__init__(*args, **kwargs)
# String Functions
class ConcatWS(Func):
    """
    Stands for CONCAT_With-Separator

    Joins the given expressions with *separator*, skipping NULL values.
    """

    function = "CONCAT_WS"

    def __init__(
        self, *expressions: ExpressionArgument, separator: str | None = ","
    ) -> None:
        if len(expressions) < 2:
            raise ValueError("ConcatWS must take at least two expressions")
        sep_expr = (
            separator
            if hasattr(separator, "resolve_expression")
            else Value(separator)
        )
        # N.B. if separator is "," we could potentially use list field
        super().__init__(sep_expr, *expressions, output_field=TextField())
class ELT(Func):
    """SQL ELT(num, e1, e2, ...): the num'th (1-based) expression, as a string."""

    function = "ELT"

    def __init__(
        self,
        num: ExpressionArgument,
        expressions: list[ExpressionArgument] | tuple[ExpressionArgument],
    ) -> None:
        # Wrap plain values so they are passed as bind parameters.
        wrapped = [
            v if hasattr(v, "resolve_expression") else Value(v) for v in expressions
        ]
        super().__init__(num, *wrapped, output_field=CharField())
class Field(Func):
    """SQL FIELD(field, v1, v2, ...): 1-based index of *field* within *values*."""

    function = "FIELD"

    def __init__(
        self,
        field: ExpressionArgument,
        values: list[ExpressionArgument] | tuple[ExpressionArgument],
        **kwargs: Any,
    ) -> None:
        # **kwargs is accepted but not forwarded, matching the original
        # signature of this class.
        wrapped = [
            v if hasattr(v, "resolve_expression") else Value(v) for v in values
        ]
        super().__init__(field, *wrapped)
# XML Functions
class UpdateXML(Func):
    """SQL UPDATEXML(xml_target, xpath_expr, new_xml): replace a matched fragment."""

    function = "UPDATEXML"

    def __init__(
        self,
        xml_target: ExpressionArgument,
        xpath_expr: ExpressionArgument,
        new_xml: ExpressionArgument,
    ) -> None:
        # Wrap plain values so they are passed as bind parameters.
        xpath_expr, new_xml = (
            arg if hasattr(arg, "resolve_expression") else Value(arg)
            for arg in (xpath_expr, new_xml)
        )
        super().__init__(xml_target, xpath_expr, new_xml, output_field=TextField())
class XMLExtractValue(Func):
    """SQL EXTRACTVALUE(xml_frag, xpath_expr): text content matched by an XPath."""

    function = "EXTRACTVALUE"

    def __init__(
        self, xml_frag: ExpressionArgument, xpath_expr: ExpressionArgument
    ) -> None:
        wrapped_xpath = (
            xpath_expr
            if hasattr(xpath_expr, "resolve_expression")
            else Value(xpath_expr)
        )
        super().__init__(xml_frag, wrapped_xpath, output_field=TextField())
# Encryption Functions
class MD5(SingleArgFunc):
    """Deprecated: SQL MD5(expr). Use django.db.models.functions.MD5 instead."""
    function = "MD5"
    output_field_class = CharField

    def __init__(self, *args: Any, **kwargs: Any) -> None:
        # Warn at construction time; stacklevel=2 points at the caller.
        warnings.warn(
            "This function is deprecated. Use django.db.models.functions.MD5 instead.",
            DeprecationWarning,
            stacklevel=2,
        )
        super().__init__(*args, **kwargs)
class SHA1(SingleArgFunc):
    """Deprecated: SQL SHA1(expr). Use django.db.models.functions.SHA1 instead."""
    function = "SHA1"
    output_field_class = CharField

    def __init__(self, *args: Any, **kwargs: Any) -> None:
        # Warn at construction time; stacklevel=2 points at the caller.
        warnings.warn(
            "This function is deprecated. Use django.db.models.functions.SHA1 instead.",
            DeprecationWarning,
            stacklevel=2,
        )
        super().__init__(*args, **kwargs)
class SHA2(Func):
    """Deprecated: SQL SHA2(expr, hash_len).

    Use django.db.models.functions.SHA224/SHA256/SHA384/SHA512 instead.
    """

    function = "SHA2"
    hash_lens = (224, 256, 384, 512)

    def __init__(self, expression: ExpressionArgument, hash_len: int = 512) -> None:
        if hash_len not in self.hash_lens:
            raise ValueError(
                "hash_len must be one of {}".format(
                    ",".join(str(x) for x in self.hash_lens)
                )
            )
        # Bug fix: the original message concatenated a plain (non-f) string
        # containing "{hash_len}", so users saw the literal placeholder.
        # Format the chosen length into the suggested replacement name.
        warnings.warn(
            (
                "This function is deprecated. Use "
                "django.db.models.functions.SHA{} instead.".format(hash_len)
            ),
            DeprecationWarning,
            stacklevel=2,
        )
        super().__init__(expression, Value(hash_len), output_field=TextField())
# Information Functions
class LastInsertId(Func):
    """SQL LAST_INSERT_ID([expr]): read (or, with expr, set) the session value."""
    function = "LAST_INSERT_ID"

    def __init__(self, expression: ExpressionArgument | None = None) -> None:
        if expression is not None:
            # LAST_INSERT_ID(expr) also *sets* the session value to expr.
            super().__init__(expression, output_field=IntegerField())
        else:
            super().__init__(output_field=IntegerField())

    @classmethod
    def get(cls, using: str = DEFAULT_DB_ALIAS) -> int:
        """Fetch the current LAST_INSERT_ID() value on connection `using`."""
        # N.B. did try getting it from connection.connection.insert_id() (The
        # MySQLdb query-free method) but it did not work with non-default
        # database connections in Django, and the reason was not clear
        with connections[using].cursor() as cursor:
            cursor.execute("SELECT LAST_INSERT_ID()")
            return cursor.fetchone()[0]
# JSON Functions
class JSONExtract(Func):
function = "JSON_EXTRACT"
def __init__(
self,
expression: ExpressionArgument,
*paths: ExpressionArgument,
output_field: type[DjangoField] | None = None,
) -> None:
exprs = [expression]
for path in paths:
if not hasattr(path, "resolve_expression"):
path = Value(path)
exprs.append(path)
if output_field is not None:
if len(paths) > 1:
raise TypeError(
"output_field won't work with more than one path, as a "
"JSON Array will be returned"
)
else:
output_field = JSONField()
super().__init__(*exprs, output_field=output_field)
class JSONKeys(Func):
function = "JSON_KEYS"
def __init__(
self,
expression: ExpressionArgument,
path: ExpressionArgument | None = None,
) -> None:
exprs = [expression]
if path is not None:
if not hasattr(path, "resolve_expression"):
path = Value(path)
exprs.append(path)
super().__init__(*exprs, output_field=JSONField())
class JSONLength(Func):
function = "JSON_LENGTH"
def __init__(
self,
expression: ExpressionArgument,
path: ExpressionArgument | None = None,
*,
output_field: DjangoField | None = None,
**extra: Any,
) -> None:
if output_field is None:
output_field = IntegerField()
exprs = [expression]
if path is not None:
if not hasattr(path, "resolve_expression"):
path = Value(path)
exprs.append(path)
super().__init__(*exprs, output_field=output_field)
# When only Django 3.1+ is supported, JSONValue can be replaced with
# Cast(..., output_field=JSONField())
class JSONValue(Expression):
def __init__(
self, data: None | int | float | str | list[Any] | dict[str, Any]
) -> None:
self._data = data
def as_sql(
self,
compiler: SQLCompiler,
connection: BaseDatabaseWrapper,
) -> tuple[str, tuple[Any, ...]]:
if connection.vendor != "mysql": # pragma: no cover
raise AssertionError("JSONValue only supports MySQL/MariaDB")
json_string = json.dumps(self._data, allow_nan=False)
if connection.vendor == "mysql" and connection.mysql_is_mariadb:
# MariaDB doesn't support explicit cast to JSON.
return "JSON_EXTRACT(%s, '$')", (json_string,)
else:
return "CAST(%s AS JSON)", (json_string,)
class BaseJSONModifyFunc(Func):
def __init__(
self,
expression: ExpressionArgument,
data: dict[
str,
(
ExpressionArgument
| None
| int
| float
| str
| list[Any]
| dict[str, Any]
),
],
) -> None:
if not data:
raise ValueError('"data" cannot be empty')
exprs = [expression]
for path, value in data.items():
if not hasattr(path, "resolve_expression"):
path = Value(path)
exprs.append(path)
if not hasattr(value, "resolve_expression"):
value = JSONValue(value)
exprs.append(value)
super().__init__(*exprs, output_field=JSONField())
class JSONInsert(BaseJSONModifyFunc):
function = "JSON_INSERT"
class JSONReplace(BaseJSONModifyFunc):
function = "JSON_REPLACE"
class JSONSet(BaseJSONModifyFunc):
function = "JSON_SET"
class JSONArrayAppend(BaseJSONModifyFunc):
function = "JSON_ARRAY_APPEND"
# MariaDB Regexp Functions
class RegexpInstr(Func):
function = "REGEXP_INSTR"
def __init__(
self, expression: ExpressionArgument, regex: ExpressionArgument
) -> None:
if not hasattr(regex, "resolve_expression"):
regex = Value(regex)
super().__init__(expression, regex, output_field=IntegerField())
class RegexpReplace(Func):
function = "REGEXP_REPLACE"
def __init__(
self,
expression: ExpressionArgument,
regex: ExpressionArgument,
replace: ExpressionArgument,
) -> None:
if not hasattr(regex, "resolve_expression"):
regex = Value(regex)
if not hasattr(replace, "resolve_expression"):
replace = Value(replace)
super().__init__(expression, regex, replace, output_field=CharField())
class RegexpSubstr(Func):
function = "REGEXP_SUBSTR"
def __init__(
self, expression: ExpressionArgument, regex: ExpressionArgument
) -> None:
if not hasattr(regex, "resolve_expression"):
regex = Value(regex)
super().__init__(expression, regex, output_field=CharField())
# MariaDB Dynamic Columns Functions
class AsType(Func):
"""
Helper for ColumnAdd when you want to add a column with a given type
"""
function = ""
template = "%(expressions)s AS %(data_type)s"
def __init__(self, expression: ExpressionArgument, data_type: str) -> None:
from django_mysql.models.fields.dynamic import KeyTransform
if not hasattr(expression, "resolve_expression"):
expression = Value(expression)
if data_type not in KeyTransform.TYPE_MAP and data_type != "BINARY":
raise ValueError(f"Invalid data_type '{data_type}'")
super().__init__(expression, data_type=data_type)
class ColumnAdd(Func):
function = "COLUMN_ADD"
def __init__(
self,
expression: ExpressionArgument,
to_add: dict[
str, ExpressionArgument | float | int | dt.date | dt.time | dt.datetime
],
) -> None:
from django_mysql.models.fields import DynamicField
expressions = [expression]
for name, value in to_add.items():
if not hasattr(name, "resolve_expression"):
name = Value(name)
if isinstance(value, dict):
raise ValueError("ColumnAdd with nested values is not supported")
if not hasattr(value, "resolve_expression"):
value = Value(value)
expressions.extend((name, value))
super().__init__(*expressions, output_field=DynamicField())
class ColumnDelete(Func):
function = "COLUMN_DELETE"
def __init__(
self, expression: ExpressionArgument, *to_delete: ExpressionArgument
) -> None:
from django_mysql.models.fields import DynamicField
expressions = [expression]
for name in to_delete:
if not hasattr(name, "resolve_expression"):
name = Value(name)
expressions.append(name)
super().__init__(*expressions, output_field=DynamicField())
class ColumnGet(Func):
function = "COLUMN_GET"
template = "COLUMN_GET(%(expressions)s AS %(data_type)s)"
def __init__(
self,
expression: ExpressionArgument,
column_name: ExpressionArgument,
data_type: str,
):
from django_mysql.models.fields.dynamic import DynamicField, KeyTransform
if not hasattr(column_name, "resolve_expression"):
column_name = Value(column_name)
output_field: DjangoField[Any, Any]
if data_type == "BINARY":
output_field = DynamicField()
else:
try:
output_field = KeyTransform.TYPE_MAP[data_type]
except KeyError:
raise ValueError(f"Invalid data_type {data_type!r}")
super().__init__(
expression, column_name, output_field=output_field, data_type=data_type
)
| mit | 9948aebd76f35eb14606dcdf413d079e | 26.311909 | 88 | 0.587279 | 4.273292 | false | false | false | false |
uber/tchannel-python | tests/thrift/test_module.py | 1 | 2840 | # Copyright (c) 2016 Uber Technologies, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import inspect
import pytest
import six
from tchannel import thrift_request_builder
from tchannel.thrift.module import ThriftRequest
from tchannel.thrift.module import ThriftRequestMaker
from tests.data.generated.ThriftTest import ThriftTest
@pytest.mark.skipif(six.PY3, reason='Deprecated')
@pytest.mark.call
def test_from_thrift_class_should_return_request_maker():
maker = thrift_request_builder('thrift_test', ThriftTest)
assert isinstance(maker, ThriftRequestMaker)
@pytest.mark.skipif(six.PY3, reason='Deprecated')
@pytest.mark.call
def test_maker_should_have_thrift_iface_methods():
maker = thrift_request_builder('thrift_test', ThriftTest)
# extract list of maker methods
maker_methods = [
m[0] for m in
inspect.getmembers(maker, predicate=inspect.ismethod)
]
# extract list of iface methods
iface_methods = [
m[0] for m in
inspect.getmembers(ThriftTest.Iface, predicate=inspect.ismethod)
]
# verify all of iface_methods exist in maker_methods
assert set(iface_methods) < set(maker_methods)
@pytest.mark.skipif(six.PY3, reason='Deprecated')
@pytest.mark.call
def test_request_maker_should_return_request():
maker = thrift_request_builder('thrift_test', ThriftTest)
request = maker.testString('hi')
assert isinstance(request, ThriftRequest)
assert request.service == 'thrift_test'
assert request.endpoint == 'ThriftTest::testString'
assert request.result_type == ThriftTest.testString_result
assert request.call_args == ThriftTest.testString_args(thing='hi')
| mit | 31f94af8299a7483cc8bb53db6553e57 | 34.5 | 79 | 0.753521 | 3.94993 | false | true | false | false |
uber/tchannel-python | tchannel/retry.py | 1 | 1696 | # Copyright (c) 2016 Uber Technologies, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from __future__ import (
absolute_import, division, print_function, unicode_literals
)
#: Retry the request on failures to connect to a remote host. This is the
#: default retry behavior.
CONNECTION_ERROR = 'c'
#: Never retry the request.
NEVER = 'n'
#: Retry the request on timeouts waiting for a response.
TIMEOUT = 't'
#: Retry the request on failures to connect and timeouts after connecting.
CONNECTION_ERROR_AND_TIMEOUT = 'ct'
DEFAULT = CONNECTION_ERROR
#: The default number of times to retry a request. This is in addition to the
#: original request.
DEFAULT_RETRY_LIMIT = 4
| mit | a9931bdb5a9ad11e71a93ef4e043e314 | 39.380952 | 79 | 0.764151 | 4.250627 | false | false | false | false |
smicallef/spiderfoot | modules/sfp_junkfiles.py | 1 | 8026 | # -*- coding: utf-8 -*-
# -------------------------------------------------------------------------------
# Name: sfp_junkfiles
# Purpose: From Spidering, identifies backup and temporary files.
#
# Author: Steve Micallef <steve@binarypool.com>
#
# Created: 23/08/2014
# Copyright: (c) Steve Micallef 2014
# Licence: MIT
# -------------------------------------------------------------------------------
import random
from spiderfoot import SpiderFootEvent, SpiderFootHelpers, SpiderFootPlugin
class sfp_junkfiles(SpiderFootPlugin):
meta = {
'name': "Junk File Finder",
'summary': "Looks for old/temporary and other similar files.",
'flags': ["slow", "errorprone", "invasive"],
'useCases': ["Footprint"],
'categories': ["Crawling and Scanning"]
}
# Default options
opts = {
'fileexts': ['tmp', 'bak', 'old'],
'urlextstry': ['asp', 'php', 'jsp', ],
'files': ["old", "passwd", ".htaccess", ".htpasswd",
"Thumbs.db", "backup"],
'dirs': ['zip', 'tar.gz', 'tgz', 'tar']
}
# Option descriptions
optdescs = {
'fileexts': "File extensions to try.",
'urlextstry': "Try those extensions against URLs with these extensions.",
'files': "Try to fetch each of these files from the directory of the URL.",
'dirs': "Try to fetch the containing folder with these extensions."
}
results = None
hosts = None
skiphosts = None
bases = None
def setup(self, sfc, userOpts=dict()):
self.sf = sfc
self.results = self.tempStorage()
self.hosts = self.tempStorage()
self.skiphosts = self.tempStorage()
self.bases = self.tempStorage()
self.__dataSource__ = "Target Website"
for opt in list(userOpts.keys()):
self.opts[opt] = userOpts[opt]
# What events is this module interested in for input
def watchedEvents(self):
return ["LINKED_URL_INTERNAL"]
# What events this module produces
# This is to support the end user in selecting modules based on events
# produced.
def producedEvents(self):
return ["JUNK_FILE"]
# Test how trustworthy a result is
def checkValidity(self, junkUrl):
# Try and fetch an obviously missing version of the junk file
fetch = junkUrl + str(random.SystemRandom().randint(0, 99999999))
res = self.sf.fetchUrl(fetch, headOnly=True,
timeout=self.opts['_fetchtimeout'],
useragent=self.opts['_useragent'],
verify=False)
if res['code'] != "404":
host = SpiderFootHelpers.urlBaseUrl(junkUrl)
self.skiphosts[host] = True
return False
return True
# Handle events sent to this module
def handleEvent(self, event):
eventName = event.eventType
srcModuleName = event.module
eventData = event.data
self.debug(f"Received event, {eventName}, from {srcModuleName}")
if eventData in self.results:
return
self.results[eventData] = True
host = SpiderFootHelpers.urlBaseUrl(eventData)
if host in self.skiphosts:
self.debug("Skipping " + host + " because it doesn't return 404s.")
return
# http://www/blah/abc.php -> try http://www/blah/abc.php.[fileexts]
for ext in self.opts['urlextstry']:
if host in self.skiphosts:
self.debug("Skipping " + host + " because it doesn't return 404s.")
return
if "." + ext + "?" in eventData or "." + ext + "#" in eventData or \
eventData.endswith("." + ext):
bits = eventData.split("?")
for x in self.opts['fileexts']:
if self.checkForStop():
return
self.debug("Trying " + x + " against " + eventData)
fetch = bits[0] + "." + x
if fetch in self.results:
self.debug("Skipping, already fetched.")
continue
self.results[fetch] = True
res = self.sf.fetchUrl(fetch, headOnly=True,
timeout=self.opts['_fetchtimeout'],
useragent=self.opts['_useragent'],
sizeLimit=10000000,
verify=False)
if res['realurl'] != fetch:
self.debug("Skipping because " + res['realurl'] + " isn't the fetched URL of " + fetch)
continue
if res['code'] == "200":
if not self.checkValidity(fetch):
continue
evt = SpiderFootEvent("JUNK_FILE", fetch, self.__name__, event)
self.notifyListeners(evt)
base = SpiderFootHelpers.urlBaseDir(eventData)
if not base or base in self.bases:
return
self.bases[base] = True
# http://www/blah/abc.html -> try http://www/blah/[files]
for f in self.opts['files']:
if self.checkForStop():
return
if host in self.skiphosts:
self.debug("Skipping " + host + " because it doesn't return 404s.")
return
self.debug("Trying " + f + " against " + eventData)
fetch = base + f
if fetch in self.results:
self.debug("Skipping, already fetched.")
continue
self.results[fetch] = True
res = self.sf.fetchUrl(fetch, headOnly=True,
timeout=self.opts['_fetchtimeout'],
useragent=self.opts['_useragent'],
verify=False)
if res['realurl'] != fetch:
self.debug("Skipping because " + res['realurl'] + " isn't the fetched URL of " + fetch)
continue
if res['code'] == "200":
if not self.checkValidity(fetch):
continue
evt = SpiderFootEvent("JUNK_FILE", fetch, self.__name__, event)
self.notifyListeners(evt)
# don't do anything with the root directory of a site
self.debug(f"Base: {base}, event: {eventData}")
if base in [eventData, eventData + "/"]:
return
# http://www/blah/abc.html -> try http://www/blah.[dirs]
for dirfile in self.opts['dirs']:
if self.checkForStop():
return
if host in self.skiphosts:
self.debug("Skipping " + host + " because it doesn't return 404s.")
return
if base.count('/') == 3:
self.debug("Skipping base url.")
continue
self.debug("Trying " + dirfile + " against " + eventData)
fetch = base[0:len(base) - 1] + "." + dirfile
if fetch in self.results:
self.debug("Skipping, already fetched.")
continue
self.results[fetch] = True
res = self.sf.fetchUrl(fetch, headOnly=True,
timeout=self.opts['_fetchtimeout'],
useragent=self.opts['_useragent'],
verify=False)
if res['realurl'] != fetch:
self.debug("Skipping because " + res['realurl'] + " isn't the fetched URL of " + fetch)
continue
if res['code'] == "200":
if not self.checkValidity(fetch):
continue
evt = SpiderFootEvent("JUNK_FILE", fetch, self.__name__, event)
self.notifyListeners(evt)
# End of sfp_junkfiles class
| mit | 62bfba0f858561fabdcf6798dccd6645 | 35.986175 | 111 | 0.499875 | 4.436705 | false | false | false | false |
smicallef/spiderfoot | modules/sfp_ipqualityscore.py | 1 | 8945 | # -*- coding: utf-8 -*-
# -------------------------------------------------------------------------------
# Name: sfp_ipqualityscore
# Purpose: Spiderfoot module to check whether a target is malicious
# using IPQualityScore API
#
# Author: Krishnasis Mandal <krishnasis@hotmail.com>
#
# Created: 2020-10-07
# Copyright: (c) Steve Micallef
# Licence: MIT
# -------------------------------------------------------------------------------
import json
from spiderfoot import SpiderFootEvent, SpiderFootPlugin
class sfp_ipqualityscore(SpiderFootPlugin):
meta = {
"name": "IPQualityScore",
"summary": "Determine if target is malicious using IPQualityScore API",
'flags': ["apikey"],
"useCases": ["Investigate", "Passive"],
"categories": ["Reputation Systems"],
"dataSource": {
"website": "https://www.ipqualityscore.com/",
"model": "FREE_AUTH_LIMITED",
"references": [
"https://www.ipqualityscore.com/documentation/overview"
],
"apiKeyInstructions": [
"Visit https://www.ipqualityscore.com/",
"Click on 'Plans'",
"Register a free account",
"Visit https://www.ipqualityscore.com/user/settings",
"Your API key will be listed under 'API Key'"
],
"favIcon": "https://www.ipqualityscore.com/templates/img/icons/fav/favicon-32x32.png",
"logo": "https://www.ipqualityscore.com/templates/img/logo.png",
"description": "IPQualityScore's suite of fraud prevention tools automate quality control "
"to prevent bots, fake accounts, fraudsters, suspicious transactions, "
"& malicious users without interrupting the user experience.",
},
}
opts = {
"api_key": "",
"abuse_score_threshold": 85,
"strictness": 0
}
optdescs = {
"api_key": "IPQualityScore API Key",
"abuse_score_threshold": "Minimum abuse score for target to be considered malicious (0 - 100)",
"strictness": "Depth of the reputation checks to be performed on the target (0 - 2)"
}
errorState = False
def setup(self, sfc, userOpts=None):
if userOpts is None:
userOpts = {}
self.sf = sfc
self.results = self.tempStorage()
self.opts.update(userOpts)
def watchedEvents(self):
return [
"DOMAIN_NAME",
"EMAILADDR",
"IP_ADDRESS",
"PHONE_NUMBER",
]
def producedEvents(self):
return [
"EMAILADDR_DISPOSABLE",
"EMAILADDR_COMPROMISED",
"GEOINFO",
"MALICIOUS_PHONE_NUMBER",
"MALICIOUS_EMAILADDR",
"MALICIOUS_IPADDR",
"MALICIOUS_INTERNET_NAME",
"PHONE_NUMBER_TYPE",
"RAW_RIR_DATA"
]
def handle_error_response(self, qry, res):
try:
error_info = json.loads(res["content"])
except Exception:
error_info = None
if error_info:
error_message = error_info.get("message")
else:
error_message = None
if error_message:
error_str = f", message {error_message}"
else:
error_str = ""
self.error(f"Failed to get results for {qry}, code {res['code']}{error_str}")
def query(self, qry, eventName):
queryString = ""
if eventName == "PHONE_NUMBER":
queryString = f"https://ipqualityscore.com/api/json/phone/{self.opts['api_key']}/{qry}?strictness={self.opts['strictness']}"
elif eventName == "EMAILADDR":
queryString = f"https://ipqualityscore.com/api/json/email/{self.opts['api_key']}/{qry}?strictness={self.opts['strictness']}"
elif eventName in ['IP_ADDRESS', 'DOMAIN_NAME']:
queryString = f"https://ipqualityscore.com/api/json/ip/{self.opts['api_key']}/{qry}?strictness={self.opts['strictness']}"
res = self.sf.fetchUrl(
queryString,
timeout=self.opts["_fetchtimeout"],
useragent="SpiderFoot",
)
if not res['content']:
self.info(f"No IPQualityScore info found for {qry}")
return None
try:
r = json.loads(res['content'])
if res["code"] != "200" or not r.get("success"):
self.handle_error_response(qry, res)
return None
return r
except Exception as e:
self.error(f"Error processing JSON response from IPQualityScore: {e}")
return None
def getGeoInfo(self, data):
geoInfo = ""
city = data.get('city')
country = data.get('country')
if not country:
country = data.get('country_code')
zipcode = data.get('zip_code')
region = data.get('region')
if city:
geoInfo += city + ", "
if region:
geoInfo += region + ", "
if country:
geoInfo += country + " "
if zipcode:
geoInfo += zipcode
return geoInfo
def handleEvent(self, event):
eventName = event.eventType
srcModuleName = event.module
eventData = event.data
if self.errorState:
return
self.debug(f"Received event, {eventName}, from {srcModuleName}")
if self.opts["api_key"] == "":
self.error(
f"You enabled {self.__class__.__name__} but did not set an API Key!"
)
self.errorState = True
return
if eventData in self.results:
self.debug(f"Skipping {eventData} as already mapped.")
return
self.results[eventData] = True
data = self.query(eventData, eventName)
if not data:
return
fraudScore = data.get('fraud_score')
recentAbuse = data.get('recent_abuse')
botStatus = data.get('bot_status')
malicious = False
maliciousDesc = ""
if fraudScore >= self.opts['abuse_score_threshold'] or recentAbuse or botStatus:
evt = SpiderFootEvent("RAW_RIR_DATA", str(data), self.__name__, event)
self.notifyListeners(evt)
malicious = True
maliciousDesc = f"IPQualityScore [{eventData}]\n"
if eventName == "PHONE_NUMBER":
if malicious:
maliciousDesc += f" - FRAUD SCORE: {fraudScore}\n - ACTIVE: {data.get('active')}\n - RISKY: {data.get('risky')}\n - RECENT ABUSE: {recentAbuse}"
evt = SpiderFootEvent("MALICIOUS_PHONE_NUMBER", maliciousDesc, self.__name__, event)
self.notifyListeners(evt)
phoneNumberType = data.get('line_type')
if phoneNumberType:
evt = SpiderFootEvent("PHONE_NUMBER_TYPE", phoneNumberType, self.__name__, event)
self.notifyListeners(evt)
geoInfo = self.getGeoInfo(data)
if geoInfo:
evt = SpiderFootEvent("GEOINFO", geoInfo, self.__name__, event)
self.notifyListeners(evt)
elif eventName == "EMAILADDR":
if malicious:
maliciousDesc += f" - FRAUD SCORE: {fraudScore}\n - HONEYPOT: {data.get('honeypot')}\n - SPAM TRAP SCORE: {data.get('spam_trap_score')}\n - RECENT ABUSE: {recentAbuse}"
evt = SpiderFootEvent("MALICIOUS_EMAILADDR", maliciousDesc, self.__name__, event)
self.notifyListeners(evt)
if data.get('disposable'):
evt = SpiderFootEvent("EMAILADDR_DISPOSABLE", eventData, self.__name__, event)
self.notifyListeners(evt)
if data.get('leaked'):
evt = SpiderFootEvent("EMAILADDR_COMPROMISED", f"{eventData} [Unknown]", self.__name__, event)
self.notifyListeners(evt)
elif eventName in ['IP_ADDRESS', 'DOMAIN_NAME']:
if malicious:
maliciousDesc += f" - FRAUD SCORE: {fraudScore}\n - BOT STATUS: {botStatus}\n - RECENT ABUSE: {recentAbuse}\n - ABUSE VELOCITY: {data.get('abuse_velocity')}\n - VPN: {data.get('vpn')}\n - ACTIVE VPN: {data.get('active_vpn')}\n - TOR: {data.get('tor')}\n - ACTIVE TOR: {data.get('active_tor')}"
if eventName == "IP_ADDRESS":
evt = SpiderFootEvent("MALICIOUS_IPADDR", maliciousDesc, self.__name__, event)
elif eventName == "DOMAIN_NAME":
evt = SpiderFootEvent("MALICIOUS_INTERNET_NAME", maliciousDesc, self.__name__, event)
self.notifyListeners(evt)
geoInfo = self.getGeoInfo(data)
if geoInfo:
evt = SpiderFootEvent("GEOINFO", geoInfo, self.__name__, event)
self.notifyListeners(evt)
# End of sfp_ipqualityscore class
| mit | 195829f2c86f3c68ac21269ca0a686e7 | 36.584034 | 309 | 0.547904 | 3.812873 | false | false | false | false |
smicallef/spiderfoot | test/unit/modules/test_sfp_customfeed.py | 1 | 1480 | import pytest
import unittest
from modules.sfp_customfeed import sfp_customfeed
from sflib import SpiderFoot
from spiderfoot import SpiderFootEvent, SpiderFootTarget
@pytest.mark.usefixtures
class TestModuleCustomfeed(unittest.TestCase):
def test_opts(self):
module = sfp_customfeed()
self.assertEqual(len(module.opts), len(module.optdescs))
def test_setup(self):
sf = SpiderFoot(self.default_options)
module = sfp_customfeed()
module.setup(sf, dict())
def test_watchedEvents_should_return_list(self):
module = sfp_customfeed()
self.assertIsInstance(module.watchedEvents(), list)
def test_producedEvents_should_return_list(self):
module = sfp_customfeed()
self.assertIsInstance(module.producedEvents(), list)
def test_handleEvent_no_feed_url_should_set_errorState(self):
sf = SpiderFoot(self.default_options)
module = sfp_customfeed()
module.setup(sf, dict())
target_value = 'example target value'
target_type = 'IP_ADDRESS'
target = SpiderFootTarget(target_value, target_type)
module.setTarget(target)
event_type = 'ROOT'
event_data = 'example data'
event_module = ''
source_event = ''
evt = SpiderFootEvent(event_type, event_data, event_module, source_event)
result = module.handleEvent(evt)
self.assertIsNone(result)
self.assertTrue(module.errorState)
| mit | df227ef995ce27b126f635d9cc2ddba8 | 29.204082 | 81 | 0.670946 | 3.709273 | false | true | false | false |
smicallef/spiderfoot | modules/sfp_dnsdb.py | 1 | 12221 | # -*- coding: utf-8 -*-
# -------------------------------------------------------------------------------
# Name: sfp_dnsdb
# Purpose: SpiderFoot plug-in that resolves and gets history of domains and IPs
#
# Author: Filip Aleksić <faleksicdev@gmail.com>
#
# Created: 2020-09-09
# Copyright: (c) Steve Micallef
# Licence: MIT
# -------------------------------------------------------------------------------
import json
import re
import time
from spiderfoot import SpiderFootEvent, SpiderFootPlugin
class sfp_dnsdb(SpiderFootPlugin):
meta = {
"name": "DNSDB",
"summary": "Query FarSight's DNSDB for historical and passive DNS data.",
'flags': ["apikey"],
"useCases": ["Passive", "Footprint", "Investigate"],
"categories": ["Passive DNS"],
"dataSource": {
"website": "https://www.farsightsecurity.com",
"model": "FREE_AUTH_LIMITED",
"references": [
"https://docs.dnsdb.info/dnsdb-apiv2/",
"https://www.farsightsecurity.com/get-started/"
"https://www.farsightsecurity.com/solutions/dnsdb/",
],
"apiKeyInstructions": [
"Visit https://www.farsightsecurity.com/get-started/",
"Select the model that best fit your needs (free or premium)",
"Fill in the form to get API key",
"Check your email for your API Key ",
],
"favIcon": "https://www.farsightsecurity.com/favicon.ico",
"logo": "https://www.farsightsecurity.com/assets/media/svg/farsight-logo.svg",
"description": "Farsight Security’s DNSDB is the world’s largest "
"database of DNS resolution and change data. Started in 2010 and "
"updated in real-time, DNSDB provides the most comprehensive "
"history of domains and IP addresses worldwide.",
},
}
opts = {
"api_key": "",
"age_limit_days": 0,
"verify": True,
"cohostsamedomain": False,
"maxcohost": 100,
}
optdescs = {
"api_key": "DNSDB API Key.",
"age_limit_days": "Ignore any DNSDB records older than this many days. 0 = unlimited.",
"verify": "Verify co-hosts are valid by checking if they still resolve to the shared IP.",
"cohostsamedomain": "Treat co-hosted sites on the same target domain as co-hosting?",
"maxcohost": "Stop reporting co-hosted sites after this many are found, as it would likely indicate web hosting.",
}
results = None
errorState = False
cohostcount = 0
def setup(self, sfc, userOpts=dict()):
self.sf = sfc
self.results = self.tempStorage()
self.cohostcount = 0
for opt in list(userOpts.keys()):
self.opts[opt] = userOpts[opt]
def watchedEvents(self):
return ["IP_ADDRESS", "IPV6_ADDRESS", "DOMAIN_NAME"]
# What events this module produces
def producedEvents(self):
return [
"RAW_RIR_DATA",
"INTERNET_NAME",
"INTERNET_NAME_UNRESOLVED",
"PROVIDER_DNS",
"DNS_TEXT",
"PROVIDER_MAIL",
"IP_ADDRESS",
"IPV6_ADDRESS",
"CO_HOSTED_SITE",
]
def query(self, endpoint, queryType, query):
if endpoint not in ("rrset", "rdata"):
self.error(f"Endpoint MUST be rrset or rdata, you sent {endpoint}")
return None
if queryType not in ("name", "ip"):
self.error(f"Query type MUST be name or ip, you sent {queryType}")
return None
headers = {"Accept": "application/x-ndjson", "X-API-Key": self.opts["api_key"]}
res = self.sf.fetchUrl(
f"https://api.dnsdb.info/dnsdb/v2/lookup/{endpoint}/{queryType}/{query}",
timeout=30,
useragent="SpiderFoot",
headers=headers,
)
if res["code"] == "429":
self.error("You are being rate-limited by DNSDB")
self.errorState = True
return None
if res["content"] is None:
self.info(f"No DNSDB record found for {query}")
return None
splittedContent = res["content"].strip().split("\n")
if len(splittedContent) == 2:
self.info(f"No DNSDB record found for {query}")
return None
if len(splittedContent) < 2:
self.info(f"Unexpected DNSDB response {query}")
return None
try:
records = []
for content in splittedContent:
records.append(json.loads(content))
except json.JSONDecodeError as e:
self.error(f"Error processing JSON response from DNSDB: {e}")
return None
return records[1:-1]
def isTooOld(self, lastSeen):
ageLimitTs = int(time.time()) - (86400 * self.opts["age_limit_days"])
if self.opts["age_limit_days"] > 0 and lastSeen < ageLimitTs:
self.debug("Record found but too old, skipping.")
return True
return False
def handleEvent(self, event):
eventName = event.eventType
srcModuleName = event.module
eventData = event.data
if self.errorState:
return
self.debug(f"Received event, {eventName}, from {srcModuleName}")
if self.opts["api_key"] == "":
self.error("You enabled sfp_dnsdb but did not set an API key!")
self.errorState = True
return
if eventData in self.results:
self.debug(f"Skipping {eventData}, already checked.")
return
self.results[eventData] = True
responseData = set()
coHosts = set()
if eventName == "DOMAIN_NAME":
rrsetRecords = self.query("rrset", "name", eventData)
if rrsetRecords is None:
return
evt = SpiderFootEvent("RAW_RIR_DATA", str(rrsetRecords), self.__name__, event)
self.notifyListeners(evt)
for record in rrsetRecords:
record = record.get("obj")
if self.checkForStop():
return
if self.isTooOld(record.get("time_last", 0)):
continue
if record.get("rrtype") not in (
"A",
"AAAA",
"MX",
"NS",
"TXT",
"CNAME",
):
continue
for data in record.get("rdata"):
data = data.rstrip(".")
if data in responseData:
continue
responseData.add(data)
if record.get("rrtype") == "A":
if not self.sf.validIP(data):
self.debug(f"Skipping invalid IP address {data}")
continue
if self.opts["verify"] and not self.sf.validateIP(
eventData, data
):
self.debug(
f"Host {eventData} no longer resolves to {data}"
)
continue
evt = SpiderFootEvent("IP_ADDRESS", data, self.__name__, event)
if record.get("rrtype") == "AAAA":
if not self.getTarget().matches(
data, includeChildren=True, includeParents=True
):
continue
if not self.sf.validIP6(data):
self.debug("Skipping invalid IPv6 address " + data)
continue
if self.opts["verify"] and not self.sf.validateIP(
eventData, data
):
self.debug(
"Host " + eventData + " no longer resolves to " + data
)
continue
evt = SpiderFootEvent("IPV6_ADDRESS", data, self.__name__, event)
elif record.get("rrtype") == "MX":
data = re.sub(r'.*\s+(.*)', r'\1', data)
evt = SpiderFootEvent("PROVIDER_MAIL", data, self.__name__, event)
elif record.get("rrtype") == "NS":
evt = SpiderFootEvent("PROVIDER_DNS", data, self.__name__, event)
elif record.get("rrtype") == "TXT":
data = data.replace('"', '')
evt = SpiderFootEvent("DNS_TEXT", data, self.__name__, event)
elif record.get("rrtype") == "CNAME":
if not self.getTarget().matches(data):
coHosts.add(data)
self.notifyListeners(evt)
rdataRecords = self.query("rdata", "name", eventData)
if rdataRecords is None:
return
evt = SpiderFootEvent("RAW_RIR_DATA", str(rdataRecords), self.__name__, event)
self.notifyListeners(evt)
for record in rdataRecords:
record = record.get("obj")
if self.isTooOld(record.get("time_last", 0)):
continue
if record.get("rrtype") not in ("NS", "CNAME"):
continue
data = record.get("rrname").rstrip(".")
if data in responseData:
continue
responseData.add(data)
if record.get("rrtype") == "NS":
evt = SpiderFootEvent("PROVIDER_DNS", data, self.__name__, event)
elif record.get("rrtype") == "CNAME":
if not self.getTarget().matches(data):
coHosts.add(data)
elif eventName in ("IP_ADDRESS", "IPV6_ADDRESS"):
rdataRecords = self.query("rdata", "ip", eventData)
if rdataRecords is None:
return
evt = SpiderFootEvent("RAW_RIR_DATA", str(rdataRecords), self.__name__, event)
self.notifyListeners(evt)
for record in rdataRecords:
record = record.get("obj")
if self.checkForStop():
return
if self.isTooOld(record.get("time_last", 0)):
continue
if record.get("rrtype") not in ("A", "AAAA"):
continue
data = record.get("rrname").rstrip(".")
if data in responseData:
continue
responseData.add(data)
if not self.getTarget().matches(data):
coHosts.add(data)
continue
if self.opts["verify"] and not self.sf.resolveHost(data) and not self.sf.resolveHost6(data):
self.debug(f"Host {data} could not be resolved")
evt = SpiderFootEvent("INTERNET_NAME_UNRESOLVED", data, self.__name__, event)
else:
evt = SpiderFootEvent("INTERNET_NAME", data, self.__name__, event)
self.notifyListeners(evt)
for co in coHosts:
if eventName == "IP_ADDRESS" and (
self.opts["verify"] and not self.sf.validateIP(co, eventData)
):
self.debug("Host no longer resolves to our IP.")
continue
if not self.opts["cohostsamedomain"]:
if self.getTarget().matches(co, includeParents=True):
self.debug(
"Skipping " + co + " because it is on the same domain."
)
continue
if self.cohostcount < self.opts["maxcohost"]:
evt = SpiderFootEvent("CO_HOSTED_SITE", co, self.__name__, event)
self.notifyListeners(evt)
self.cohostcount += 1
# End of sfp_dnsdb class
| mit | 08b0483e8573ad601af59cfa59e867b4 | 35.684685 | 122 | 0.489522 | 4.43412 | false | false | false | false |
smicallef/spiderfoot | spiderfoot/plugin.py | 1 | 19747 | from contextlib import suppress
import io
import logging
import os
import queue
import sys
import threading
from time import sleep
import traceback
from .threadpool import SpiderFootThreadPool
# begin logging overrides
# these are copied from the python logging module
# https://github.com/python/cpython/blob/main/Lib/logging/__init__.py
#
# _srcfile identifies THIS module's source file so that findCaller() below can
# skip frames belonging to it (mirroring how logging skips its own frames).
if hasattr(sys, 'frozen'):  # support for py2exe
    _srcfile = f"logging{os.sep}__init__{__file__[-4:]}"
elif __file__[-4:].lower() in ['.pyc', '.pyo']:
    # Running from compiled bytecode; point at the matching .py file instead.
    _srcfile = __file__[:-4] + '.py'
else:
    _srcfile = __file__
# Normalise case so comparisons work on case-insensitive filesystems.
_srcfile = os.path.normcase(_srcfile)
class SpiderFootPluginLogger(logging.Logger):
    """Used only in SpiderFootPlugin to prevent modules
    from having to initialize their own loggers.

    Preserves filename, module, line numbers, etc. from the caller.
    """

    def findCaller(self, stack_info: bool = False, stacklevel: int = 1) -> tuple:
        """Find the stack frame of the caller so that we can note the source
        file name, line number and function name.

        Adapted from logging.Logger.findCaller(); the only change is that
        frames from this module (_srcfile) are skipped in addition to frames
        from the stdlib logging module.

        Args:
            stack_info (bool): if True, also capture a formatted stack trace
            stacklevel (int): number of extra frames to skip before searching

        Returns:
            tuple: filename, line number, function name, and stack trace
        """
        f = logging.currentframe()
        # On some versions of IronPython, currentframe() returns None if
        # IronPython isn't run with -X:Frames.
        if f is not None:
            f = f.f_back
        orig_f = f
        while f and stacklevel > 1:
            f = f.f_back
            stacklevel -= 1
        if not f:
            # Walked off the top of the stack; fall back to the original frame.
            f = orig_f
        rv = "(unknown file)", 0, "(unknown function)", None
        while hasattr(f, "f_code"):
            co = f.f_code
            filename = os.path.normcase(co.co_filename)
            # Skip frames belonging to the logging machinery itself, whether
            # the stdlib logging module or this wrapper module.
            if filename in (logging._srcfile, _srcfile):  # This is the only change
                f = f.f_back
                continue
            sinfo = None
            if stack_info:
                sio = io.StringIO()
                sio.write('Stack (most recent call last):\n')
                traceback.print_stack(f, file=sio)
                sinfo = sio.getvalue()
                if sinfo[-1] == '\n':
                    sinfo = sinfo[:-1]
                sio.close()
            rv = (co.co_filename, f.f_lineno, co.co_name, sinfo)
            break
        return rv  # noqa R504
# end of logging overrides
class SpiderFootPlugin():
    """SpiderFootPlugin module object: base class for all scan modules.

    Attributes:
        _stopScanning (bool): Will be set to True by the controller if the user aborts scanning
        _listenerModules (list): Modules that will be notified when this module produces events
        _currentEvent (SpiderFootEvent): Current event being processed
        _currentTarget (str): Target currently being acted against
        __name__: Name of this module, set at startup time
        __sfdb__: Direct handle to the database - not to be directly used
            by modules except the sfp__stor_db module.
        __scanId__: ID of the scan the module is running against
        __dataSource__: (Unused) tracking of data sources
        __outputFilter__: If set, events not matching this list are dropped
        _priority (int): Priority, smaller numbers should run first
        errorState (bool): error state of the module
        socksProxy (str): SOCKS proxy
    """

    # NOTE: these are class-level attributes shared by all instances until
    # reassigned per-instance (e.g. by clearListeners()/setup()).
    # Will be set to True by the controller if the user aborts scanning
    _stopScanning = False
    # Modules that will be notified when this module produces events
    _listenerModules = list()
    # Current event being processed
    _currentEvent = None
    # Target currently being acted against
    _currentTarget = None
    # Name of this module, set at startup time
    __name__ = "module_name_not_set!"
    # Direct handle to the database - not to be directly used
    # by modules except the sfp__stor_db module.
    __sfdb__ = None
    # ID of the scan the module is running against
    __scanId__ = None
    # (only used in SpiderFoot HX) tracking of data sources
    __dataSource__ = None
    # If set, events not matching this list are dropped
    __outputFilter__ = None
    # Priority, smaller numbers should run first
    _priority = 1
    # Plugin meta information
    meta = None
    # Error state of the module
    errorState = False
    # SOCKS proxy
    socksProxy = None
    # Queue for incoming events
    incomingEventQueue = None
    # Queue for produced events
    outgoingEventQueue = None
    # SpiderFoot object, set in each module's setup() function
    sf = None
    # Configuration, set in each module's setup() function
    opts = dict()
    # Maximum threads
    maxThreads = 1

    def __init__(self) -> None:
        # Holds the thread object when module threading is enabled
        self.thread = None
        # logging overrides
        self._log = None
        # Shared thread pool for all modules
        self.sharedThreadPool = None

    @property
    def log(self):
        """Lazily-created logger named after this module.

        Temporarily swaps the logger class so the instance is a
        SpiderFootPluginLogger (which skips wrapper frames in findCaller).
        """
        if self._log is None:
            logging.setLoggerClass(SpiderFootPluginLogger)  # temporarily set logger class
            self._log = logging.getLogger(f"spiderfoot.{self.__name__}")  # init SpiderFootPluginLogger
            logging.setLoggerClass(logging.Logger)  # reset logger class to default
        return self._log

    def _updateSocket(self, socksProxy: str) -> None:
        """Hack to override module's use of socket, replacing it with
        one that uses the supplied SOCKS server.

        Args:
            socksProxy (str): SOCKS proxy
        """
        self.socksProxy = socksProxy

    def clearListeners(self) -> None:
        """Used to clear any listener relationships, etc. This is needed because
        Python seems to cache local variables even between threads."""
        self._listenerModules = list()
        self._stopScanning = False

    def setup(self, sf, userOpts: dict = {}) -> None:
        """Will always be overriden by the implementer.

        Args:
            sf (SpiderFoot): SpiderFoot object
            userOpts (dict): module-specific option overrides
        """
        # NOTE(review): mutable default argument; safe only as long as
        # implementations treat userOpts as read-only.
        pass

    def debug(self, *args, **kwargs) -> None:
        """For logging.
        A wrapper around logging.debug() that adds the scanId to LogRecord

        Args:
            *args: passed through to logging.debug()
            *kwargs: passed through to logging.debug()
        """
        self.log.debug(*args, extra={'scanId': self.__scanId__}, **kwargs)

    def info(self, *args, **kwargs) -> None:
        """For logging.
        A wrapper around logging.info() that adds the scanId to LogRecord

        Args:
            *args: passed through to logging.info()
            *kwargs: passed through to logging.info()
        """
        self.log.info(*args, extra={'scanId': self.__scanId__}, **kwargs)

    def error(self, *args, **kwargs) -> None:
        """For logging.
        A wrapper around logging.error() that adds the scanId to LogRecord

        Args:
            *args: passed through to logging.error()
            *kwargs: passed through to logging.error()
        """
        self.log.error(*args, extra={'scanId': self.__scanId__}, **kwargs)

    def enrichTarget(self, target: str) -> None:
        """Find aliases for a target.

        Note: rarely used in special cases

        Args:
            target (str): target to enrich
        """
        pass

    def setTarget(self, target) -> None:
        """Assigns the current target this module is acting against.

        Args:
            target (SpiderFootTarget): target

        Raises:
            TypeError: target argument was invalid type
        """
        # Imported here to avoid a circular import at module load time.
        from spiderfoot import SpiderFootTarget

        if not isinstance(target, SpiderFootTarget):
            raise TypeError(f"target is {type(target)}; expected SpiderFootTarget")

        self._currentTarget = target

    def setDbh(self, dbh) -> None:
        """Used to set the database handle, which is only to be used
        by modules in very rare/exceptional cases (e.g. sfp__stor_db)

        Args:
            dbh (SpiderFootDb): database handle
        """
        self.__sfdb__ = dbh

    def setScanId(self, scanId: str) -> None:
        """Set the scan ID.

        Args:
            scanId (str): scan instance ID

        Raises:
            TypeError: scanId argument was invalid type
        """
        if not isinstance(scanId, str):
            raise TypeError(f"scanId is {type(scanId)}; expected str")

        self.__scanId__ = scanId

    def getScanId(self) -> str:
        """Get the scan ID.

        Returns:
            str: scan ID

        Raises:
            TypeError: Module called getScanId() but no scanId is set.
        """
        if not self.__scanId__:
            raise TypeError("Module called getScanId() but no scanId is set.")

        return self.__scanId__

    def getTarget(self) -> str:
        """Gets the current target this module is acting against.

        Returns:
            str: current target

        Raises:
            TypeError: Module called getTarget() but no target is set.
        """
        if not self._currentTarget:
            raise TypeError("Module called getTarget() but no target is set.")

        return self._currentTarget

    def registerListener(self, listener) -> None:
        """Listener modules which will get notified once we have data for them to
        work with.

        Args:
            listener: module to notify when this module emits events
        """
        self._listenerModules.append(listener)

    def setOutputFilter(self, types) -> None:
        """Restrict the event types this module is allowed to emit."""
        self.__outputFilter__ = types

    def tempStorage(self) -> dict:
        """For future use. Module temporary storage.

        A dictionary used to persist state (in memory) for a module.

        Todo:
            Move all module state to use this, which then would enable a scan to be paused/resumed.

        Note:
            Required for SpiderFoot HX compatibility of modules.

        Returns:
            dict: module temporary state data
        """
        return dict()

    def notifyListeners(self, sfEvent) -> None:
        """Call the handleEvent() method of every other plug-in listening for
        events from this plug-in. Remember that those plug-ins will be called
        within the same execution context of this thread, not on their own.

        Args:
            sfEvent (SpiderFootEvent): event

        Raises:
            TypeError: sfEvent argument was invalid type
        """
        # Imported here to avoid a circular import at module load time.
        from spiderfoot import SpiderFootEvent

        if not isinstance(sfEvent, SpiderFootEvent):
            raise TypeError(f"sfEvent is {type(sfEvent)}; expected SpiderFootEvent")

        eventName = sfEvent.eventType
        eventData = sfEvent.data

        if self.__outputFilter__:
            # Be strict about what events to pass on, unless they are
            # the ROOT event or the event type of the target.
            if eventName not in ('ROOT', self.getTarget().targetType):
                if eventName not in self.__outputFilter__:
                    return

        storeOnly = False  # Under some conditions, only store and don't notify

        if not eventData:
            return

        if self.checkForStop():
            return

        # Look back to ensure the original notification for an element
        # is what's linked to children. For instance, sfp_dns may find
        # xyz.abc.com, and then sfp_ripe obtains some raw data for the
        # same, and then sfp_dns finds xyz.abc.com in there, we should
        # suppress the notification of that to other modules, as the
        # original xyz.abc.com notification from sfp_dns will trigger
        # those modules anyway. This also avoids messy iterations that
        # traverse many many levels.

        # storeOnly is used in this case so that the source to dest
        # relationship is made, but no further events are triggered
        # from dest, as we are already operating on dest's original
        # notification from one of the upstream events.

        prevEvent = sfEvent.sourceEvent
        while prevEvent is not None:
            if prevEvent.sourceEvent is not None:
                if prevEvent.sourceEvent.eventType == sfEvent.eventType and prevEvent.sourceEvent.data.lower() == eventData.lower():
                    storeOnly = True
                    break
            prevEvent = prevEvent.sourceEvent

        # output to queue if applicable
        if self.outgoingEventQueue is not None:
            self.outgoingEventQueue.put(sfEvent)
        # otherwise, call other modules directly
        else:
            # Lower _priority values run first.
            self._listenerModules.sort(key=lambda m: m._priority)

            for listener in self._listenerModules:
                if eventName not in listener.watchedEvents() and '*' not in listener.watchedEvents():
                    continue

                if storeOnly and "__stor" not in listener.__module__:
                    continue

                listener._currentEvent = sfEvent

                # Check if we've been asked to stop in the meantime, so that
                # notifications stop triggering module activity.
                if self.checkForStop():
                    return

                try:
                    listener.handleEvent(sfEvent)
                except Exception as e:
                    self.sf.error(f"Module ({listener.__module__}) encountered an error: {e}")
                    # set errorState
                    self.errorState = True
                    # clear incoming queue
                    if self.incomingEventQueue:
                        with suppress(queue.Empty):
                            while 1:
                                self.incomingEventQueue.get_nowait()

    def checkForStop(self) -> bool:
        """For modules to use to check for when they should give back control.

        Returns:
            bool: True if scan should stop
        """
        # Stop if module is in error state.
        if self.errorState:
            return True

        # If threading is enabled, check the _stopScanning attribute instead.
        # This is to prevent each thread needing its own sqlite db handle.
        if self.outgoingEventQueue is not None and self.incomingEventQueue is not None:
            return self._stopScanning

        if not self.__scanId__:
            return False

        scanstatus = self.__sfdb__.scanInstanceGet(self.__scanId__)

        if not scanstatus:
            return False

        # NOTE(review): index 5 appears to be the scan status field — confirm
        # against SpiderFootDb.scanInstanceGet().
        if scanstatus[5] == "ABORT-REQUESTED":
            self._stopScanning = True
            return True

        return False

    @property
    def running(self) -> bool:
        """Indicates whether the module is currently processing data.
        Modules that process data in pools/batches typically override this method.

        Returns:
            bool: True if the module is currently processing data.
        """
        return self.sharedThreadPool.countQueuedTasks(f"{self.__name__}_threadWorker") > 0

    def watchedEvents(self) -> list:
        """What events is this module interested in for input. The format is a list
        of event types that are applied to event types that this module wants to
        be notified of, or * if it wants everything.
        Will usually be overriden by the implementer, unless it is interested
        in all events (default behavior).

        Returns:
            list: list of events this modules watches
        """
        return ['*']

    def producedEvents(self) -> list:
        """What events this module produces
        This is to support the end user in selecting modules based on events
        produced.

        Returns:
            list: list of events produced by this module
        """
        return []

    def handleEvent(self, sfEvent) -> None:
        """Handle events to this module.
        Will usually be overriden by the implementer, unless it doesn't handle any events.

        Args:
            sfEvent (SpiderFootEvent): event
        """
        return

    def asdict(self) -> dict:
        """Return a dictionary description of this module for the UI/API.

        Note: self.optdescs is defined by module subclasses, not this base class.
        """
        return {
            'name': self.meta.get('name'),
            'descr': self.meta.get('summary'),
            'cats': self.meta.get('categories', []),
            'group': self.meta.get('useCases', []),
            'labels': self.meta.get('flags', []),
            'provides': self.producedEvents(),
            'consumes': self.watchedEvents(),
            'meta': self.meta,
            'opts': self.opts,
            'optdescs': self.optdescs,
        }

    def start(self) -> None:
        """Start this module's event loop in its own thread."""
        self.thread = threading.Thread(target=self.threadWorker)
        self.thread.start()

    def finish(self):
        """Perform final/cleanup functions before module exits
        Note that this function may be called multiple times
        Overridden by the implementer
        """
        return

    def threadWorker(self) -> None:
        """Event loop run in this module's thread: pull events from
        incomingEventQueue and dispatch them via poolExecute() until the
        scan stops or a 'FINISHED' sentinel is received."""
        try:
            # create new database handle since we're in our own thread
            from spiderfoot import SpiderFootDb
            self.setDbh(SpiderFootDb(self.opts))
            self.sf._dbh = self.__sfdb__

            if not (self.incomingEventQueue and self.outgoingEventQueue):
                self.sf.error("Please set up queues before starting module as thread")
                return

            while not self.checkForStop():
                try:
                    sfEvent = self.incomingEventQueue.get_nowait()
                except queue.Empty:
                    # Nothing to do yet; poll again shortly.
                    sleep(.3)
                    continue

                if sfEvent == 'FINISHED':
                    self.sf.debug(f"{self.__name__}.threadWorker() got \"FINISHED\" from incomingEventQueue.")
                    self.poolExecute(self.finish)
                else:
                    self.sf.debug(f"{self.__name__}.threadWorker() got event, {sfEvent.eventType}, from incomingEventQueue.")
                    self.poolExecute(self.handleEvent, sfEvent)
        except KeyboardInterrupt:
            self.sf.debug(f"Interrupted module {self.__name__}.")
            self._stopScanning = True
        except Exception as e:
            import traceback
            self.sf.error(f"Exception ({e.__class__.__name__}) in module {self.__name__}."
                          + traceback.format_exc())
            # set errorState
            self.sf.debug(f"Setting errorState for module {self.__name__}.")
            self.errorState = True
            # clear incoming queue
            if self.incomingEventQueue:
                self.sf.debug(f"Emptying incomingEventQueue for module {self.__name__}.")
                with suppress(queue.Empty):
                    while 1:
                        self.incomingEventQueue.get_nowait()
                # set queue to None to prevent its use
                # if there are leftover objects in the queue, the scan will hang.
                self.incomingEventQueue = None

    def poolExecute(self, callback, *args, **kwargs) -> None:
        """Execute a callback with the given args.
        If we're in a storage module, execute normally.
        Otherwise, use the shared thread pool.

        Args:
            callback: function to call
            args: args (passed through to callback)
            kwargs: kwargs (passed through to callback)
        """
        if self.__name__.startswith('sfp__stor_'):
            callback(*args, **kwargs)
        else:
            self.sharedThreadPool.submit(callback, *args, taskName=f"{self.__name__}_threadWorker", maxThreads=self.maxThreads, **kwargs)

    def threadPool(self, *args, **kwargs):
        """Create a new SpiderFootThreadPool (passthrough constructor)."""
        return SpiderFootThreadPool(*args, **kwargs)

    def setSharedThreadPool(self, sharedThreadPool) -> None:
        """Assign the thread pool shared by all modules in this scan."""
        self.sharedThreadPool = sharedThreadPool

# end of SpiderFootPlugin class
| mit | b50f684dcfd54fac8e28329ab235825b | 33.950442 | 137 | 0.590571 | 4.510507 | false | false | false | false |
smicallef/spiderfoot | modules/sfp_errors.py | 1 | 3664 | # -*- coding: utf-8 -*-
# -------------------------------------------------------------------------------
# Name: sfp_errors
# Purpose: Identify common error messages in content like SQL errors, etc.
#
# Author: Steve Micallef <steve@binarypool.com>
#
# Created: 18/01/2015
# Copyright: (c) Steve Micallef 2015
# Licence: MIT
# -------------------------------------------------------------------------------
import re
from spiderfoot import SpiderFootEvent, SpiderFootPlugin
# Taken from Google Dorks on exploit-db.com
# Map of error category -> list of patterns, matched case-insensitively
# against spidered page content by sfp_errors.handleEvent().
regexps = dict({
    # "PHP parse error": PHP emits "Parse error: syntax error, ..." for
    # syntax errors; the previous pattern "PHP pase error" was a typo that
    # could never match real output.
    "PHP Error": ["PHP parse error", "PHP warning", "PHP error",
                  "unexpected T_VARIABLE", "warning: failed opening", "include_path="],
    "Generic Error": ["Internal Server Error", "Incorrect syntax"],
    "Oracle Error": [r"ORA-\d+", "TNS:.?no listen"],
    "ASP Error": ["NET_SessionId"],
    "MySQL Error": [r"mysql_query\(", r"mysql_connect\("],
    "ODBC Error": [r"\[ODBC SQL"]
})
class sfp_errors(SpiderFootPlugin):
    """Scan spidered content from the target website for well-known
    server-side error strings (SQL, PHP, ASP, ODBC, etc.)."""

    meta = {
        'name': "Error String Extractor",
        'summary': "Identify common error messages in content like SQL errors, etc.",
        'flags': [],
        'useCases': ["Footprint", "Passive"],
        'categories': ["Content Analysis"]
    }

    # This module exposes no user-configurable options.
    opts = {}
    optdescs = {}

    # Per-scan state: maps page URL -> list of error categories already reported.
    results = None

    def setup(self, sfc, userOpts=dict()):
        """Initialise module state and merge user-supplied options."""
        self.sf = sfc
        self.results = self.tempStorage()
        self.__dataSource__ = "Target Website"
        self.opts.update(userOpts)

    def watchedEvents(self):
        """Events this module accepts as input."""
        return ["TARGET_WEB_CONTENT"]

    def producedEvents(self):
        """Events this module can produce."""
        return ["ERROR_MESSAGE"]

    def handleEvent(self, event):
        """Scan web content for known error strings and raise ERROR_MESSAGE
        events, at most once per error category per source URL."""
        eventName = event.eventType
        srcModuleName = event.module
        eventData = event.data

        # Only web content retrieved by the spider is of interest.
        if srcModuleName != "sfp_spider":
            return

        eventSource = event.actualSource
        self.debug(f"Received event, {eventName}, from {srcModuleName}")

        if eventSource not in self.results:
            self.results[eventSource] = list()

        # Only inspect pages hosted on the target itself.
        if not self.getTarget().matches(self.sf.urlFQDN(eventSource)):
            self.debug("Not collecting web content information for external sites.")
            return

        for group, patterns in regexps.items():
            # Each error category is reported once per source URL.
            if group in self.results[eventSource]:
                continue

            for pattern in patterns:
                found = re.findall(pattern, eventData, re.IGNORECASE)
                if not found or group in self.results[eventSource]:
                    continue

                self.info("Matched " + group + " in content from " + eventSource)
                self.results[eventSource] = self.results[eventSource] + [group]
                evt = SpiderFootEvent("ERROR_MESSAGE", group,
                                      self.__name__, event)
                self.notifyListeners(evt)

# End of sfp_errors class
| mit | ef0ffb38430a9f4acc39315fbe433a14 | 32.925926 | 89 | 0.572871 | 4.103024 | false | false | false | false |
smicallef/spiderfoot | test/unit/modules/test_sfp_bitcoin.py | 1 | 4245 | import pytest
import unittest
from modules.sfp_bitcoin import sfp_bitcoin
from sflib import SpiderFoot
from spiderfoot import SpiderFootEvent, SpiderFootTarget
@pytest.mark.usefixtures
class TestModuleBitcoin(unittest.TestCase):
    """Unit tests for the sfp_bitcoin module."""

    def test_opts(self):
        # Every option must have a matching description.
        plugin = sfp_bitcoin()
        self.assertEqual(len(plugin.opts), len(plugin.optdescs))

    def test_setup(self):
        # setup() should not raise.
        plugin = sfp_bitcoin()
        plugin.setup(SpiderFoot(self.default_options), dict())

    def test_watchedEvents_should_return_list(self):
        self.assertIsInstance(sfp_bitcoin().watchedEvents(), list)

    def test_producedEvents_should_return_list(self):
        self.assertIsInstance(sfp_bitcoin().producedEvents(), list)

    def _prepared_module(self):
        """Return a configured sfp_bitcoin instance with a target set."""
        plugin = sfp_bitcoin()
        plugin.setup(SpiderFoot(self.default_options), dict())
        plugin.setTarget(SpiderFootTarget('spiderfoot.net', 'INTERNET_NAME'))
        return plugin

    def _assert_address_extracted(self, address):
        """Feed content containing *address* and expect a BITCOIN_ADDRESS event."""
        plugin = self._prepared_module()

        def intercept(self, event):
            expected = 'BITCOIN_ADDRESS'
            if str(event.eventType) != expected:
                raise Exception(f"{event.eventType} != {expected}")
            expected = address
            if str(event.data) != expected:
                raise Exception(f"{event.data} != {expected}")
            # Signal success via a sentinel exception.
            raise Exception("OK")

        plugin.notifyListeners = intercept.__get__(plugin, sfp_bitcoin)

        evt = SpiderFootEvent('ROOT', f'example data {address} example data', '', '')

        with self.assertRaises(Exception) as cm:
            plugin.handleEvent(evt)

        self.assertEqual("OK", str(cm.exception))

    def test_handleEvent_event_data_containing_bitcoin_string_in_legacy_base58_format_should_return_event(self):
        self._assert_address_extracted('1HesYJSP1QqcyPEjnQ9vzBL1wujruNGe7R')

    def test_handleEvent_event_data_containing_bitcoin_string_in_bech32_format_should_return_event(self):
        self._assert_address_extracted('bc1q4r8h8vqk02gnvlus758qmpk8jmajpy2ld23xtr73a39ps0r9z82qq0qqye')

    def test_handleEvent_event_data_not_containing_bitcoin_string_should_not_return_event(self):
        plugin = self._prepared_module()

        def intercept(self, event):
            # Any event raised here is a failure.
            raise Exception(f"Raised event {event.eventType}: {event.data}")

        plugin.notifyListeners = intercept.__get__(plugin, sfp_bitcoin)

        evt = SpiderFootEvent('ROOT', 'example data', '', '')
        self.assertIsNone(plugin.handleEvent(evt))
| mit | 09325ea26836b3515e552e23a5312b5f | 32.96 | 112 | 0.644287 | 3.576243 | false | true | false | false |
smicallef/spiderfoot | modules/sfp_tool_dnstwist.py | 1 | 5459 | # -*- coding: utf-8 -*-
# -------------------------------------------------------------------------------
# Name: sfp_tool_dnstwist
# Purpose: SpiderFoot plug-in for using the 'dnstwist' tool.
# Tool: https://github.com/elceef/dnstwist
#
# Author: Steve Micallef <steve@binarypool.com>
#
# Created: 12/11/2018
# Copyright: (c) Steve Micallef 2018
# Licence: MIT
# -------------------------------------------------------------------------------
import json
from pathlib import Path
from shutil import which
from subprocess import PIPE, Popen
from spiderfoot import SpiderFootEvent, SpiderFootPlugin, SpiderFootHelpers
class sfp_tool_dnstwist(SpiderFootPlugin):
    """Wrap a locally-installed DNSTwist to find look-alike (typo-squatted,
    bit-squatted, etc.) domains for the target domain."""

    meta = {
        'name': "Tool - DNSTwist",
        'summary': "Identify bit-squatting, typo and other similar domains to the target using a local DNSTwist installation.",
        'flags': ["tool"],
        'useCases': ["Footprint", "Investigate"],
        'categories': ["DNS"],
        'toolDetails': {
            'name': "DNSTwist",
            'description': "See what sort of trouble users can get in trying to type your domain name. "
                           "Find lookalike domains that adversaries can use to attack you. "
                           "Can detect typosquatters, phishing attacks, fraud, and brand impersonation. "
                           "Useful as an additional source of targeted threat intelligence.",
            'website': 'https://github.com/elceef/dnstwist',
            'repository': 'https://github.com/elceef/dnstwist'
        },
    }

    # Default options
    opts = {
        'pythonpath': "python",
        'dnstwistpath': ""
    }

    # Option descriptions
    optdescs = {
        'pythonpath': "Path to Python interpreter to use for DNSTwist. If just 'python' then it must be in your PATH.",
        'dnstwistpath': "Path to the where the dnstwist.py file lives. Optional."
    }

    # Per-scan state: domains already processed.
    results = None
    errorState = False

    def setup(self, sfc, userOpts=dict()):
        """Initialise module state and merge user-supplied options.

        Args:
            sfc (SpiderFoot): SpiderFoot object
            userOpts (dict): module-specific option overrides
        """
        self.sf = sfc
        self.results = self.tempStorage()
        self.errorState = False
        self.__dataSource__ = "DNS"

        for opt in list(userOpts.keys()):
            self.opts[opt] = userOpts[opt]

    # What events is this module interested in for input
    def watchedEvents(self):
        """Events this module accepts as input."""
        return ['DOMAIN_NAME']

    # What events this module produces
    # This is to support the end user in selecting modules based on events
    # produced.
    def producedEvents(self):
        """Events this module can produce."""
        return ["SIMILARDOMAIN"]

    # Handle events sent to this module
    def handleEvent(self, event):
        """Run DNSTwist against the domain in the event and emit a
        SIMILARDOMAIN event for each look-alike domain found.

        Args:
            event (SpiderFootEvent): DOMAIN_NAME event
        """
        eventName = event.eventType
        srcModuleName = event.module
        eventData = event.data

        self.debug(f"Received event, {eventName}, from {srcModuleName}")

        if self.errorState:
            return

        if eventData in self.results:
            self.debug("Skipping " + eventData + " as already scanned.")
            return

        self.results[eventData] = True

        # Prefer a dnstwist binary found on the PATH; otherwise fall back to
        # invoking dnstwist.py via the configured Python interpreter.
        dnstwistLocation = which('dnstwist')
        if dnstwistLocation and Path(dnstwistLocation).is_file():
            cmd = ['dnstwist']
        else:
            if not self.opts['dnstwistpath']:
                self.error("You enabled sfp_tool_dnstwist but did not set a path to the tool!")
                self.errorState = True
                return

            # Normalize path
            if self.opts['dnstwistpath'].endswith('dnstwist.py'):
                exe = self.opts['dnstwistpath']
            elif self.opts['dnstwistpath'].endswith('/'):
                exe = self.opts['dnstwistpath'] + "dnstwist.py"
            else:
                exe = self.opts['dnstwistpath'] + "/dnstwist.py"

            # If tool is not found, abort
            if not Path(exe).is_file():
                self.error("File does not exist: " + exe)
                self.errorState = True
                return

            cmd = [self.opts['pythonpath'], exe]

        # Sanitize domain name before passing it to an external process.
        if not SpiderFootHelpers.sanitiseInput(eventData):
            self.error("Invalid input, refusing to run.")
            return

        try:
            p = Popen(cmd + ["-f", "json", "-r", eventData], stdout=PIPE, stderr=PIPE)
            stdout, stderr = p.communicate(input=None)
            if p.returncode == 0:
                content = stdout
            else:
                self.error("Unable to read DNSTwist content.")
                # communicate() returns bytes (no text=True); decode before
                # concatenating — "str" + bytes raises TypeError, which
                # previously masked the real tool error.
                self.debug("Error running DNSTwist: "
                           + stderr.decode("utf-8", errors="replace") + ", "
                           + stdout.decode("utf-8", errors="replace"))
                return

            # For each entry in the JSON output, generate a SIMILARDOMAIN event
            try:
                j = json.loads(content)
                for r in j:
                    # Support different formats from DNStwist versions
                    domain = r.get('domain-name')
                    if not domain:
                        domain = r.get('domain')

                    # DNSTwist includes the original domain itself; skip it.
                    if self.getTarget().matches(domain, includeParents=True):
                        continue

                    evt = SpiderFootEvent("SIMILARDOMAIN", domain,
                                          self.__name__, event)
                    self.notifyListeners(evt)
            except Exception as e:
                self.error("Couldn't parse the JSON output of DNSTwist: " + str(e))
                return
        except Exception as e:
            self.error("Unable to run DNSTwist: " + str(e))
            return

# End of sfp_tool_dnstwist class
# End of sfp_tool_dnstwist class
| mit | 5ec4e3498e3331d717b8635f940481dc | 34.679739 | 127 | 0.546987 | 4.089139 | false | false | false | false |
smicallef/spiderfoot | test/unit/modules/test_sfp_fraudguard.py | 1 | 1479 | import pytest
import unittest
from modules.sfp_fraudguard import sfp_fraudguard
from sflib import SpiderFoot
from spiderfoot import SpiderFootEvent, SpiderFootTarget
@pytest.mark.usefixtures
class TestModuleFraudguard(unittest.TestCase):
    """Unit tests for the sfp_fraudguard module."""

    def test_opts(self):
        # Every option must have a matching description.
        plugin = sfp_fraudguard()
        self.assertEqual(len(plugin.opts), len(plugin.optdescs))

    def test_setup(self):
        # setup() should not raise.
        plugin = sfp_fraudguard()
        plugin.setup(SpiderFoot(self.default_options), dict())

    def test_watchedEvents_should_return_list(self):
        self.assertIsInstance(sfp_fraudguard().watchedEvents(), list)

    def test_producedEvents_should_return_list(self):
        self.assertIsInstance(sfp_fraudguard().producedEvents(), list)

    def test_handleEvent_no_api_key_should_set_errorState(self):
        plugin = sfp_fraudguard()
        plugin.setup(SpiderFoot(self.default_options), dict())
        plugin.setTarget(SpiderFootTarget('example target value', 'IP_ADDRESS'))

        evt = SpiderFootEvent('ROOT', 'example data', '', '')

        # Without an API key the module must bail out and flag an error.
        self.assertIsNone(plugin.handleEvent(evt))
        self.assertTrue(plugin.errorState)
| mit | 5f53390c9edd0e7bf00a0fcfdb2117e5 | 29.183673 | 81 | 0.670723 | 3.625 | false | true | false | false |
smicallef/spiderfoot | modules/sfp_flickr.py | 1 | 9348 | # -*- coding: utf-8 -*-
# -------------------------------------------------------------------------------
# Name: sfp_flickr
# Purpose: Search Flickr API for domains, URLs and emails related to the
# specified domain.
#
# Author: <bcoles@gmail.com>
#
# Created: 2018-10-08
# Copyright: (c) bcoles 2018
# Licence: MIT
# -------------------------------------------------------------------------------
import json
import re
import time
import urllib.error
import urllib.parse
import urllib.request
from spiderfoot import SpiderFootEvent, SpiderFootHelpers, SpiderFootPlugin
class sfp_flickr(SpiderFootPlugin):
meta = {
'name': "Flickr",
'summary': "Search Flickr for domains, URLs and emails related to the specified domain.",
'flags': [],
'useCases': ["Footprint", "Investigate", "Passive"],
'categories': ["Social Media"],
'dataSource': {
'website': "https://www.flickr.com/",
'model': "FREE_NOAUTH_UNLIMITED",
'references': [
"https://www.flickr.com/services/api/",
"https://www.flickr.com/services/developer/api/",
"https://code.flickr.net/"
],
'favIcon': "https://combo.staticflickr.com/pw/favicon.ico",
'logo': "https://combo.staticflickr.com/pw/favicon.ico",
'description': "Flickr is almost certainly the best online photo management and sharing application in the world.\n "
"On Flickr, members upload photos, share them securely, supplement their photos with "
"metadata like license information, geo-location, people, tags, etc., "
"and interact with their family, friends, contacts or anyone in the community. "
"Practically all the features on Flickr's various platforms -- web, mobile and desktop -- "
"are accompanied by a longstanding API program. "
"Since 2005, developers have collaborated on top of Flickr's APIs to build fun, creative, "
"and gorgeous experiences around photos that extend beyond Flickr.",
}
}
# Default options
opts = {
'pause': 1,
'per_page': 100,
'maxpages': 20,
'dns_resolve': True,
}
# Option descriptions
optdescs = {
'pause': "Number of seconds to pause between fetches.",
'per_page': "Maximum number of results per page.",
'maxpages': "Maximum number of pages of results to fetch.",
'dns_resolve': "DNS resolve each identified domain.",
}
results = None
def setup(self, sfc, userOpts=dict()):
self.sf = sfc
self.results = self.tempStorage()
for opt in list(userOpts.keys()):
self.opts[opt] = userOpts[opt]
    # What events is this module interested in for input
    def watchedEvents(self):
        """Events this module accepts as input."""
        return ["DOMAIN_NAME"]
    # What events this module produces
    def producedEvents(self):
        """Events this module can produce."""
        return ["EMAILADDR", "EMAILADDR_GENERIC", "INTERNET_NAME",
                "DOMAIN_NAME", "LINKED_URL_INTERNAL"]
# Retrieve API key
def retrieveApiKey(self):
res = self.sf.fetchUrl("https://www.flickr.com/", timeout=self.opts['_fetchtimeout'], useragent=self.opts['_useragent'])
if res['content'] is None:
return None
keys = re.findall(r'YUI_config.flickr.api.site_key = "([a-zA-Z0-9]+)"', str(res['content']))
if not keys:
return None
return keys[0]
    # Query the REST API
    def query(self, qry, api_key, page=1, per_page=200):
        """Search the Flickr REST API (flickr.photos.search) for a text query.

        Args:
            qry (str): search text
            api_key (str): Flickr API site key (see retrieveApiKey())
            page (int): page number of results to fetch
            per_page (int): maximum number of results per page

        Returns:
            dict: JSON-decoded API response, or None on fetch/parse failure
        """
        params = {
            "sort": "relevance",
            "parse_tags": "1",
            "content_type": "7",
            "extras": "description,owner_name,path_alias,realname",
            "hermes": "1",
            "hermesClient": "1",
            "reqId": "",
            "nojsoncallback": "1",
            "viewerNSID": "",
            "method": "flickr.photos.search",
            "csrf": "",
            "lang": "en-US",
            "per_page": str(per_page),
            "page": str(page),
            # Force the query to ASCII; non-encodable characters are replaced.
            "text": qry.encode('raw_unicode_escape').decode("ascii", errors='replace'),
            "api_key": api_key,
            "format": "json"
        }

        res = self.sf.fetchUrl("https://api.flickr.com/services/rest?" + urllib.parse.urlencode(params),
                               useragent=self.opts['_useragent'],
                               timeout=self.opts['_fetchtimeout'])

        # Pause between fetches to avoid hammering the API (configurable).
        time.sleep(self.opts['pause'])

        try:
            return json.loads(res['content'])
        except Exception as e:
            self.debug(f"Error processing JSON response: {e}")

        return None
    # Handle events sent to this module
    def handleEvent(self, event):
        """Search Flickr for photos relating to the target domain and extract
        e-mail addresses, in-scope URLs and hostnames from the photo metadata.

        Args:
            event (SpiderFootEvent): the event to handle (DOMAIN_NAME)
        """
        eventName = event.eventType
        srcModuleName = event.module
        eventData = event.data
        if eventData in self.results:
            self.debug(f"Skipping {eventData}, already checked")
            return
        self.results[eventData] = True
        self.debug(f"Received event, {eventName}, from {srcModuleName}")
        # Avoid feedback loops from events this module itself generated.
        if srcModuleName == 'sfp_flickr':
            self.debug(f"Ignoring {eventData}, from self.")
            return
        # Retrieve API key
        api_key = self.retrieveApiKey()
        if not api_key:
            self.error("Failed to obtain API key")
            return
        self.debug(f"Retrieved API key: {api_key}")
        # Query API for event data
        hosts = list()
        page = 1
        pages = self.opts['maxpages']
        per_page = self.opts['per_page']
        while page <= pages:
            # Abort if the scan was stopped or the module hit an error.
            if self.checkForStop():
                return
            if self.errorState:
                return
            data = self.query(eventData, api_key, page=page, per_page=per_page)
            if data is None:
                return
            # Check the response is ok
            if data.get('stat') != "ok":
                self.debug("Error retrieving search results.")
                return
            photos = data.get('photos')
            if not photos:
                self.debug("No search results.")
                return
            # Calculate number of pages to retrieve; never fetch beyond what
            # the API says exists or permits.
            result_pages = int(photos.get('pages', 0))
            if result_pages < pages:
                pages = result_pages
            if 'max_allowed_pages' in photos:
                allowed_pages = int(photos.get('max_allowed_pages', 0))
                if pages > allowed_pages:
                    pages = allowed_pages
            self.info(f"Parsing page {page} of {pages}")
            # Extract data
            for photo in photos.get('photo', list()):
                emails = SpiderFootHelpers.extractEmailsFromText(str(photo))
                for email in emails:
                    if email in self.results:
                        continue
                    # Only report addresses belonging to the target domain.
                    mail_domain = email.lower().split('@')[1]
                    if not self.getTarget().matches(mail_domain, includeChildren=True, includeParents=True):
                        self.debug(f"Skipped unrelated address: {email}")
                        continue
                    self.info("Found e-mail address: " + email)
                    # Role accounts (e.g. info@, admin@) get a separate event type.
                    if email.split("@")[0] in self.opts['_genericusers'].split(","):
                        evttype = "EMAILADDR_GENERIC"
                    else:
                        evttype = "EMAILADDR"
                    evt = SpiderFootEvent(evttype, email, self.__name__, event)
                    self.notifyListeners(evt)
                    self.results[email] = True
                links = SpiderFootHelpers.extractUrlsFromText(str(photo))
                for link in links:
                    if link in self.results:
                        continue
                    host = self.sf.urlFQDN(link)
                    if not self.getTarget().matches(host, includeChildren=True, includeParents=True):
                        self.debug(f"Skipped unrelated URL: {link}")
                        continue
                    hosts.append(host)
                    self.debug(f"Found a URL: {link}")
                    evt = SpiderFootEvent('LINKED_URL_INTERNAL', link, self.__name__, event)
                    self.notifyListeners(evt)
                    self.results[link] = True
            page += 1
        # Report the hostnames collected from the extracted URLs.
        for host in set(hosts):
            if self.checkForStop():
                return
            if self.errorState:
                return
            # Optionally verify the host resolves before reporting it.
            if self.opts['dns_resolve'] and not self.sf.resolveHost(host) and not self.sf.resolveHost6(host):
                self.debug(f"Host {host} could not be resolved")
                evt = SpiderFootEvent("INTERNET_NAME_UNRESOLVED", host, self.__name__, event)
                self.notifyListeners(evt)
                continue
            evt = SpiderFootEvent("INTERNET_NAME", host, self.__name__, event)
            self.notifyListeners(evt)
            if self.sf.isDomain(host, self.opts["_internettlds"]):
                evt = SpiderFootEvent("DOMAIN_NAME", host, self.__name__, event)
                self.notifyListeners(evt)
# End of sfp_flickr class
| mit | 58fb3e1654e46f824d646860df03c744 | 34.142857 | 129 | 0.523641 | 4.237534 | false | false | false | false |
smicallef/spiderfoot | test/unit/modules/test_sfp_tool_dnstwist.py | 1 | 1515 | import pytest
import unittest
from modules.sfp_tool_dnstwist import sfp_tool_dnstwist
from sflib import SpiderFoot
from spiderfoot import SpiderFootEvent, SpiderFootTarget
@pytest.mark.usefixtures
class TestModuleToolDnstwist(unittest.TestCase):
    """Unit tests for the sfp_tool_dnstwist module."""

    def test_opts(self):
        """Every option should have a matching description."""
        plugin = sfp_tool_dnstwist()
        self.assertEqual(len(plugin.opts), len(plugin.optdescs))

    def test_setup(self):
        """setup() should accept a SpiderFoot instance and empty options."""
        plugin = sfp_tool_dnstwist()
        plugin.setup(SpiderFoot(self.default_options), dict())

    def test_watchedEvents_should_return_list(self):
        self.assertIsInstance(sfp_tool_dnstwist().watchedEvents(), list)

    def test_producedEvents_should_return_list(self):
        self.assertIsInstance(sfp_tool_dnstwist().producedEvents(), list)

    def test_handleEvent_no_tool_path_configured_should_set_errorState(self):
        """With no tool path configured, handling an event should flag an error."""
        plugin = sfp_tool_dnstwist()
        plugin.setup(SpiderFoot(self.default_options), dict())
        plugin.setTarget(SpiderFootTarget('example target value', 'IP_ADDRESS'))

        evt = SpiderFootEvent('ROOT', 'example data', '', '')
        self.assertIsNone(plugin.handleEvent(evt))
        self.assertTrue(plugin.errorState)
| mit | b5db7e8508930851eaf088690a71a0fe | 29.918367 | 81 | 0.673267 | 3.498845 | false | true | false | false |
smicallef/spiderfoot | modules/sfp_hunter.py | 1 | 6025 | # -------------------------------------------------------------------------------
# Name: sfp_hunter
# Purpose: Query hunter.io using their API.
#
# Author: Steve Micallef <steve@binarypool.com>
#
# Created: 22/02/2017
# Copyright: (c) Steve Micallef
# Licence: MIT
# -------------------------------------------------------------------------------
import json
import urllib.error
import urllib.parse
import urllib.request
from spiderfoot import SpiderFootEvent, SpiderFootPlugin
class sfp_hunter(SpiderFootPlugin):
    meta = {
        'name': "Hunter.io",
        'summary': "Check for e-mail addresses and names on hunter.io.",
        'flags': ["apikey"],
        'useCases': ["Footprint", "Investigate", "Passive"],
        'categories': ["Search Engines"],
        'dataSource': {
            'website': "https://hunter.io/",
            'model': "FREE_AUTH_LIMITED",
            'references': [
                "https://hunter.io/api"
            ],
            'apiKeyInstructions': [
                "Visit https://hunter.io/",
                "Sign up for a free account",
                "Click on 'Account Settings'",
                "Click on 'API'",
                "The API key is listed under 'Your API Key'"
            ],
            'favIcon': "https://hunter.io/assets/head/favicon-d5796c45076e78aa5cf22dd53c5a4a54155062224bac758a412f3a849f38690b.ico",
            'logo': "https://hunter.io/assets/head/touch-icon-iphone-fd9330e31552eeaa12b177489943de997551bfd991c4c44e8c3d572e78aea5f3.png",
            'description': "Hunter lets you find email addresses in seconds and connect with the people that matter for your business.\n"
            "The Domain Search lists all the people working in a company with their name "
            "and email address found on the web. With 100+ million email addresses indexed, "
            "effective search filters and scoring, it's the most powerful email-finding tool ever created.",
        }
    }

    # Default options
    opts = {
        "api_key": ""
    }

    # Option descriptions
    optdescs = {
        "api_key": "Hunter.io API key."
    }

    # Be sure to completely clear any class variables in setup()
    # or you run the risk of data persisting between scan runs.
    results = None
    errorState = False

    def setup(self, sfc, userOpts=dict()):
        """Initialise the module with a SpiderFoot instance and user options."""
        self.sf = sfc
        self.results = self.tempStorage()
        self.errorState = False

        # Clear / reset any other class member variables here
        # or you risk them persisting between threads.

        for opt in list(userOpts.keys()):
            self.opts[opt] = userOpts[opt]

    # What events is this module interested in for input
    def watchedEvents(self):
        return ["DOMAIN_NAME", "INTERNET_NAME"]

    # What events this module produces
    def producedEvents(self):
        return ["EMAILADDR", "EMAILADDR_GENERIC", "RAW_RIR_DATA"]

    def query(self, qry, offset=0, limit=10):
        """Query the Hunter.io Domain Search API.

        Args:
            qry (str): domain name to search for
            offset (int): number of initial results to skip
            limit (int): maximum number of results to return

        Returns:
            dict: parsed JSON response, or None on failure
        """
        params = {
            # Sanitise the domain to ASCII; unmappable characters are replaced.
            "domain": qry.encode('raw_unicode_escape').decode("ascii", errors='replace'),
            "api_key": self.opts['api_key'],
            "offset": str(offset),
            "limit": str(limit)
        }

        url = f"https://api.hunter.io/v2/domain-search?{urllib.parse.urlencode(params)}"
        res = self.sf.fetchUrl(url, timeout=self.opts['_fetchtimeout'], useragent="SpiderFoot")

        if res['code'] == "404":
            return None

        if not res['content']:
            return None

        try:
            return json.loads(res['content'])
        except Exception as e:
            self.error(f"Error processing JSON response from hunter.io: {e}")
            return None

    # Handle events sent to this module
    def handleEvent(self, event):
        """Search Hunter.io for e-mail addresses and names on the target domain.

        Args:
            event (SpiderFootEvent): event to handle (DOMAIN_NAME / INTERNET_NAME)
        """
        eventName = event.eventType
        srcModuleName = event.module
        eventData = event.data

        if self.errorState:
            return

        self.debug(f"Received event, {eventName}, from {srcModuleName}")

        if eventData in self.results:
            self.debug(f"Skipping {eventData}, already checked.")
            return

        self.results[eventData] = True

        if self.opts['api_key'] == "":
            self.error("You enabled sfp_hunter but did not set an API key!")
            self.errorState = True
            return

        data = self.query(eventData, 0, 10)
        if not data:
            return
        if "data" not in data:
            return

        # Total number of results the API claims to have, if reported.
        if "meta" in data:
            maxgoal = data['meta'].get('results', 10)
        else:
            maxgoal = 10

        rescount = 0
        while rescount <= maxgoal:
            emails = data['data'].get('emails', list())

            # Bug fix: previously the counter only grew by the number of
            # e-mails returned, so a page with zero e-mails left it unchanged
            # and the loop re-requested the same offset forever.
            if not emails:
                return

            # Count the fetched page up-front so paging always makes progress.
            rescount += len(emails)

            for email in emails:
                # Notify other modules of what you've found
                em = email.get('value')
                if not em:
                    continue

                # Role accounts (e.g. info@, admin@) get a separate event type.
                if em.split("@")[0] in self.opts['_genericusers'].split(","):
                    evttype = "EMAILADDR_GENERIC"
                else:
                    evttype = "EMAILADDR"
                e = SpiderFootEvent(evttype, em, self.__name__, event)
                self.notifyListeners(e)

                if 'first_name' in email and 'last_name' in email:
                    if email['first_name'] is not None and email['last_name'] is not None:
                        n = email['first_name'] + " " + email['last_name']
                        e = SpiderFootEvent("RAW_RIR_DATA", "Possible full name: " + n,
                                            self.__name__, event)
                        self.notifyListeners(e)

            if rescount >= maxgoal:
                return

            # Fetch the next page of results, starting after what we've seen.
            data = self.query(eventData, rescount, 10)
            if data is None:
                return
            if "data" not in data:
                return
# End of sfp_hunter class
| mit | f0ea9db56022c46d83ac57cd392ba1c5 | 32.659218 | 139 | 0.539751 | 4.084746 | false | false | false | false |
smicallef/spiderfoot | test/unit/modules/test_sfp_censys.py | 1 | 2495 | import pytest
import unittest
from modules.sfp_censys import sfp_censys
from sflib import SpiderFoot
from spiderfoot import SpiderFootEvent, SpiderFootTarget
@pytest.mark.usefixtures
class TestModuleCensys(unittest.TestCase):
    """Unit tests for the sfp_censys module."""

    def test_opts(self):
        """Every option should have a matching description."""
        plugin = sfp_censys()
        self.assertEqual(len(plugin.opts), len(plugin.optdescs))

    def test_setup(self):
        """setup() should accept a SpiderFoot instance and empty options."""
        plugin = sfp_censys()
        plugin.setup(SpiderFoot(self.default_options), dict())

    def test_watchedEvents_should_return_list(self):
        self.assertIsInstance(sfp_censys().watchedEvents(), list)

    def test_producedEvents_should_return_list(self):
        self.assertIsInstance(sfp_censys().producedEvents(), list)

    def test_parseApiResponse_nonfatal_http_response_code_should_not_set_errorState(self):
        """Recoverable HTTP codes should not abort the module."""
        sf = SpiderFoot(self.default_options)

        for code in ("200", "400", "404"):
            with self.subTest(code=code):
                plugin = sfp_censys()
                plugin.setup(sf, dict())
                self.assertIsNone(plugin.parseApiResponse({"code": code, "content": None}))
                self.assertFalse(plugin.errorState)

    def test_parseApiResponse_fatal_http_response_error_code_should_set_errorState(self):
        """Auth, rate-limit and server errors should put the module into an error state."""
        sf = SpiderFoot(self.default_options)

        for code in ("401", "402", "403", "429", "500", "502", "503"):
            with self.subTest(code=code):
                plugin = sfp_censys()
                plugin.setup(sf, dict())
                self.assertIsNone(plugin.parseApiResponse({"code": code, "content": None}))
                self.assertTrue(plugin.errorState)

    def test_handleEvent_no_api_key_should_set_errorState(self):
        """Handling an event without an API key configured should flag an error."""
        plugin = sfp_censys()
        plugin.setup(SpiderFoot(self.default_options), dict())
        plugin.setTarget(SpiderFootTarget('example target value', 'IP_ADDRESS'))

        evt = SpiderFootEvent('ROOT', 'example data', '', '')
        self.assertIsNone(plugin.handleEvent(evt))
        self.assertTrue(plugin.errorState)
| mit | 9b103981948acc6945dd25fd10992d84 | 33.178082 | 90 | 0.624449 | 3.746246 | false | true | false | false |
smicallef/spiderfoot | modules/sfp_crobat_api.py | 1 | 5317 | # -*- coding: utf-8 -*-
# -------------------------------------------------------------------------------
# Name: sfp_crobat_api
# Purpose: Search Crobat API for subdomains.
# https://www.onsecurity.co.uk/blog/how-i-made-rapid7s-project-sonar-searchable
# https://github.com/cgboal/sonarsearch
#
# Authors: <bcoles@gmail.com>
#
# Created: 2020-08-29
# Copyright: (c) bcoles 2020
# Licence: MIT
# -------------------------------------------------------------------------------
import json
import time
import urllib
from spiderfoot import SpiderFootEvent, SpiderFootPlugin
class sfp_crobat_api(SpiderFootPlugin):
    meta = {
        'name': "Crobat API",
        'summary': "Search Crobat API for subdomains.",
        'flags': [],
        'useCases': ["Footprint", "Investigate", "Passive"],
        'categories': ["Passive DNS"],
        'dataSource': {
            'website': "https://sonar.omnisint.io/",
            'model': "FREE_NOAUTH_UNLIMITED",
            'logo': "https://sonar.omnisint.io/img/crobat.png",
            'description': "The entire Rapid7 Sonar DNS dataset indexed,"
            " available at your fingertips.",
        }
    }

    opts = {
        "verify": True,
        "max_pages": 10,
        "delay": 1
    }

    optdescs = {
        "verify": "DNS resolve each identified subdomain.",
        "max_pages": "Maximum number of pages of results to fetch.",
        "delay": "Delay between requests, in seconds."
    }

    # Per-scan de-duplication store and error flag; reset in setup()
    results = None
    errorState = False

    def setup(self, sfc, userOpts=dict()):
        """Initialise the module with a SpiderFoot instance and user options."""
        self.sf = sfc
        self.results = self.tempStorage()
        self.errorState = False

        for opt in userOpts.keys():
            self.opts[opt] = userOpts[opt]

    def watchedEvents(self):
        """Return the event types this module consumes."""
        return ["DOMAIN_NAME"]

    def producedEvents(self):
        """Return the event types this module emits."""
        return ["RAW_RIR_DATA", "INTERNET_NAME", "INTERNET_NAME_UNRESOLVED"]

    def queryDomain(self, qry, page=0):
        """Query the Crobat subdomain API for a domain.

        Args:
            qry (str): domain name
            page (int): page of results to fetch (zero-based)

        Returns:
            list: subdomains found, or None on failure / no data
        """
        headers = {
            "Accept": "application/json"
        }
        params = urllib.parse.urlencode({
            'page': page
        })
        # Sanitise the domain to ASCII; unmappable characters are replaced.
        domain = qry.encode('raw_unicode_escape').decode("ascii", errors='replace')
        res = self.sf.fetchUrl(
            f"https://sonar.omnisint.io/subdomains/{domain}?{params}",
            headers=headers,
            timeout=30,
            useragent=self.opts['_useragent']
        )

        # Self-imposed rate limiting between requests ('delay' option).
        time.sleep(self.opts['delay'])

        return self.parseApiResponse(res)

    def parseApiResponse(self, res: dict):
        """Parse a raw HTTP response from the Crobat API.

        Args:
            res (dict): fetchUrl() response with 'code' and 'content' keys.
                Note: 'code' is compared as a string.

        Returns:
            list: parsed JSON results, or None on error / no data
        """
        if not res:
            self.error("No response from Crobat API.")
            return None

        # Future proofing - Crobat API does not implement rate limiting
        if res['code'] == '429':
            self.error("You are being rate-limited by Crobat API")
            self.errorState = True
            return None

        # Catch all non-200 status codes, and presume something went wrong
        if res['code'] != '200':
            self.error("Failed to retrieve content from Crobat API")
            self.errorState = True
            return None

        if res['content'] is None:
            return None

        # returns "null" when page has no data
        if res['content'] == "null":
            return None

        try:
            data = json.loads(res['content'])
        except Exception as e:
            self.debug(f"Error processing JSON response: {e}")
            return None

        # The API is expected to return a JSON list of subdomains.
        if not isinstance(data, list):
            self.error("Failed to retrieve content from Crobat API")
            return None

        return data

    def handleEvent(self, event):
        """Enumerate subdomains of a DOMAIN_NAME event via the Crobat API.

        Args:
            event (SpiderFootEvent): event to handle
        """
        eventName = event.eventType
        srcModuleName = event.module
        eventData = event.data

        if self.errorState:
            return

        if eventData in self.results:
            return

        self.results[eventData] = True

        self.debug(f"Received event, {eventName}, from {srcModuleName}")

        if eventName != "DOMAIN_NAME":
            return

        page = 0
        while page < self.opts['max_pages']:
            # Stop if the scan was aborted or the module hit an error.
            if self.checkForStop():
                return
            if self.errorState:
                return

            data = self.queryDomain(eventData, page)

            if not data:
                self.debug(f"No information found for domain {eventData} (page: {page})")
                return

            evt = SpiderFootEvent('RAW_RIR_DATA', str(data), self.__name__, event)
            self.notifyListeners(evt)

            page += 1

            for domain in set(data):
                if domain in self.results:
                    continue
                # Only report hosts within the scan's target scope.
                if not self.getTarget().matches(domain, includeChildren=True, includeParents=True):
                    continue
                # Optionally verify the host resolves before reporting it.
                if self.opts['verify'] and not self.sf.resolveHost(domain) and not self.sf.resolveHost6(domain):
                    self.debug(f"Host {domain} could not be resolved")
                    evt = SpiderFootEvent("INTERNET_NAME_UNRESOLVED", domain, self.__name__, event)
                    self.notifyListeners(evt)
                else:
                    evt = SpiderFootEvent("INTERNET_NAME", domain, self.__name__, event)
                    self.notifyListeners(evt)
| mit | 79f6a0687702421104fc5cbc1f3338bc | 29.382857 | 112 | 0.536393 | 4.09 | false | false | false | false |
smicallef/spiderfoot | modules/sfp_adguard_dns.py | 1 | 4148 | # -*- coding: utf-8 -*-
# -------------------------------------------------------------------------------
# Name: sfp_adguard_dns
# Purpose: SpiderFoot plug-in for looking up whether hosts are blocked by
# AdGuard DNS servers.
#
# Author: <bcoles@gmail.com>
#
# Created: 2021-10-11
# Copyright: (c) bcoles 2021
# Licence: MIT
# -------------------------------------------------------------------------------
import dns.resolver
from spiderfoot import SpiderFootEvent, SpiderFootPlugin
class sfp_adguard_dns(SpiderFootPlugin):
    meta = {
        'name': "AdGuard DNS",
        'summary': "Check if a host would be blocked by AdGuard DNS.",
        'flags': [],
        'useCases': ["Investigate", "Passive"],
        'categories': ["Reputation Systems"],
        'dataSource': {
            'website': "https://adguard.com/",
            'model': "FREE_NOAUTH_UNLIMITED",
            'references': [
                "https://adguard.com/en/adguard-dns/overview.html",
            ],
            'favIcon': "https://adguard.com/img/favicons/favicon.ico",
            'logo': "https://adguard.com/img/favicons/apple-touch-icon.png",
            'description': "AdGuard DNS is a foolproof way to block Internet ads that does not require installing any applications. "
            "It is easy to use, absolutely free, easily set up on any device, and provides you with minimal necessary functions "
            "to block ads, counters, malicious websites, and adult content."
        }
    }

    opts = {
    }

    optdescs = {
    }

    results = None

    def setup(self, sfc, userOpts=dict()):
        """Initialise the module with a SpiderFoot instance and user options."""
        self.sf = sfc
        self.results = self.tempStorage()

        for key, value in list(userOpts.items()):
            self.opts[key] = value

    def watchedEvents(self):
        """Return the event types this module consumes."""
        return [
            "INTERNET_NAME",
            "AFFILIATE_INTERNET_NAME",
            "CO_HOSTED_SITE"
        ]

    def producedEvents(self):
        """Return the event types this module emits."""
        return [
            "BLACKLISTED_INTERNET_NAME",
            "BLACKLISTED_AFFILIATE_INTERNET_NAME",
            "BLACKLISTED_COHOST",
        ]

    def queryDefaultDNS(self, qaddr):
        """Resolve a host against AdGuard's "Default" filter DNS servers."""
        resolver = dns.resolver.Resolver()
        resolver.nameservers = ["94.140.14.14", "94.140.15.15"]

        try:
            return resolver.resolve(qaddr)
        except Exception:
            self.debug(f"Unable to resolve {qaddr}")

        return None

    def queryFamilyDNS(self, qaddr):
        """Resolve a host against AdGuard's "Family" filter DNS servers."""
        resolver = dns.resolver.Resolver()
        resolver.nameservers = ["94.140.14.15", "94.140.15.16"]

        try:
            return resolver.resolve(qaddr)
        except Exception:
            self.debug(f"Unable to resolve {qaddr}")

        return None

    def handleEvent(self, event):
        """Check whether a host is blocked by AdGuard DNS and, if so,
        emit the matching blacklist event."""
        eventName = event.eventType
        eventData = event.data

        self.debug(f"Received event, {eventName}, from {event.module}")

        if eventData in self.results:
            return

        self.results[eventData] = True

        # Map each watched event type to the blacklist event it produces.
        event_type_map = {
            "INTERNET_NAME": "BLACKLISTED_INTERNET_NAME",
            "AFFILIATE_INTERNET_NAME": "BLACKLISTED_AFFILIATE_INTERNET_NAME",
            "CO_HOSTED_SITE": "BLACKLISTED_COHOST",
        }

        blacklist_type = event_type_map.get(eventName)
        if blacklist_type is None:
            self.debug(f"Unexpected event type {eventName}, skipping")
            return

        family = self.sf.normalizeDNS(self.queryFamilyDNS(eventData))
        default = self.sf.normalizeDNS(self.queryDefaultDNS(eventData))

        if not family or not default:
            return

        # AdGuard answers with this sinkhole address for blocked hosts.
        if '94.140.14.35' in family:
            self.debug(f"{eventData} blocked by AdGuard Family DNS")
            evt = SpiderFootEvent(blacklist_type, f"AdGuard - Family Filter [{eventData}]", self.__name__, event)
            self.notifyListeners(evt)

        if '94.140.14.35' in default:
            self.debug(f"{eventData} blocked by AdGuard Default DNS")
            evt = SpiderFootEvent(blacklist_type, f"AdGuard - Default Filter [{eventData}]", self.__name__, event)
            self.notifyListeners(evt)
| mit | 34acf5498019e938e5aeb53e95df854d | 31.155039 | 133 | 0.562681 | 3.8125 | false | false | false | false |
smicallef/spiderfoot | modules/sfp_mnemonic.py | 1 | 9996 | # -*- coding: utf-8 -*-
# -------------------------------------------------------------------------------
# Name: sfp_mnemonic
# Purpose: SpiderFoot plug-in for retrieving passive DNS information
# from Mnemonic PassiveDNS API.
#
# Author: <bcoles@gmail.com>
#
# Created: 2018-10-12
# Copyright: (c) bcoles 2018
# Licence: MIT
# -------------------------------------------------------------------------------
import json
import time
import urllib.error
import urllib.parse
import urllib.request
from spiderfoot import SpiderFootEvent, SpiderFootPlugin
class sfp_mnemonic(SpiderFootPlugin):
    meta = {
        'name': "Mnemonic PassiveDNS",
        'summary': "Obtain Passive DNS information from PassiveDNS.mnemonic.no.",
        'flags': [],
        'useCases': ["Footprint", "Investigate", "Passive"],
        'categories': ["Passive DNS"],
        'dataSource': {
            'website': "https://www.mnemonic.no",
            'model': "FREE_NOAUTH_UNLIMITED",
            'references': [
                "https://www.mnemonic.no/resources/whitepapers/",
                "https://www.mnemonic.no/research-and-development/",
                "https://docs.mnemonic.no/display/public/API/PassiveDNS+Integration+Guide"
            ],
            'favIcon': "https://www.mnemonic.no/favicon-96x96.png",
            'logo': "https://www.mnemonic.no/UI/logo.svg",
            'description': "mnemonic helps businesses manage their security risks, "
            "protect their data and defend against cyber threats.\n"
            "Our expert team of security consultants, product specialists, "
            "threat researchers, incident responders and ethical hackers, combined "
            "with our Argus security platform ensures we stay ahead of "
            "advanced cyberattacks and protect our customers from evolving threats.",
        }
    }

    opts = {
        'per_page': 500,
        'max_pages': 2,
        'timeout': 30,
        'maxage': 180,    # 6 months
        'verify': True,
        'cohostsamedomain': False,
        'maxcohost': 100
    }

    optdescs = {
        'per_page': "Maximum number of results per page.",
        'max_pages': "Maximum number of pages of results to fetch.",
        'timeout': "Query timeout, in seconds.",
        'maxage': "The maximum age of the data returned, in days, in order to be considered valid.",
        'verify': "Verify identified domains still resolve to the associated specified IP address.",
        'cohostsamedomain': "Treat co-hosted sites on the same target domain as co-hosting?",
        'maxcohost': "Stop reporting co-hosted sites after this many are found, as it would likely indicate web hosting.",
    }

    # Number of co-hosted sites reported so far for the current scan
    cohostcount = 0
    # Per-scan de-duplication store; (re)initialised in setup()
    results = None
    errorState = False

    def setup(self, sfc, userOpts=dict()):
        """Initialise the module with a SpiderFoot instance and user options."""
        self.sf = sfc
        self.results = self.tempStorage()
        self.cohostcount = 0
        self.errorState = False

        for opt in list(userOpts.keys()):
            self.opts[opt] = userOpts[opt]

    def watchedEvents(self):
        """Return the event types this module consumes."""
        return [
            'IP_ADDRESS',
            'IPV6_ADDRESS',
            'INTERNET_NAME',
            'DOMAIN_NAME'
        ]

    def producedEvents(self):
        """Return the event types this module emits."""
        return [
            'IP_ADDRESS',
            'IPV6_ADDRESS',
            'INTERNAL_IP_ADDRESS',
            'CO_HOSTED_SITE',
            'INTERNET_NAME',
            'DOMAIN_NAME'
        ]

    def query(self, qry, limit=500, offset=0):
        """Query the Mnemonic PassiveDNS v3 API.

        Args:
            qry (str): domain name or IP address
            limit (int): Limit the number of returned values.
            offset (int): Skip the initial <offset> number of values in the resultset.

        Returns:
            dict: results as JSON
        """
        params = urllib.parse.urlencode({
            'limit': limit,
            'offset': offset
        })

        res = self.sf.fetchUrl(
            f"https://api.mnemonic.no/pdns/v3/{qry}?{params}",
            timeout=self.opts['timeout'],
            useragent=self.opts['_useragent']
        )

        # Unauthenticated users are limited to 100 requests per minute, and 1000 requests per day.
        time.sleep(0.75)

        if res['content'] is None:
            self.info("No results found for " + qry)
            return None

        try:
            data = json.loads(res['content'])
        except Exception as e:
            self.debug(f"Error processing JSON response from Mnemonic: {e}")
            return None

        # The API embeds its own status code in the JSON body.
        response_code = data.get('responseCode')

        if not response_code:
            self.debug("Error retrieving search results.")
            return None

        if response_code == 402:
            self.debug("Error retrieving search results: Resource limit exceeded")
            self.errorState = True
            return None

        if response_code != 200:
            self.debug(f"Error retrieving search results: {response_code}")
            return None

        if 'data' not in data:
            self.info(f"No results found for {qry}")
            return None

        size = data.get('size')
        count = data.get('count')

        if not count or not size:
            self.info(f"No results found for {qry}")
            return None

        self.info(f"Retrieved {size} of {count} results")

        return data['data']

    def handleEvent(self, event):
        """Look up passive DNS records for the event data and report
        discovered IPs, hostnames, domains and co-hosted sites.

        Args:
            event (SpiderFootEvent): event to handle
        """
        eventName = event.eventType
        srcModuleName = event.module
        eventData = event.data

        if self.errorState:
            return

        if eventData in self.results:
            self.debug(f"Skipping {eventData}, already checked.")
            return

        self.results[eventData] = True

        self.debug(f"Received event, {eventName}, from {srcModuleName}")

        position = 0
        max_pages = int(self.opts['max_pages'])
        per_page = int(self.opts['per_page'])
        # Records last seen before this timestamp (ms) are considered stale.
        agelimit = int(time.time() * 1000) - (86400000 * self.opts['maxage'])
        self.cohostcount = 0
        cohosts = list()

        while position < (per_page * max_pages):
            if self.checkForStop():
                break
            if self.errorState:
                break

            data = self.query(eventData, limit=per_page, offset=position)

            if data is None:
                self.info(f"No passive DNS data found for {eventData}")
                break

            position += per_page

            for r in data:
                # Skip wildcard and pattern records.
                if "*" in r['query'] or "%" in r['query']:
                    continue

                if r['lastSeenTimestamp'] < agelimit:
                    self.debug(f"Record {r['answer']} found for {r['query']} is too old, skipping.")
                    continue

                # For IP events, A records give us candidate co-hosted sites.
                if eventName in ['IP_ADDRESS']:
                    if r['rrtype'] == 'a':
                        if self.sf.validIP(r['query']):
                            cohosts.append(r['query'])
                    continue

                if eventName in ['INTERNET_NAME', 'DOMAIN_NAME']:
                    # Ignore PTR records
                    if r['rrtype'] == 'ptr':
                        continue

                    if r['rrtype'] == 'cname':
                        if not self.getTarget().matches(r['query'], includeParents=True):
                            continue

                        cohosts.append(r['query'])

                        if self.opts['verify']:
                            continue

                answer = r.get('answer')

                if r['rrtype'] == 'a':
                    if not self.sf.validIP(answer):
                        continue

                    # Private/loopback addresses get a distinct event type.
                    if self.sf.isValidLocalOrLoopbackIp(answer):
                        evt = SpiderFootEvent("INTERNAL_IP_ADDRESS", answer, self.__name__, event)
                    else:
                        evt = SpiderFootEvent("IP_ADDRESS", answer, self.__name__, event)
                    self.notifyListeners(evt)

                if r['rrtype'] == 'aaaa':
                    if not self.sf.validIP6(r['answer']):
                        continue

                    if self.sf.isValidLocalOrLoopbackIp(answer):
                        evt = SpiderFootEvent("INTERNAL_IP_ADDRESS", answer, self.__name__, event)
                    else:
                        evt = SpiderFootEvent("IPV6_ADDRESS", answer, self.__name__, event)
                    self.notifyListeners(evt)

        for co in set(cohosts):
            if self.checkForStop():
                return

            if co in self.results:
                continue

            # For IP events, confirm the co-host still points at the IP.
            if eventName in ["IP_ADDRESS", "IPV6_ADDRESS"]:
                if self.opts['verify'] and not self.sf.validateIP(co, eventData):
                    self.debug(f"Host {co} no longer resolves to {eventData}")
                    continue

            # Report same-domain hosts as co-hosting, capped at 'maxcohost'.
            if self.opts['cohostsamedomain']:
                if self.cohostcount < self.opts['maxcohost']:
                    evt = SpiderFootEvent("CO_HOSTED_SITE", co, self.__name__, event)
                    self.notifyListeners(evt)
                    self.cohostcount += 1
                continue

            if self.getTarget().matches(co, includeParents=True):
                if self.opts['verify'] and not self.sf.resolveHost(co) and not self.sf.resolveHost6(co):
                    self.debug(f"Host {co} could not be resolved")
                    evt = SpiderFootEvent("INTERNET_NAME_UNRESOLVED", co, self.__name__, event)
                    self.notifyListeners(evt)
                    continue

                evt = SpiderFootEvent("INTERNET_NAME", co, self.__name__, event)
                self.notifyListeners(evt)

                if self.sf.isDomain(co, self.opts['_internettlds']):
                    evt = SpiderFootEvent("DOMAIN_NAME", co, self.__name__, event)
                    self.notifyListeners(evt)
# End of sfp_mnemonic class
| mit | 3d0faa3c355c94be5f352d5891462d51 | 33.708333 | 122 | 0.517707 | 4.30491 | false | false | false | false |
smicallef/spiderfoot | modules/sfp_focsec.py | 1 | 6040 | # -*- coding: utf-8 -*-
# -------------------------------------------------------------------------------
# Name: sfp_focsec
# Purpose: Look up IP address information from Focsec.
#
# Author: <bcoles@gmail.com>
#
# Created: 2021-10-09
# Copyright: (c) bcoles 2021
# Licence: MIT
# -------------------------------------------------------------------------------
import json
import urllib
from spiderfoot import SpiderFootEvent, SpiderFootPlugin
class sfp_focsec(SpiderFootPlugin):
    meta = {
        'name': "Focsec",
        'summary': "Look up IP address information from Focsec.",
        'flags': ['apikey'],
        'useCases': ["Passive", "Footprint", "Investigate"],
        'categories': ["Search Engines"],
        'dataSource': {
            'website': "https://focsec.com/",
            'model': "FREE_AUTH_LIMITED",
            'references': [
                "https://docs.focsec.com/#ip",
            ],
            "apiKeyInstructions": [
                "Visit https://focsec.com/signup",
                "Register an account",
                "Visit https://focsec.com/account/dashboard and use the API key provided",
            ],
            'favIcon': "https://focsec.com/static/favicon.png",
            'logo': "https://focsec.com/static/web/images/logo.png",
            'description': "Our API lets you know if a user's IP address is associated with a VPN, Proxy, TOR or malicious bots."
            "Take your applications security to the next level by detecting suspicious activity early on."
        }
    }

    opts = {
        "api_key": "",
    }

    optdescs = {
        "api_key": "Focsec API Key.",
    }

    # Per-scan de-duplication store and error flag; reset in setup()
    results = None
    errorState = False

    def setup(self, sfc, userOpts=dict()):
        """Initialise the module with a SpiderFoot instance and user options."""
        self.sf = sfc
        self.errorState = False
        self.results = self.tempStorage()

        for opt in list(userOpts.keys()):
            self.opts[opt] = userOpts[opt]

    def watchedEvents(self):
        """Return the event types this module consumes."""
        return [
            "IP_ADDRESS",
            "IPV6_ADDRESS"
        ]

    def producedEvents(self):
        """Return the event types this module emits."""
        return [
            "RAW_RIR_DATA",
            "GEOINFO",
            "MALICIOUS_IPADDR",
            "PROXY_HOST",
            "VPN_HOST",
            "TOR_EXIT_NODE",
        ]

    def query(self, qry):
        """Retrieve IP address information from Focsec.

        Args:
            qry (str): IPv4/IPv6 address

        Returns:
            dict: JSON formatted results
        """
        params = urllib.parse.urlencode({
            'api_key': self.opts["api_key"],
        })

        res = self.sf.fetchUrl(
            f"https://api.focsec.com/v1/ip/{qry}?{params}",
            timeout=self.opts["_fetchtimeout"],
            useragent=self.opts['_useragent']
        )

        if not res:
            self.error("No response from Focsec.")
            return None

        # Note: fetchUrl() returns the HTTP status code as a string.
        if res['code'] == "400":
            self.error("Bad request.")
            self.errorState = True
            return None

        if res['code'] == "401":
            self.error("Unauthorized - Invalid API key.")
            self.errorState = True
            return None

        if res['code'] == "402":
            self.error("Unauthorized - Payment Required. Subscription or trial period expired.")
            self.errorState = True
            return None

        if res['code'] == "404":
            self.debug(f"No results for {qry}")
            return None

        # Future proofing - Focsec does not implement rate limiting
        if res['code'] == "429":
            self.error("You are being rate-limited by Focsec.")
            return None

        if res['code'] != "200":
            self.error(f"Unexpected HTTP response code {res['code']} from Focsec.")
            return None

        if not res['content']:
            self.debug("No results from Focsec.")
            return None

        try:
            return json.loads(res['content'])
        except Exception as e:
            self.debug(f"Error processing JSON response: {e}")
            return None

    def handleEvent(self, event):
        """Look up an IP address in Focsec and report bot/TOR/VPN/proxy
        classifications plus location information.

        Args:
            event (SpiderFootEvent): event to handle (IP_ADDRESS / IPV6_ADDRESS)
        """
        eventName = event.eventType
        srcModuleName = event.module
        eventData = event.data

        if self.errorState:
            return

        if eventData in self.results:
            self.debug(f"Skipping {eventData}, already checked.")
            return

        self.results[eventData] = True

        self.debug(f"Received event, {eventName}, from {srcModuleName}")

        if self.opts["api_key"] == "":
            self.error(
                f"You enabled {self.__class__.__name__} but did not set an API key!"
            )
            self.errorState = True
            return

        data = self.query(eventData)

        if not data:
            self.debug(f"Found no results for {eventData}")
            return

        e = SpiderFootEvent("RAW_RIR_DATA", str(data), self.__name__, event)
        self.notifyListeners(e)

        # Each boolean flag in the response maps to a dedicated event type.
        is_bot = data.get('is_bot')
        if is_bot:
            e = SpiderFootEvent("MALICIOUS_IPADDR", f"Focsec [{eventData}]", self.__name__, event)
            self.notifyListeners(e)

        is_tor = data.get('is_tor')
        if is_tor:
            e = SpiderFootEvent("TOR_EXIT_NODE", eventData, self.__name__, event)
            self.notifyListeners(e)

        is_vpn = data.get('is_vpn')
        if is_vpn:
            e = SpiderFootEvent("VPN_HOST", eventData, self.__name__, event)
            self.notifyListeners(e)

        is_proxy = data.get('is_proxy')
        if is_proxy:
            e = SpiderFootEvent("PROXY_HOST", eventData, self.__name__, event)
            self.notifyListeners(e)

        # Join city/country, dropping whichever fields are absent.
        location = ', '.join(
            filter(
                None,
                [
                    data.get('city'),
                    data.get('country'),
                ]
            )
        )
        if location:
            e = SpiderFootEvent("GEOINFO", location, self.__name__, event)
            self.notifyListeners(e)
# End of sfp_focsec class
| mit | ace8566aabf3448001b7e35e5076023d | 27.899522 | 129 | 0.505629 | 4.042838 | false | false | false | false |
smicallef/spiderfoot | modules/sfp_surbl.py | 1 | 7751 | # -*- coding: utf-8 -*-
# -------------------------------------------------------------------------------
# Name: sfp_surbl
# Purpose: SpiderFoot plug-in to check whether IP addresses, netblocks, and
# domains appear in the SURBL blacklist.
#
# Author: <bcoles@gmail.com>
#
# Created: 2021-10-17
# Copyright: (c) bcoles 2021
# Licence: MIT
# -------------------------------------------------------------------------------
from netaddr import IPNetwork
from spiderfoot import SpiderFootEvent, SpiderFootPlugin
class sfp_surbl(SpiderFootPlugin):
    """Check hosts, IPs and netblocks against the SURBL DNS blacklist.

    SURBL is queried over DNS: the host name (or reversed IPv4 address) is
    prefixed to 'multi.surbl.org', and any answer in 127.0.0.x indicates a
    listing.  A 127.0.0.1 answer specifically means SURBL refused the query.
    """

    # Module descriptor consumed by the SpiderFoot registry/UI.
    meta = {
        'name': "SURBL",
        'summary': "Check if a netblock, IP address or domain is in the SURBL blacklist.",
        'flags': [],
        'useCases': ["Investigate", "Passive"],
        'categories': ["Reputation Systems"],
        'dataSource': {
            'website': "http://www.surbl.org/",
            'model': "FREE_NOAUTH_UNLIMITED",  # 250,000 messages per day
            'references': [
                "http://www.surbl.org/lists",
                "http://www.surbl.org/guidelines",
            ],
            'logo': "http://www.surbl.org/templates/css/logo_web-size.jpg" if False else "http://www.surbl.org/images/logo.png",
            'description': "SURBLs are lists of web sites that have appeared in unsolicited messages. "
            "Unlike most lists, SURBLs are not lists of message senders."
        }
    }

    # User-tunable options (see optdescs for meanings).
    opts = {
        'checkaffiliates': True,
        'checkcohosts': True,
        'netblocklookup': True,
        'maxnetblock': 24,
        'subnetlookup': True,
        'maxsubnet': 24
    }

    # Human-readable descriptions of the options above, shown in the UI.
    optdescs = {
        'checkaffiliates': "Apply checks to affiliates?",
        'checkcohosts': "Apply checks to sites found to be co-hosted on the target's IP?",
        'netblocklookup': "Look up all IPs on netblocks deemed to be owned by your target for possible blacklisted hosts on the same target subdomain/domain?",
        'maxnetblock': "If looking up owned netblocks, the maximum netblock size to look up all IPs within (CIDR value, 24 = /24, 16 = /16, etc.)",
        'subnetlookup': "Look up all IPs on subnets which your target is a part of for blacklisting?",
        'maxsubnet': "If looking up subnets, the maximum subnet size to look up all the IPs within (CIDR value, 24 = /24, 16 = /16, etc.)"
    }

    # Per-scan state; reset in setup() so nothing leaks between scan runs.
    results = None
    errorState = False

    def setup(self, sfc, userOpts=dict()):
        """Initialise per-scan state and merge user-supplied options."""
        self.sf = sfc
        self.errorState = False
        self.results = self.tempStorage()
        for opt in list(userOpts.keys()):
            self.opts[opt] = userOpts[opt]

    def watchedEvents(self):
        """Event types this module accepts as input."""
        return [
            'IP_ADDRESS',
            'AFFILIATE_IPADDR',
            'NETBLOCK_OWNER',
            'NETBLOCK_MEMBER',
            'INTERNET_NAME',
            'AFFILIATE_INTERNET_NAME',
            'CO_HOSTED_SITE',
        ]

    def producedEvents(self):
        """Event types this module can emit."""
        return [
            "BLACKLISTED_IPADDR",
            "BLACKLISTED_AFFILIATE_IPADDR",
            "BLACKLISTED_SUBNET",
            "BLACKLISTED_NETBLOCK",
            "BLACKLISTED_INTERNET_NAME",
            "BLACKLISTED_AFFILIATE_INTERNET_NAME",
            "BLACKLISTED_COHOST",
            "MALICIOUS_IPADDR",
            "MALICIOUS_AFFILIATE_IPADDR",
            "MALICIOUS_NETBLOCK",
            "MALICIOUS_SUBNET",
            "MALICIOUS_INTERNET_NAME",
            "MALICIOUS_AFFILIATE_INTERNET_NAME",
            "MALICIOUS_COHOST",
        ]

    # Swap 1.2.3.4 to 4.3.2.1
    def reverseAddr(self, ipaddr):
        """Return the octet-reversed form of an IPv4 address (DNSBL convention),
        or None if the input is not a valid IPv4 address."""
        if not self.sf.validIP(ipaddr):
            self.debug(f"Invalid IPv4 address {ipaddr}")
            return None
        return '.'.join(reversed(ipaddr.split('.')))

    def query(self, qaddr):
        """Query SURBL DNS.

        Args:
            qaddr (str): Host name or IPv4 address.

        Returns:
            list: SURBL DNS entries
        """
        if self.sf.validIP(qaddr):
            # IPv4 addresses are reversed before the zone suffix, per DNSBL convention.
            lookup = self.reverseAddr(qaddr) + '.multi.surbl.org'
        else:
            lookup = f"{qaddr}.multi.surbl.org"

        self.debug(f"Checking SURBL blacklist: {lookup}")

        try:
            return self.sf.resolveHost(lookup)
        except Exception as e:
            # NXDOMAIN (not listed) also ends up here; treat as "no result".
            self.debug(f"SURBL did not resolve {qaddr} / {lookup}: {e}")

        return None

    def handleEvent(self, event):
        """Route an incoming event to the appropriate blacklist check and
        emit BLACKLISTED_*/MALICIOUS_* events for any SURBL listing found."""
        eventName = event.eventType
        eventData = event.data

        self.debug(f"Received event, {eventName}, from {event.module}")

        # De-duplicate: never re-check the same event data within a scan.
        if eventData in self.results:
            return
        self.results[eventData] = True

        # Map the inbound event type to the pair of event types to emit,
        # honouring the user's opt-outs for affiliates/co-hosts/netblocks.
        if eventName == "AFFILIATE_IPADDR":
            if not self.opts.get('checkaffiliates', False):
                return
            malicious_type = "MALICIOUS_AFFILIATE_IPADDR"
            blacklist_type = "BLACKLISTED_AFFILIATE_IPADDR"
        elif eventName == "IP_ADDRESS":
            malicious_type = "MALICIOUS_IPADDR"
            blacklist_type = "BLACKLISTED_IPADDR"
        elif eventName == 'NETBLOCK_MEMBER':
            if not self.opts['subnetlookup']:
                return
            max_subnet = self.opts['maxsubnet']
            # A smaller prefix length means a larger network; refuse to
            # enumerate networks larger than the configured maximum.
            if IPNetwork(eventData).prefixlen < max_subnet:
                self.debug(f"Network size bigger than permitted: {IPNetwork(eventData).prefixlen} > {max_subnet}")
                return
            malicious_type = "MALICIOUS_SUBNET"
            blacklist_type = "BLACKLISTED_SUBNET"
        elif eventName == 'NETBLOCK_OWNER':
            if not self.opts['netblocklookup']:
                return
            max_netblock = self.opts['maxnetblock']
            if IPNetwork(eventData).prefixlen < max_netblock:
                self.debug(f"Network size bigger than permitted: {IPNetwork(eventData).prefixlen} > {max_netblock}")
                return
            malicious_type = "MALICIOUS_NETBLOCK"
            blacklist_type = "BLACKLISTED_NETBLOCK"
        elif eventName == "INTERNET_NAME":
            malicious_type = "MALICIOUS_INTERNET_NAME"
            blacklist_type = "BLACKLISTED_INTERNET_NAME"
        elif eventName == "AFFILIATE_INTERNET_NAME":
            if not self.opts.get('checkaffiliates', False):
                return
            malicious_type = "MALICIOUS_AFFILIATE_INTERNET_NAME"
            blacklist_type = "BLACKLISTED_AFFILIATE_INTERNET_NAME"
        elif eventName == "CO_HOSTED_SITE":
            if not self.opts.get('checkcohosts', False):
                return
            malicious_type = "MALICIOUS_COHOST"
            blacklist_type = "BLACKLISTED_COHOST"
        else:
            self.debug(f"Unexpected event type {eventName}, skipping")
            return

        # Netblocks are expanded to every contained address; anything else
        # is checked as-is.
        addrs = list()
        if eventName.startswith("NETBLOCK_"):
            for addr in IPNetwork(eventData):
                addrs.append(str(addr))
        else:
            addrs.append(eventData)

        for addr in addrs:
            # Abort promptly if the scan was stopped or a fatal error occurred.
            if self.checkForStop():
                return
            if self.errorState:
                return

            res = self.query(addr)
            self.results[addr] = True

            if not res:
                continue

            self.debug(f"{addr} found in SURBL DNS")

            for result in res:
                k = str(result)
                # Only 127.0.0.x answers are meaningful SURBL responses.
                if not k.startswith('127.0.0.'):
                    continue
                # 127.0.0.1 means SURBL refused to answer (rate-limit/block).
                if k == '127.0.0.1':
                    self.error('SURBL rejected lookup request.')
                    self.errorState = True
                    continue
                evt = SpiderFootEvent(blacklist_type, f"SURBL [{addr}]", self.__name__, event)
                self.notifyListeners(evt)
                evt = SpiderFootEvent(malicious_type, f"SURBL [{addr}]", self.__name__, event)
                self.notifyListeners(evt)
# End of sfp_surbl class
| mit | 1be9599861b607b101656d96cb94e932 | 33.29646 | 159 | 0.545478 | 3.881322 | false | false | false | false |
smicallef/spiderfoot | modules/sfp_greynoise.py | 1 | 13723 | # -*- coding: utf-8 -*-
# -------------------------------------------------------------------------------
# Name: sfp_greynoise
# Purpose: Query GreyNoise's API
#
# Author: Steve Micallef
# Updated By: Brad Chiappetta, GreyNoise
#
# Created: 20/11/2018
# Updated: 31-Aug-2022
# Copyright: (c) Steve Micallef
# Licence: MIT
# -------------------------------------------------------------------------------
import json
import time
from datetime import datetime
from netaddr import IPNetwork
from spiderfoot import SpiderFootEvent, SpiderFootPlugin
class sfp_greynoise(SpiderFootPlugin):
    """Enrich IP addresses and netblocks with GreyNoise scan/attack-noise data.

    Single IPs use the GreyNoise "noise context" endpoint; netblocks use the
    experimental GNQL query endpoint.  Listings produce MALICIOUS_* events,
    and (for the target's own IPs) GEOINFO/BGP_AS_MEMBER/COMPANY_NAME/
    OPERATING_SYSTEM enrichment events.
    """

    meta = {
        "name": "GreyNoise",
        "summary": "Obtain IP enrichment data from GreyNoise",
        "flags": ["apikey"],
        "useCases": ["Investigate", "Passive"],
        "categories": ["Reputation Systems"],
        "dataSource": {
            "website": "https://greynoise.io/",
            "model": "FREE_AUTH_LIMITED",
            "references": ["https://docs.greynoise.io/", "https://viz.greynoise.io/signup"],
            "apiKeyInstructions": [
                "Visit https://viz.greynoise.io/signup",
                "Sign up for a free account",
                "Navigate to https://viz.greynoise.io/account",
                "The API key is listed under 'API Key'",
            ],
            "favIcon": "https://viz.greynoise.io/favicon.ico",
            "logo": "https://viz.greynoise.io/_nuxt/img/greynoise-logo.dccd59d.png",
            "description": "At GreyNoise, we collect and analyze untargeted, widespread, "
            "and opportunistic scan and attack activity that reaches every server directly connected to the Internet. "
            "Mass scanners (such as Shodan and Censys), search engines, bots, worms, "
            "and crawlers generate logs and events omnidirectionally on every IP address in the IPv4 space. "
            "GreyNoise gives you the ability to filter this useless noise out.",
        },
    }

    # Default options
    opts = {
        "api_key": "",
        "age_limit_days": 30,
        "netblocklookup": True,
        "maxnetblock": 24,
        "subnetlookup": True,
        "maxsubnet": 24
        # 'asnlookup': True
    }

    # Option descriptions
    optdescs = {
        "api_key": "GreyNoise API Key.",
        "age_limit_days": "Ignore any records older than this many days. 0 = unlimited.",
        "netblocklookup": "Look up netblocks deemed to be owned by your target for possible blacklisted hosts on the same target subdomain/domain?",
        "maxnetblock": "If looking up owned netblocks, the maximum netblock size to look up all IPs within (CIDR value, 24 = /24, 16 = /16, etc.)",
        "subnetlookup": "Look up subnets which your target is a part of for blacklisting?",
        "maxsubnet": "If looking up subnets, the maximum subnet size to look up all the IPs within (CIDR value, 24 = /24, 16 = /16, etc.)"
        # 'asnlookup': "Look up ASNs that your target is a member of?"
    }

    # Be sure to completely clear any class variables in setup()
    # or you run the risk of data persisting between scan runs.
    results = None
    errorState = False

    def setup(self, sfc, userOpts=dict()):
        """Initialise per-scan state and merge user-supplied options."""
        self.sf = sfc
        self.results = self.tempStorage()

        # Clear / reset any other class member variables here
        # or you risk them persisting between threads.
        for opt in list(userOpts.keys()):
            self.opts[opt] = userOpts[opt]

    # What events is this module interested in for input
    def watchedEvents(self):
        """Event types this module accepts as input."""
        return ["IP_ADDRESS", "AFFILIATE_IPADDR", "NETBLOCK_MEMBER", "NETBLOCK_OWNER"]

    # What events this module produces
    def producedEvents(self):
        """Event types this module can emit."""
        return [
            "MALICIOUS_IPADDR",
            "MALICIOUS_ASN",
            "MALICIOUS_SUBNET",
            "MALICIOUS_AFFILIATE_IPADDR",
            "MALICIOUS_NETBLOCK",
            "COMPANY_NAME",
            "GEOINFO",
            "BGP_AS_MEMBER",
            "OPERATING_SYSTEM",
            "RAW_RIR_DATA",
        ]

    def queryIP(self, qry, qry_type):
        """Query the GreyNoise API.

        Args:
            qry (str): IP address or GNQL query (e.g. a CIDR netblock).
            qry_type (str): "ip" for the context endpoint; anything else
                uses the experimental GNQL endpoint.

        Returns:
            dict: parsed JSON response, or None on failure (errorState set).
        """
        gn_context_url = "https://api.greynoise.io/v2/noise/context/"
        gn_gnql_url = "https://api.greynoise.io/v2/experimental/gnql?query="
        headers = {"key": self.opts["api_key"]}
        res = {}

        if qry_type == "ip":
            self.debug(f"Querying GreyNoise for IP: {qry}")
            ip_response = self.sf.fetchUrl(
                gn_context_url + qry,
                timeout=self.opts["_fetchtimeout"],
                useragent="greynoise-spiderfoot-v1.2.0",
                headers=headers,
            )
            if ip_response["code"] == "200":
                res = json.loads(ip_response["content"])
        else:
            self.debug(f"Querying GreyNoise for Netblock: {qry}")
            # NOTE(review): this path sends a different client version string
            # ("v1.1.0") than the IP path ("v1.2.0") — presumably an oversight;
            # left as-is since it is part of the wire protocol.
            query_response = self.sf.fetchUrl(
                gn_gnql_url + qry,
                timeout=self.opts["_fetchtimeout"],
                useragent="greynoise-spiderfoot-v1.1.0",
                headers=headers,
            )
            if query_response["code"] == "200":
                res = json.loads(query_response["content"])

        if not res:
            self.error("Greynoise API key seems to have been rejected or you have exceeded usage limits.")
            self.errorState = True
            return None

        return res

    # Handle events sent to this module
    def handleEvent(self, event):
        """Check an IP/netblock against GreyNoise and emit enrichment events.

        Netblock responses carry a "data" list of per-IP records; single-IP
        responses are one record with a top-level "seen" flag.  Both shapes
        are handled below with parallel logic.
        """
        eventName = event.eventType
        srcModuleName = event.module
        eventData = event.data

        if self.errorState:
            return

        self.debug(f"Received event, {eventName}, from {srcModuleName}")

        if self.opts["api_key"] == "":
            self.error("You enabled sfp_greynoise but did not set an API key!")
            self.errorState = True
            return

        if eventData in self.results:
            self.debug(f"Skipping {eventData}, already checked.")
            return
        self.results[eventData] = True

        if eventName == "NETBLOCK_OWNER":
            if not self.opts["netblocklookup"]:
                return
            else:
                # Smaller prefix length = larger network; refuse to query
                # networks larger than the configured maximum.
                if IPNetwork(eventData).prefixlen < self.opts["maxnetblock"]:
                    self.debug(
                        "Network size bigger than permitted: "
                        + str(IPNetwork(eventData).prefixlen)
                        + " > "
                        + str(self.opts["maxnetblock"])
                    )
                    return

        if eventName == "NETBLOCK_MEMBER":
            if not self.opts["subnetlookup"]:
                return
            else:
                if IPNetwork(eventData).prefixlen < self.opts["maxsubnet"]:
                    self.debug(
                        "Network size bigger than permitted: "
                        + str(IPNetwork(eventData).prefixlen)
                        + " > "
                        + str(self.opts["maxsubnet"])
                    )
                    return

        if eventName == "IP_ADDRESS":
            evtType = "MALICIOUS_IPADDR"
            qryType = "ip"
        if eventName.startswith("NETBLOCK_"):
            evtType = "MALICIOUS_IPADDR"
            qryType = "netblock"
        if eventName == "AFFILIATE_IPADDR":
            evtType = "MALICIOUS_AFFILIATE_IPADDR"
            qryType = "ip"

        ret = self.queryIP(eventData, qryType)

        if not ret:
            return

        if "data" not in ret and "seen" not in ret:
            return

        if "data" in ret and len(ret["data"]) > 0:
            # GNQL (netblock) response: one record per noisy IP.
            for rec in ret["data"]:
                if rec.get("seen", None):
                    self.debug(f"Found threat info in Greynoise: {rec['ip']}")
                    lastseen = rec.get("last_seen", "1970-01-01")
                    lastseen_dt = datetime.strptime(lastseen, "%Y-%m-%d")
                    lastseen_ts = int(time.mktime(lastseen_dt.timetuple()))
                    age_limit_ts = int(time.time()) - (86400 * self.opts["age_limit_days"])
                    if self.opts["age_limit_days"] > 0 and lastseen_ts < age_limit_ts:
                        self.debug(f"Record [{rec['ip']}] found but too old, skipping.")
                        return

                    # Only report meta data about the target, not affiliates
                    if rec.get("metadata") and eventName == "IP_ADDRESS":
                        met = rec.get("metadata")
                        if met.get("country", "unknown") != "unknown":
                            loc = ""
                            if met.get("city"):
                                loc = met.get("city") + ", "
                            loc += met.get("country")
                            e = SpiderFootEvent("GEOINFO", loc, self.__name__, event)
                            self.notifyListeners(e)
                        if met.get("asn", "unknown") != "unknown":
                            asn = met.get("asn").replace("AS", "")
                            e = SpiderFootEvent("BGP_AS_MEMBER", asn, self.__name__, event)
                            self.notifyListeners(e)
                        if met.get("organization", "unknown") != "unknown":
                            e = SpiderFootEvent("COMPANY_NAME", met.get("organization"), self.__name__, event)
                            self.notifyListeners(e)
                        if met.get("os", "unknown") != "unknown":
                            e = SpiderFootEvent("OPERATING_SYSTEM", met.get("os"), self.__name__, event)
                            self.notifyListeners(e)
                        e = SpiderFootEvent("RAW_RIR_DATA", str(rec), self.__name__, event)
                        self.notifyListeners(e)

                    if rec.get("classification"):
                        descr = (
                            "GreyNoise - Mass-Scanning IP Detected ["
                            + rec.get("ip")
                            + "]\n - Classification: "
                            + rec.get("classification")
                        )
                        if rec.get("tags"):
                            descr += "\n - " + "Scans For Tags: " + ", ".join(rec.get("tags"))
                        if rec.get("cve"):
                            descr += "\n - " + "Scans For CVEs: " + ", ".join(rec.get("cve"))
                        # BUGFIX: previously tested ret.get("cve") — the response
                        # envelope, not this record — so raw_data was appended
                        # even when the record listed CVEs.
                        if rec.get("raw_data") and not (rec.get("tags") or rec.get("cve")):
                            descr += "\n - " + "Raw data: " + str(rec.get("raw_data"))
                        descr += "\n<SFURL>https://viz.greynoise.io/ip/" + rec.get("ip") + "</SFURL>"
                        e = SpiderFootEvent(evtType, descr, self.__name__, event)
                        self.notifyListeners(e)

        if "seen" in ret:
            # Context (single IP) response: ret itself is the record.
            if ret.get("seen", None):
                lastseen = ret.get("last_seen", "1970-01-01")
                lastseen_dt = datetime.strptime(lastseen, "%Y-%m-%d")
                lastseen_ts = int(time.mktime(lastseen_dt.timetuple()))
                age_limit_ts = int(time.time()) - (86400 * self.opts["age_limit_days"])
                if self.opts["age_limit_days"] > 0 and lastseen_ts < age_limit_ts:
                    self.debug("Record found but too old, skipping.")
                    return

                # Only report meta data about the target, not affiliates
                if ret.get("metadata") and eventName == "IP_ADDRESS":
                    met = ret.get("metadata")
                    if met.get("country", "unknown") != "unknown":
                        loc = ""
                        if met.get("city"):
                            loc = met.get("city") + ", "
                        loc += met.get("country")
                        e = SpiderFootEvent("GEOINFO", loc, self.__name__, event)
                        self.notifyListeners(e)
                    if met.get("asn", "unknown") != "unknown":
                        asn = met.get("asn").replace("AS", "")
                        e = SpiderFootEvent("BGP_AS_MEMBER", asn, self.__name__, event)
                        self.notifyListeners(e)
                    if met.get("organization", "unknown") != "unknown":
                        e = SpiderFootEvent("COMPANY_NAME", met.get("organization"), self.__name__, event)
                        self.notifyListeners(e)
                    if met.get("os", "unknown") != "unknown":
                        e = SpiderFootEvent("OPERATING_SYSTEM", met.get("os"), self.__name__, event)
                        self.notifyListeners(e)
                    e = SpiderFootEvent("RAW_RIR_DATA", str(ret), self.__name__, event)
                    self.notifyListeners(e)

                if ret.get("classification"):
                    descr = (
                        "GreyNoise - Mass-Scanning IP Detected ["
                        + eventData
                        + "]\n - Classification: "
                        + ret.get("classification")
                    )
                    if ret.get("tags"):
                        descr += "\n - " + "Scans For Tags: " + ", ".join(ret.get("tags"))
                    if ret.get("cve"):
                        descr += "\n - " + "Scans For CVEs: " + ", ".join(ret.get("cve"))
                    if ret.get("raw_data") and not (ret.get("tags") or ret.get("cve")):
                        descr += "\n - " + "Raw data: " + str(ret.get("raw_data"))
                    descr += "\n<SFURL>https://viz.greynoise.io/ip/" + ret.get("ip") + "</SFURL>"
                    e = SpiderFootEvent(evtType, descr, self.__name__, event)
                    self.notifyListeners(e)
# End of sfp_greynoise class
| mit | 8c58d1af3f675d04e5ccc12f076632a6 | 43.125402 | 148 | 0.486628 | 4.162269 | false | false | false | false |
smicallef/spiderfoot | modules/sfp_phone.py | 1 | 3919 | # -*- coding: utf-8 -*-
# -------------------------------------------------------------------------------
# Name: sfp_phone
# Purpose: SpiderFoot plug-in for scanning retrieved content by other
# modules (such as sfp_spider) to identify phone numbers, and
# lookup carrier information in Google's libphonenumber DB.
#
# Author: Steve Micallef <steve@binarypool.com>
#
# Created: 19/06/2016
# Copyright: (c) Steve Micallef 2016
# Licence: MIT
# -------------------------------------------------------------------------------
import phonenumbers
from phonenumbers import carrier
# from phonenumbers import geocoder
from spiderfoot import SpiderFootEvent, SpiderFootPlugin
class sfp_phone(SpiderFootPlugin):
    """Extract phone numbers from scraped content, and resolve carrier
    information for already-identified numbers via Google's libphonenumber
    database (through the `phonenumbers` package)."""

    meta = {
        'name': "Phone Number Extractor",
        'summary': "Identify phone numbers in scraped webpages.",
        'flags': [],
        'useCases': ["Passive", "Footprint", "Investigate"],
        'categories': ["Content Analysis"]
    }

    opts = {}
    results = None
    optdescs = {}

    def setup(self, sfc, userOpts=dict()):
        """Store the SpiderFoot context and merge user-supplied options."""
        self.sf = sfc
        self.results = self.tempStorage()
        for opt, value in userOpts.items():
            self.opts[opt] = value

    def watchedEvents(self):
        """Event types this module accepts as input."""
        return ['TARGET_WEB_CONTENT', 'DOMAIN_WHOIS', 'NETBLOCK_WHOIS', 'PHONE_NUMBER']

    def producedEvents(self):
        """Event types this module can emit."""
        return ['PHONE_NUMBER', 'PROVIDER_TELCO']

    def _notify(self, eventType, data, parent):
        """Emit an event, inheriting the parent's data source (or 'Unknown')."""
        evt = SpiderFootEvent(eventType, data, self.__name__, parent)
        if parent.moduleDataSource:
            evt.moduleDataSource = parent.moduleDataSource
        else:
            evt.moduleDataSource = "Unknown"
        self.notifyListeners(evt)

    def handleEvent(self, event):
        """Scan content events for phone numbers; look up the carrier for
        PHONE_NUMBER events."""
        eventName = event.eventType
        srcModuleName = event.module
        eventData = event.data

        # De-duplicate on a hash of the (potentially large) content.
        sourceData = self.sf.hashstring(eventData)
        if sourceData in self.results:
            return
        self.results[sourceData] = True

        self.debug(f"Received event, {eventName}, from {srcModuleName}")

        if eventName in ('TARGET_WEB_CONTENT', 'DOMAIN_WHOIS', 'NETBLOCK_WHOIS'):
            # Make potential phone numbers more friendly to parse
            searchable = eventData.replace('.', '-')
            for match in phonenumbers.PhoneNumberMatcher(searchable, region=None):
                e164 = phonenumbers.format_number(match.number, phonenumbers.PhoneNumberFormat.E164)
                self._notify("PHONE_NUMBER", e164, event)
            return

        if eventName != 'PHONE_NUMBER':
            return

        try:
            number = phonenumbers.parse(eventData)
        except Exception as e:
            self.debug(f"Error parsing phone number: {e}")
            return

        try:
            number_carrier = carrier.name_for_number(number, 'en')
        except Exception as e:
            self.debug(f"Error retrieving phone number carrier: {e}")
            return

        if not number_carrier:
            self.debug(f"No carrier information found for {eventData}")
            return

        self._notify("PROVIDER_TELCO", number_carrier, event)
        # NOTE: geocoder-based GEOINFO emission was sketched here historically
        # but remains disabled.
# End of sfp_phone class
| mit | dd5777c4743a4264fc2eb46d0846892a | 33.377193 | 97 | 0.557285 | 4.22306 | false | false | false | false |
smicallef/spiderfoot | modules/sfp_keybase.py | 1 | 9296 | # -*- coding: utf-8 -*-
# -------------------------------------------------------------------------------
# Name: sfp_keybase
# Purpose: Spiderfoot plugin to query KeyBase API
# to gather additional information about domain names and identified
# usernames.
#
# Author: Krishnasis Mandal <krishnasis@hotmail.com>
#
# Created: 22/05/2020
# Copyright: (c) Steve Micallef
# Licence: MIT
# -------------------------------------------------------------------------------
import json
import re
import urllib.error
import urllib.parse
import urllib.request
from spiderfoot import SpiderFootEvent, SpiderFootPlugin
class sfp_keybase(SpiderFootPlugin):
    """Query the Keybase user-lookup API for usernames, Keybase profile links
    and domain names, emitting social media, location, Bitcoin address and
    PGP key events from the returned profiles."""

    meta = {
        'name': "Keybase",
        'summary': "Obtain additional information about domain names and identified usernames.",
        'flags': [],
        'useCases': ["Footprint", "Investigate", "Passive"],
        'categories': ["Public Registries"],
        'dataSource': {
            'website': "https://keybase.io/",
            'model': "FREE_NOAUTH_UNLIMITED",
            'references': [
                "https://keybase.io/docs/api/1.0/call/user/lookup",
            ],
            'favIcon': "https://keybase.io/images/icons/icon-keybase-logo-48.png",
            'logo': "https://keybase.io/images/icons/icon-keybase-logo-48.png",
            'description': "Keybase is a key directory that maps social media identities to encryption keys "
            "in a publicly auditable manner.",
        }
    }

    opts = {
    }

    optdescs = {
    }

    # Per-scan state; reset in setup().
    results = None
    errorState = False

    def setup(self, sfc, userOpts=dict()):
        """Initialise per-scan state and merge user-supplied options."""
        self.sf = sfc
        self.results = self.tempStorage()

        for opt in list(userOpts.keys()):
            self.opts[opt] = userOpts[opt]

    def watchedEvents(self):
        """Event types this module accepts as input."""
        return ["USERNAME", "LINKED_URL_EXTERNAL", "DOMAIN_NAME"]

    def producedEvents(self):
        """Event types this module can emit."""
        return [
            "RAW_RIR_DATA", "SOCIAL_MEDIA", "USERNAME",
            "GEOINFO", "BITCOIN_ADDRESS", "PGP_KEY"
        ]

    def query(self, selector: str, qry: str) -> list:
        """Search Keybase for a domain name or username.

        Args:
            selector (str): query type ("usernames" | "domain")
            qry (str): username or domain name to look up

        Returns:
            list: matching user records from the "them" field of the
                API response, or None on any failure.
        """
        if not selector:
            return None

        if not qry:
            return None

        params = {
            selector: qry.encode('raw_unicode_escape').decode("ascii", errors='replace')
        }

        headers = {
            'Accept': "application/json"
        }

        res = self.sf.fetchUrl(
            'https://keybase.io/_/api/1.0/user/lookup.json?' + urllib.parse.urlencode(params),
            headers=headers,
            timeout=15,
            useragent=self.opts['_useragent']
        )

        # In this case, it will always be 200 if keybase is queried
        # The actual response codes are stored in status tag of the response
        if res['code'] != '200':
            self.error(f"Unexpected reply from Keybase: {res['code']}")
            return None

        try:
            content = json.loads(res['content'])
        except Exception as e:
            self.debug(f"Error processing JSON response: {e}")
            return None

        status = content.get('status')
        if not status:
            return None

        # Keybase signals errors via status.code; 0 means success.
        code = status.get('code')
        if code != 0:
            self.error(f"Unexpected JSON response code reply from Keybase: {code}")
            return None

        them = content.get('them')
        if not isinstance(them, list):
            return None

        return them

    def handleEvent(self, event) -> None:
        """Look up the event data on Keybase and emit events for every
        profile attribute found (name, location, social media, Bitcoin
        addresses, PGP keys)."""
        eventName = event.eventType
        srcModuleName = event.module
        eventData = event.data

        if self.errorState:
            return

        self.debug(f"Received event, {eventName}, from {srcModuleName}")

        if eventData in self.results:
            self.debug(f"Skipping {eventData}, already checked.")
            return
        self.results[eventData] = True

        # Extract username if a Keybase link is received
        if eventName == "LINKED_URL_EXTERNAL":
            linkRegex = r"^https?://keybase.io\/[A-Za-z0-9\-_\.]+"
            links = re.findall(linkRegex, eventData)
            if len(links) == 0:
                self.debug(f"Skipping URL {eventData}, as not a keybase link")
                return
            userName = links[0].split("/")[3]
            data = self.query('usernames', userName)
        elif eventName == "USERNAME":
            data = self.query('usernames', eventData)
        elif eventName == "DOMAIN_NAME":
            data = self.query('domain', eventData)
        else:
            return

        if not data:
            self.debug(f"No data found for {eventName}: {eventData}")
            return

        for user in data:
            if not user:
                continue

            # Basic information about the username
            basics = user.get('basics')
            if not basics:
                continue

            username = basics.get('username')
            if not username:
                continue

            # Failsafe to prevent reporting any wrongly received data
            if eventName == "USERNAME":
                if eventData.lower() != username.lower():
                    self.error("Username does not match received response, skipping")
                    continue

            # For newly discovered usernames, create a username event to be
            # used as a source event.
            if eventName in ['LINKED_URL_EXTERNAL', 'DOMAIN_NAME']:
                if username in self.results:
                    # BUGFIX: previously referenced `userName`, which is only
                    # bound in the LINKED_URL_EXTERNAL branch — a NameError
                    # for DOMAIN_NAME events reaching this point.
                    self.debug(f"Skipping {username}, already checked.")
                    continue
                source_event = SpiderFootEvent("USERNAME", username, self.__name__, event)
                self.notifyListeners(source_event)
                self.results[username] = True
            else:
                source_event = event

            evt = SpiderFootEvent("RAW_RIR_DATA", str(user), self.__name__, source_event)
            self.notifyListeners(evt)

            # Profile information about the username
            profile = user.get('profile')
            if profile:
                # Get and report full name of user
                fullName = profile.get('full_name')
                if fullName:
                    evt = SpiderFootEvent("RAW_RIR_DATA", f"Possible full name: {fullName}", self.__name__, source_event)
                    self.notifyListeners(evt)

                # Get and report location of user
                location = profile.get('location')
                if location:
                    evt = SpiderFootEvent("GEOINFO", location, self.__name__, source_event)
                    self.notifyListeners(evt)

            # Extract social media information
            proofsSummary = user.get('proofs_summary')
            if proofsSummary:
                socialMediaData = proofsSummary.get('all')
                if socialMediaData:
                    for socialMedia in socialMediaData:
                        socialMediaSite = socialMedia.get('proof_type')
                        socialMediaURL = socialMedia.get('service_url')
                        if socialMediaSite and socialMediaURL:
                            socialMedia = socialMediaSite + ": " + "<SFURL>" + socialMediaURL + "</SFURL>"
                            evt = SpiderFootEvent("SOCIAL_MEDIA", socialMedia, self.__name__, source_event)
                            self.notifyListeners(evt)

            # Get cryptocurrency addresses
            cryptoAddresses = user.get('cryptocurrency_addresses')

            # Extract and report bitcoin addresses if any
            if cryptoAddresses:
                bitcoinAddresses = cryptoAddresses.get('bitcoin')
                if bitcoinAddresses:
                    for bitcoinAddress in bitcoinAddresses:
                        btcAddress = bitcoinAddress.get('address')
                        if not btcAddress:
                            continue
                        evt = SpiderFootEvent("BITCOIN_ADDRESS", btcAddress, self.__name__, source_event)
                        self.notifyListeners(evt)

            # Extract PGP keys from the raw record.
            # NOTE(review): (?:PUBLIC?) matches "PUBLI"/"PUBLIC" only, and the
            # pattern has no DOTALL flag — presumably relies on str(user)
            # being a single line; confirm before changing.
            pgpRegex = r"-----BEGIN\s*PGP\s*(?:PUBLIC?)\s*KEY\s*BLOCK-----(.*?)-----END\s*PGP\s*(?:PUBLIC?)\s*KEY\s*BLOCK-----"
            pgpKeys = re.findall(pgpRegex, str(user))
            for pgpKey in pgpKeys:
                if len(pgpKey) < 300:
                    self.debug(f"PGP key size ({len(pgpKey)} bytes) is likely invalid (smaller than 300 bytes), skipping.")
                    continue
                # Remove unescaped \n literals
                pgpKey = pgpKey.replace("\\n", "\n")
                # Avoid reporting of duplicate keys
                pgpKeyHash = self.sf.hashstring(pgpKey)
                if pgpKeyHash in self.results:
                    continue
                self.results[pgpKeyHash] = True
                evt = SpiderFootEvent("PGP_KEY", pgpKey, self.__name__, source_event)
                self.notifyListeners(evt)
# End of sfp_keybase class
| mit | a6c843368d9c2947e20d4ba6c7962fdd | 32.927007 | 127 | 0.531519 | 4.378709 | false | false | false | false |
smicallef/spiderfoot | modules/sfp_callername.py | 1 | 4692 | # -------------------------------------------------------------------------------
# Name: sfp_callername
# Purpose: SpiderFoot plug-in to search CallerName.com for a phone number
# (US only) and retrieve location and reputation information.
#
# Author: <bcoles@gmail.com>
#
# Created: 2019-05-28
# Copyright: (c) bcoles 2019
# Licence: MIT
# -------------------------------------------------------------------------------
import re
import time
from spiderfoot import SpiderFootEvent, SpiderFootPlugin
class sfp_callername(SpiderFootPlugin):
    """Look up US phone numbers on CallerName.com for location and
    community-voted reputation (safe/unsafe) information."""

    meta = {
        'name': "CallerName",
        'summary': "Lookup US phone number location and reputation information.",
        'flags': [],
        'useCases': ["Footprint", "Investigate", "Passive"],
        'categories': ["Real World"],
        'dataSource': {
            'website': "http://callername.com/",
            'model': "FREE_NOAUTH_UNLIMITED",
            'references': [
                "https://callername.com/faq",
                "https://callername.com/stats"
            ],
            'favIcon': "http://static.callername.com/favicon.ico",
            'logo': "http://static.callername.com/img/logo.min.png",
            'description': "CallerName is a free, reverse phone lookup service for both cell and landline numbers. "
            "It relies on a database of white pages and business pages taken from public sources. "
            "The easy-to-use and streamlined interface allow users to look up the caller ID information of any number quickly. "
            "Just type the unknown number into the search bar to start. "
            "You need not pay nor register to use this 100% free service.",
        }
    }

    # Default options
    opts = {
    }

    # Option descriptions
    optdescs = {
    }

    # Per-scan state; reset in setup().
    results = None
    errorState = False

    def setup(self, sfc, userOpts=dict()):
        """Initialise per-scan state and merge user-supplied options."""
        self.sf = sfc
        self.results = self.tempStorage()
        self.errorState = False

        for opt in list(userOpts.keys()):
            self.opts[opt] = userOpts[opt]

    # What events is this module interested in for input
    def watchedEvents(self):
        return ['PHONE_NUMBER']

    # What events this module produces
    def producedEvents(self):
        return ['GEOINFO', 'MALICIOUS_PHONE_NUMBER']

    # Handle events sent to this module
    def handleEvent(self, event):
        """Query CallerName.com for a US phone number and emit GEOINFO and,
        where "unsafe" votes outnumber "safe" votes, MALICIOUS_PHONE_NUMBER."""
        eventName = event.eventType
        srcModuleName = event.module
        eventData = event.data

        if self.errorState:
            return

        if eventData in self.results:
            return
        self.results[eventData] = True

        self.debug(f"Received event, {eventName}, from {srcModuleName}")

        # Only US numbers are supported (+1)
        if not eventData.startswith('+1'):
            self.debug('Unsupported phone number: ' + eventData)
            return

        # Strip the "+1" country code prefix exactly once, then drop every
        # non-digit formatting character (spaces, parentheses, hyphens, dots).
        # BUGFIX: the previous lstrip('+1') stripped ALL leading '+' and '1'
        # characters (mangling numbers whose area code begins with 1), and the
        # chained strip() calls only trimmed the ends of the string.
        number = re.sub(r'[^0-9]', '', eventData[2:])

        if not number.isdigit():
            self.debug('Invalid phone number: ' + number)
            return

        # Query CallerName.com for the specified phone number
        url = f"https://callername.com/{number}"
        res = self.sf.fetchUrl(url, timeout=self.opts['_fetchtimeout'], useragent=self.opts['_useragent'])
        # Be polite to the (unauthenticated, rate-unknown) service.
        time.sleep(1)

        if res['content'] is None:
            self.debug('No response from CallerName.com')
            return

        if res['code'] != '200':
            self.debug('No phone information found for ' + eventData)
            return

        # Scrape the caller-ID location block from the HTML.
        location_match = re.findall(r'<div class="callerid"><h4>.*?</h4><p>(.+?)</p></div>', str(res['content']), re.MULTILINE | re.DOTALL)

        if location_match:
            location = location_match[0]
            if len(location) < 5 or len(location) > 100:
                self.debug("Skipping likely invalid location.")
            else:
                evt = SpiderFootEvent('GEOINFO', location, self.__name__, event)
                self.notifyListeners(evt)

        # Scrape community SAFE/UNSAFE vote counts.
        rep_good_match = re.findall(r'>SAFE.*?>(\d+) votes?<', str(res['content']))
        rep_bad_match = re.findall(r'>UNSAFE.*?>(\d+) votes?<', str(res['content']))

        if rep_good_match and rep_bad_match:
            good_votes = int(rep_good_match[0])
            bad_votes = int(rep_bad_match[0])

            if bad_votes > good_votes:
                text = f"CallerName [{eventData}]\n<SFURL>{url}</SFURL>"
                evt = SpiderFootEvent('MALICIOUS_PHONE_NUMBER', text, self.__name__, event)
                self.notifyListeners(evt)
# End of sfp_callername class
| mit | aabee7baecd3637824e24cc59cb64564 | 33.5 | 139 | 0.562873 | 4.010256 | false | false | false | false |
smicallef/spiderfoot | modules/sfp_blocklistde.py | 1 | 7574 | # -*- coding: utf-8 -*-
# -------------------------------------------------------------------------------
# Name: sfp_blocklistde
# Purpose: Check if a netblock or IP is malicious according to blocklist.de.
#
# Author: steve@binarypool.com
#
# Created: 14/12/2013
# Copyright: (c) Steve Micallef, 2013
# Licence: MIT
# -------------------------------------------------------------------------------
from netaddr import IPAddress, IPNetwork
from spiderfoot import SpiderFootEvent, SpiderFootPlugin
class sfp_blocklistde(SpiderFootPlugin):
    """Check if a netblock or IP address is malicious according to blocklist.de.

    blocklist.de publishes a plaintext feed of IP addresses reported for
    attacking SSH, mail, FTP, web and other services. This module downloads
    that feed (caching it locally) and reports any target, affiliate or
    netblock-member IP found on it.
    """

    meta = {
        'name': "blocklist.de",
        'summary': "Check if a netblock or IP is malicious according to blocklist.de.",
        'flags': [],
        'useCases': ["Investigate", "Passive"],
        'categories': ["Reputation Systems"],
        'dataSource': {
            'website': "http://www.blocklist.de/en/index.html",
            'model': "FREE_NOAUTH_UNLIMITED",
            'references': [
                "http://www.blocklist.de/en/api.html",
                "http://www.blocklist.de/en/rbldns.html",
                "http://www.blocklist.de/en/httpreports.html",
                "http://www.blocklist.de/en/export.html",
                "http://www.blocklist.de/en/delist.html?ip="
            ],
            'favIcon': "http://www.blocklist.de/templates/css/logo_web-size.jpg",
            'logo': "http://www.blocklist.de/templates/css/logo_web-size.jpg",
            'description': "www.blocklist.de is a free and voluntary service provided by a Fraud/Abuse-specialist, "
            "whose servers are often attacked via SSH-, Mail-Login-, FTP-, Webserver- and other services.\n"
            "The mission is to report any and all attacks to the respective abuse departments of the infected PCs/servers, "
            "to ensure that the responsible provider can inform their customer about the infection and disable the attacker."
        }
    }

    opts = {
        'checkaffiliates': True,
        'cacheperiod': 18,
        'checknetblocks': True,
        'checksubnets': True
    }

    optdescs = {
        'checkaffiliates': "Apply checks to affiliates?",
        'cacheperiod': "Hours to cache list data before re-fetching.",
        'checknetblocks': "Report if any malicious IPs are found within owned netblocks?",
        'checksubnets': "Check if any malicious IPs are found within the same subnet of the target?"
    }

    results = None
    errorState = False

    def setup(self, sfc, userOpts=dict()):
        """Initialise the module with the SpiderFoot context and user options."""
        self.sf = sfc
        self.results = self.tempStorage()
        self.errorState = False

        for opt in list(userOpts.keys()):
            self.opts[opt] = userOpts[opt]

    def watchedEvents(self):
        """Event types this module consumes."""
        return [
            "IP_ADDRESS",
            "IPV6_ADDRESS",
            "AFFILIATE_IPADDR",
            "AFFILIATE_IPV6_ADDRESS",
            "NETBLOCK_MEMBER",
            "NETBLOCKV6_MEMBER",
            "NETBLOCK_OWNER",
            "NETBLOCKV6_OWNER",
        ]

    def producedEvents(self):
        """Event types this module emits."""
        return [
            "BLACKLISTED_IPADDR",
            "BLACKLISTED_AFFILIATE_IPADDR",
            "BLACKLISTED_SUBNET",
            "BLACKLISTED_NETBLOCK",
            "MALICIOUS_IPADDR",
            "MALICIOUS_AFFILIATE_IPADDR",
            "MALICIOUS_NETBLOCK",
            "MALICIOUS_SUBNET",
        ]

    def queryBlacklist(self, target, targetType):
        """Check whether a target appears on the blocklist.de blacklist.

        Args:
            target (str): IP address, or netblock in CIDR notation.
            targetType (str): either "ip" or "netblock".

        Returns:
            bool: True if the target (or any listed IP within the netblock)
            is blacklisted.
        """
        blacklist = self.retrieveBlacklist()

        if not blacklist:
            return False

        if targetType == "ip":
            if target in blacklist:
                self.debug(f"IP address {target} found in blocklist.de blacklist.")
                return True
        elif targetType == "netblock":
            netblock = IPNetwork(target)
            for ip in blacklist:
                if IPAddress(ip) in netblock:
                    self.debug(f"IP address {ip} found within netblock/subnet {target} in blocklist.de blacklist.")
                    return True

        return False

    def retrieveBlacklist(self):
        """Fetch the blocklist.de feed, using the local cache when fresh.

        Returns:
            list: blacklisted IP addresses, or None on fetch error.
        """
        # Honour the user-configurable cache period; this was previously
        # hardcoded to 24 hours, silently ignoring the 'cacheperiod' option.
        blacklist = self.sf.cacheGet('blocklistde', self.opts.get('cacheperiod', 18))

        if blacklist is not None:
            return self.parseBlacklist(blacklist)

        res = self.sf.fetchUrl(
            "https://lists.blocklist.de/lists/all.txt",
            timeout=self.opts['_fetchtimeout'],
            useragent=self.opts['_useragent'],
        )

        if res['code'] != "200":
            self.error(f"Unexpected HTTP response code {res['code']} from blocklist.de.")
            self.errorState = True
            return None

        if res['content'] is None:
            self.error("Received no content from blocklist.de")
            self.errorState = True
            return None

        self.sf.cachePut("blocklistde", res['content'])

        return self.parseBlacklist(res['content'])

    def parseBlacklist(self, blacklist):
        """Parse plaintext blacklist

        Args:
            blacklist (str): plaintext blacklist from blocklist.de

        Returns:
            list: list of blacklisted IP addresses
        """
        ips = list()

        if not blacklist:
            return ips

        for ip in blacklist.split('\n'):
            ip = ip.strip()
            # Skip comment lines and anything that is not a valid IPv4/IPv6.
            if ip.startswith('#'):
                continue
            if not self.sf.validIP(ip) and not self.sf.validIP6(ip):
                continue
            ips.append(ip)

        return ips

    def handleEvent(self, event):
        """Handle an inbound event, querying blocklist.de and emitting results."""
        eventName = event.eventType
        eventData = event.data

        self.debug(f"Received event, {eventName}, from {event.module}")

        if eventData in self.results:
            self.debug(f"Skipping {eventData}, already checked.")
            return

        if self.errorState:
            return

        self.results[eventData] = True

        # Map the inbound event type to a query type and output event types.
        if eventName in ['IP_ADDRESS', 'IPV6_ADDRESS']:
            targetType = 'ip'
            malicious_type = 'MALICIOUS_IPADDR'
            blacklist_type = 'BLACKLISTED_IPADDR'
        elif eventName in ['AFFILIATE_IPADDR', 'AFFILIATE_IPV6_ADDRESS']:
            if not self.opts.get('checkaffiliates', False):
                return
            targetType = 'ip'
            malicious_type = 'MALICIOUS_AFFILIATE_IPADDR'
            blacklist_type = 'BLACKLISTED_AFFILIATE_IPADDR'
        elif eventName in ['NETBLOCK_OWNER', 'NETBLOCKV6_OWNER']:
            if not self.opts.get('checknetblocks', False):
                return
            targetType = 'netblock'
            malicious_type = 'MALICIOUS_NETBLOCK'
            blacklist_type = 'BLACKLISTED_NETBLOCK'
        elif eventName in ['NETBLOCK_MEMBER', 'NETBLOCKV6_MEMBER']:
            if not self.opts.get('checksubnets', False):
                return
            targetType = 'netblock'
            malicious_type = 'MALICIOUS_SUBNET'
            blacklist_type = 'BLACKLISTED_SUBNET'
        else:
            self.debug(f"Unexpected event type {eventName}, skipping")
            return

        self.debug(f"Checking maliciousness of {eventData} ({eventName}) with blocklist.de")

        if self.queryBlacklist(eventData, targetType):
            # https://www.blocklist.de/en/search.html?ip=<ip>
            url = "https://lists.blocklist.de/lists/all.txt"
            text = f"blocklist.de [{eventData}]\n<SFURL>{url}</SFURL>"

            evt = SpiderFootEvent(malicious_type, text, self.__name__, event)
            self.notifyListeners(evt)

            evt = SpiderFootEvent(blacklist_type, text, self.__name__, event)
            self.notifyListeners(evt)
# End of sfp_blocklistde class
| mit | d25d1743f77d1b2fb83001de2fcfc789 | 33.903226 | 125 | 0.56245 | 3.950965 | false | false | false | false |
smicallef/spiderfoot | modules/sfp_botscout.py | 1 | 6261 | # -*- coding: utf-8 -*-
# -------------------------------------------------------------------------------
# Name: sfp_botscout
# Purpose: SpiderFoot plug-in to search botsout.com using their API, for
# potential malicious IPs and e-mail addresses.
#
# Author: Steve Micallef <steve@binarypool.com>
#
# Created: 25/07/2016
# Copyright: (c) Steve Micallef 2016
# Licence: MIT
# -------------------------------------------------------------------------------
import urllib.error
import urllib.parse
import urllib.request
from spiderfoot import SpiderFootEvent, SpiderFootHelpers, SpiderFootPlugin
class sfp_botscout(SpiderFootPlugin):
    """Search BotScout.com's database of spam-bot IP addresses and e-mail addresses."""

    meta = {
        'name': "BotScout",
        'summary': "Searches BotScout.com's database of spam-bot IP addresses and e-mail addresses.",
        'flags': ["apikey"],
        'useCases': ["Passive", "Investigate"],
        'categories': ["Reputation Systems"],
        'dataSource': {
            'website': "https://botscout.com/",
            'model': "FREE_NOAUTH_LIMITED",
            'references': [
                "http://botscout.com/api.htm",
                "http://botscout.com/api_queries.htm",
                "http://botscout.com/getkey.htm",
                "http://botscout.com/corp_users.htm"
            ],
            'apiKeyInstructions': [
                "Visit http://botscout.com/getkey.htm",
                "Register a free account",
                "The API key will be emailed to your account"
            ],
            'favIcon': "https://botscout.com/favicon.ico",
            'logo': "http://botscout.com/image/bslogo.gif",
            'description': "BotScout helps prevent automated web scripts, known as \"bots\", "
            "from registering on forums, polluting databases, spreading spam, "
            "and abusing forms on web sites. We do this by tracking the names, IPs, "
            "and email addresses that bots use and logging them as unique signatures for future reference. "
            "We also provide a simple yet powerful API that you can use to test forms "
            "when they're submitted on your site.",
        }
    }

    opts = {
        "api_key": ""
    }

    optdescs = {
        "api_key": "Botscout.com API key. Without this you will be limited to 100 look-ups per day."
    }

    results = None
    errorState = False

    def setup(self, sfc, userOpts=dict()):
        """Initialise the module with the SpiderFoot context and user options."""
        self.sf = sfc
        self.results = self.tempStorage()
        # Reset the error state so a module instance reused across scans does
        # not remain permanently disabled after a transient API failure.
        self.errorState = False

        for opt in list(userOpts.keys()):
            self.opts[opt] = userOpts[opt]

    def watchedEvents(self):
        """Event types this module consumes."""
        return ['IP_ADDRESS', 'EMAILADDR']

    def producedEvents(self):
        """Event types this module emits."""
        return ["MALICIOUS_IPADDR", "BLACKLISTED_IPADDR", "MALICIOUS_EMAILADDR"]

    def _queryApi(self, params):
        """Query the BotScout test API with the supplied query parameters.

        Shared by queryIp() and queryEmail(), which previously duplicated
        this fetch logic.

        Args:
            params (dict): query string parameters.

        Returns:
            str: validated raw API response, or None on error.
        """
        res = self.sf.fetchUrl(
            f"https://botscout.com/test/?{urllib.parse.urlencode(params)}",
            timeout=self.opts['_fetchtimeout'],
            useragent=self.opts['_useragent'],
        )
        return self.parseApiResponse(res)

    def queryIp(self, ip):
        """Look up an IP address in BotScout.

        Args:
            ip (str): IP address to look up.

        Returns:
            str: raw API response, or None for invalid input or on error.
        """
        if not self.sf.validIP(ip):
            return None

        return self._queryApi({
            'ip': ip,
            'key': self.opts['api_key'],
        })

    def queryEmail(self, email):
        """Look up an e-mail address in BotScout.

        Args:
            email (str): e-mail address to look up.

        Returns:
            str: raw API response, or None for invalid input or on error.
        """
        if not SpiderFootHelpers.validEmail(email):
            return None

        return self._queryApi({
            'mail': email,
            'key': self.opts['api_key'],
        })

    def parseApiResponse(self, res: dict):
        """Validate a raw fetchUrl response from BotScout.

        Returns:
            str: the response body ("Y|..." or "N|..."), or None on error.
        """
        if not res:
            self.error("No response from BotScout.")
            return None

        if res['code'] != "200":
            self.error(f"Unexpected HTTP response code {res['code']} from BotScout.")
            self.errorState = True
            return None

        if not res['content']:
            self.error("No response from BotScout.")
            return None

        # The API reports errors as "! <message>".
        if res['content'].startswith("! "):
            self.error(f"Received error from BotScout: {res['content']}")
            self.errorState = True
            return None

        # Valid responses begin with "Y|" (listed) or "N|" (not listed).
        if not res['content'].startswith("Y|") and not res['content'].startswith("N|"):
            self.error("Error encountered processing response from BotScout.")
            return None

        return res['content']

    def handleEvent(self, event):
        """Handle an inbound event, querying BotScout and emitting results."""
        eventName = event.eventType
        eventData = event.data

        if self.errorState:
            return

        self.debug(f"Received event, {eventName}, from {event.module}")

        if not self.opts['api_key']:
            self.info("You enabled sfp_botscout but did not set an API key! Queries will be limited to 100 per day.")

        if eventData in self.results:
            self.debug(f"Skipping {eventData} as already searched.")
            return

        self.results[eventData] = True

        if eventName == "IP_ADDRESS":
            res = self.queryIp(eventData)
            if not res:
                return

            # Responses starting with "N|" mean the IP is not listed.
            if not res.startswith("Y|"):
                return

            self.info(f"Found BotScout entry for {eventData}: {res}")

            url = f"https://botscout.com/ipcheck.htm?ip={eventData}"
            text = f"BotScout [{eventData}]\n<SFURL>{url}</SFURL>"
            evt = SpiderFootEvent("MALICIOUS_IPADDR", text, self.__name__, event)
            self.notifyListeners(evt)
            evt = SpiderFootEvent("BLACKLISTED_IPADDR", text, self.__name__, event)
            self.notifyListeners(evt)
        elif eventName == "EMAILADDR":
            res = self.queryEmail(eventData)
            if not res:
                return

            if not res.startswith("Y|"):
                return

            url = f"https://botscout.com/search.htm?sterm={eventData}&stype=q"
            text = f"BotScout [{eventData}]\n<SFURL>{url}</SFURL>"
            evt = SpiderFootEvent("MALICIOUS_EMAILADDR", text, self.__name__, event)
            self.notifyListeners(evt)
        else:
            self.debug(f"Unexpected event type {eventName}, skipping")
# End of sfp_botscout class
| mit | 584337e439a131bfce30b0cb1ebd4239 | 31.952632 | 117 | 0.549752 | 3.957649 | false | false | false | false |
smicallef/spiderfoot | modules/sfp_wigle.py | 1 | 6033 | # -------------------------------------------------------------------------------
# Name: sfp_wigle
# Purpose: Query wigle.net to identify nearby WiFi access points.
#
# Author: Steve Micallef <steve@binarypool.com>
#
# Created: 10/09/2017
# Copyright: (c) Steve Micallef
# Licence: MIT
# -------------------------------------------------------------------------------
import base64
import datetime
import json
import urllib.error
import urllib.parse
import urllib.request
from spiderfoot import SpiderFootEvent, SpiderFootPlugin
class sfp_wigle(SpiderFootPlugin):
    """Query WiGLE to identify nearby WiFi access points."""

    meta = {
        'name': "WiGLE",
        'summary': "Query WiGLE to identify nearby WiFi access points.",
        'flags': ["apikey"],
        'useCases': ["Footprint", "Investigate", "Passive"],
        'categories': ["Secondary Networks"],
        'dataSource': {
            'website': "https://wigle.net/",
            'model': "FREE_AUTH_UNLIMITED",
            'references': [
                "https://api.wigle.net/",
                "https://api.wigle.net/swagger"
            ],
            'apiKeyInstructions': [
                "Visit https://wigle.net/",
                "Register a free account",
                "Navigate to https://wigle.net/account",
                "Click on 'Show my token'",
                "The encoded API key is adjacent to 'Encoded for use'"
            ],
            'favIcon': "https://wigle.net/favicon.ico?v=A0Ra9gElOR",
            'logo': "https://wigle.net/images/planet-bubble.png",
            'description': "We consolidate location and information of wireless networks world-wide to a central database, "
            "and have user-friendly desktop and web applications that can map, "
            "query and update the database via the web.",
        }
    }

    # Default options
    opts = {
        "api_key_encoded": "",
        "days_limit": "365",
        "variance": "0.01"
    }

    # Option descriptions
    optdescs = {
        "api_key_encoded": "Wigle.net base64-encoded API name/token pair.",
        "days_limit": "Maximum age of data to be considered valid.",
        "variance": "How tightly to bound queries against the latitude/longitude box extracted from idenified addresses. This value must be between 0.001 and 0.2."
    }

    # Be sure to completely clear any class variables in setup()
    # or you run the risk of data persisting between scan runs.
    results = None
    errorState = False

    def setup(self, sfc, userOpts=dict()):
        """Initialise the module with the SpiderFoot context and user options."""
        self.sf = sfc
        self.results = self.tempStorage()
        self.errorState = False

        # Clear / reset any other class member variables here
        # or you risk them persisting between threads.
        for opt in list(userOpts.keys()):
            self.opts[opt] = userOpts[opt]

    # What events is this module interested in for input
    def watchedEvents(self):
        return ["PHYSICAL_COORDINATES"]

    # What events this module produces
    def producedEvents(self):
        return ["WIFI_ACCESS_POINT"]

    def getnetworks(self, coords):
        """Query the WiGLE network search API for access points near coords.

        Args:
            coords (list): [latitude, longitude] values.

        Returns:
            list: "SSID (Net ID: ...)" strings, or None if nothing was found
            or an error occurred.
        """
        params = {
            'onlymine': 'false',
            'latrange1': str(coords[0]),
            'latrange2': str(coords[0]),
            'longrange1': str(coords[1]),
            'longrange2': str(coords[1]),
            'freenet': 'false',
            'paynet': 'false',
            'variance': self.opts['variance']
        }

        if self.opts['days_limit'] != "0":
            # Only consider results updated within the last days_limit days.
            dt = datetime.datetime.now() - datetime.timedelta(days=int(self.opts['days_limit']))
            date_calc = dt.strftime("%Y%m%d")
            params['lastupdt'] = date_calc

        hdrs = {
            "Accept": "application/json",
            "Authorization": "Basic " + self.opts['api_key_encoded']
        }

        res = self.sf.fetchUrl(
            "https://api.wigle.net/api/v2/network/search?" + urllib.parse.urlencode(params),
            timeout=30,
            useragent="SpiderFoot",
            headers=hdrs
        )

        if res['code'] == "404" or not res['content']:
            return None

        if "too many queries" in res['content']:
            self.error("Wigle.net query limit reached for the day.")
            # Stop querying for the remainder of the scan; previously the
            # module kept issuing doomed requests for every new coordinate.
            self.errorState = True
            return None

        ret = list()
        try:
            info = json.loads(res['content'])
            if len(info.get('results', [])) == 0:
                return None

            for r in info['results']:
                # Skip entries with a missing SSID or network ID.
                if None not in [r['ssid'], r['netid']]:
                    ret.append(r['ssid'] + " (Net ID: " + r['netid'] + ")")

            return ret
        except Exception as e:
            self.error(f"Error processing JSON response from WiGLE: {e}")
            return None

    def validApiKey(self, api_key):
        """Return True if api_key looks like a valid base64-encoded token."""
        if not api_key:
            return False

        try:
            # A valid key must survive a base64 decode/encode round-trip.
            if base64.b64encode(base64.b64decode(api_key)).decode('utf-8') != api_key:
                return False
        except Exception:
            return False

        return True

    # Handle events sent to this module
    def handleEvent(self, event):
        """Handle an inbound PHYSICAL_COORDINATES event."""
        eventName = event.eventType
        srcModuleName = event.module
        eventData = event.data

        if self.errorState:
            return

        if not self.validApiKey(self.opts['api_key_encoded']):
            self.error(f"Invalid API key for {self.__class__.__name__} module")
            self.errorState = True
            return

        self.debug(f"Received event, {eventName}, from {srcModuleName}")

        if eventData in self.results:
            self.debug(f"Skipping {eventData}, already checked.")
            return

        self.results[eventData] = True

        # Coordinates arrive as "lat, long"; strip spaces and split on comma.
        nets = self.getnetworks(eventData.replace(" ", "").split(","))

        if not nets:
            self.error("Couldn't get networks for coordinates from Wigle.net.")
            return

        for n in nets:
            e = SpiderFootEvent("WIFI_ACCESS_POINT", n, self.__name__, event)
            self.notifyListeners(e)
# End of sfp_wigle class
| mit | e4838b6c09de6a547baab5325f562563 | 31.262032 | 163 | 0.544008 | 4.024683 | false | false | false | false |
smicallef/spiderfoot | modules/sfp_myspace.py | 1 | 5214 | # -------------------------------------------------------------------------------
# Name: sfp_myspace
# Purpose: Query MySpace for username and location information.
#
# Author: <bcoles@gmail.com>
#
# Created: 2018-10-07
# Copyright: (c) bcoles 2018
# Licence: MIT
# -------------------------------------------------------------------------------
import re
from spiderfoot import SpiderFootEvent, SpiderFootPlugin
class sfp_myspace(SpiderFootPlugin):
    """Gather username and location information from MySpace.com profiles.

    Works in two stages: an EMAILADDR event triggers a people search that may
    yield a SOCIAL_MEDIA event; a SOCIAL_MEDIA event for a MySpace profile is
    then scraped for a GEOINFO location.
    """

    meta = {
        'name': "MySpace",
        'summary': "Gather username and location from MySpace.com profiles.",
        'flags': [],
        'useCases': ["Footprint", "Investigate", "Passive"],
        'categories': ["Social Media"],
        'dataSource': {
            'website': "https://myspace.com/",
            'model': "FREE_NOAUTH_UNLIMITED",
            'references': [
                "https://www.programmableweb.com/api/myspace"
            ],
            'favIcon': "https://x.myspacecdn.com/new/common/images/favicons/favicon.ico",
            'logo': "https://x.myspacecdn.com/new/common/images/favicons/114-Retina-iPhone.png",
            'description': "Myspace is a place where people come to connect, discover, and share.\n"
            "Through an open design, compelling editorial features, "
            "and analytics-based recommendations, Myspace creates a creative community "
            "of people who connect around mutual affinity and inspiration for the purpose "
            "of shaping, sharing, and discovering what's next.",
        }
    }

    # This module has no user-configurable options.
    opts = {
    }

    optdescs = {
    }

    results = None

    def setup(self, sfc, userOpts=dict()):
        """Initialise the module with the SpiderFoot context and user options."""
        self.sf = sfc
        self.__dataSource__ = "MySpace.com"
        self.results = self.tempStorage()

        for opt in list(userOpts.keys()):
            self.opts[opt] = userOpts[opt]

    def watchedEvents(self):
        """Event types this module consumes."""
        return ["EMAILADDR", "SOCIAL_MEDIA"]

    def producedEvents(self):
        """Event types this module emits."""
        return ["SOCIAL_MEDIA", "GEOINFO"]

    def handleEvent(self, event):
        """Handle an inbound event by scraping MySpace search/profile pages."""
        eventName = event.eventType
        srcModuleName = event.module
        eventData = event.data

        if eventData in self.results:
            return

        self.results[eventData] = True

        self.debug(f"Received event, {eventName}, from {srcModuleName}")

        # Search by email address
        if eventName == "EMAILADDR":
            email = eventData

            res = self.sf.fetchUrl("https://myspace.com/search/people?q=" + email,
                                   timeout=self.opts['_fetchtimeout'],
                                   useragent=self.opts['_useragent'])

            if res['content'] is None:
                self.error(f"Could not fetch MySpace content for {email}")
                return

            # Extract HTML containing potential profile matches
            profiles = re.findall(r'<a href="/[a-zA-Z0-9_]+">[^<]+</a></h6>', str(res['content']))

            if not profiles:
                self.debug(f"No profiles found for e-mail: {email}")
                return

            # The first result is the closest match, but whether it's an exact match is unknown.
            profile = profiles[0]

            # Check for email address as name, at the risk of missed results.
            # The pattern requires the e-mail to appear adjacent to the
            # profile link, bounded by punctuation/markup characters.
            try:
                matches = re.findall(r'<a href=\"\/([a-zA-Z0-9_]+)\".*[\&; :\"\#\*\(\"\'\;\,\>\.\?\!]+' + email + r'[\&; :\"\#\*\)\"\'\;\,\<\.\?\!]+', profile, re.IGNORECASE)
            except Exception:
                # A malformed e-mail can produce an invalid regex.
                self.debug("Malformed e-mail address, skipping.")
                return

            if not matches:
                self.debug("No concrete match for that e-mail.")
                return

            name = matches[0]

            e = SpiderFootEvent(
                "SOCIAL_MEDIA",
                f"MySpace: <SFURL>https://myspace.com/{name}</SFURL>",
                self.__name__,
                event
            )
            self.notifyListeners(e)

        # Retrieve location from MySpace profile
        if eventName == "SOCIAL_MEDIA":
            # SOCIAL_MEDIA event data has the form "Network: <SFURL>url</SFURL>".
            try:
                network = eventData.split(": ")[0]
                url = eventData.split(": ")[1].replace("<SFURL>", "").replace("</SFURL>", "")
            except Exception as e:
                self.debug(f"Unable to parse SOCIAL_MEDIA: {eventData} ({e})")
                return

            if network != "MySpace":
                self.debug(f"Skipping social network profile, {url}, as not a MySpace profile")
                return

            res = self.sf.fetchUrl(url, timeout=self.opts['_fetchtimeout'],
                                   useragent=self.opts['_useragent'])

            if res['content'] is None:
                return

            data = re.findall(r'<div class="location_[^"]+" data-display-text="(.+?)"', res['content'])

            if not data:
                return

            location = data[0]

            # Sanity-check the scraped value before reporting it.
            if len(location) < 5 or len(location) > 100:
                self.debug("Skipping likely invalid location.")
                return

            e = SpiderFootEvent("GEOINFO", location, self.__name__, event)
            self.notifyListeners(e)
# End of sfp_myspace class
| mit | b1c3931942805ba9d3597ebe5415d9d5 | 33.993289 | 174 | 0.515343 | 4.161213 | false | false | false | false |
byu-dml/metalearn | metalearn/metafeatures/base.py | 1 | 4530 | from collections import abc
import inspect
import typing
from metalearn.metafeatures.constants import ProblemType, MetafeatureGroup
class ResourceComputer:
    """Wraps a resource-computing function together with metadata about it.

    Parameters
    ----------
    computer
        The function that computes the resources.
    returns
        The names of the resources that ``computer`` returns, specified in
        the same order as ``computer`` returns them.
    argmap
        A custom map of ``computer``'s argument names to the global resource
        names (or literal values) that will be passed as ``computer``'s
        arguments when ``computer`` is called.
    """

    def __init__(
        self, computer: typing.Callable, returns: typing.Sequence[str], argmap: typing.Mapping[str, typing.Any] = None
    ) -> None:
        spec = inspect.getfullargspec(computer)

        # TODO: If needed, add support for `computer` functions that use these types of arguments.
        has_unsupported_args = (
            spec.varargs is not None
            or spec.varkw is not None
            or spec.defaults is not None
            or len(spec.kwonlyargs) > 0
        )
        if has_unsupported_args:
            raise ValueError('`computer` must use only positional arguments with no default values')

        self.computer = computer
        self.returns = returns

        # By default every argument maps to the resource of the same name.
        # Entries in `argmap` override this with either a global resource
        # name (e.g. `'XSample'`) or a literal value (e.g. `5`).
        self.argmap = dict(zip(spec.args, spec.args))
        if argmap is not None:
            self.argmap.update(argmap)

    def __call__(self, *args, **kwargs):
        """Forward the call directly to the wrapped computer function."""
        return self.computer(*args, **kwargs)

    @property
    def name(self) -> str:
        """The function name of the wrapped computer."""
        return self.computer.__name__
class MetafeatureComputer(ResourceComputer):
    """Wraps a metafeature-computing function together with metadata about it.

    Parameters
    ----------
    computer
        The function that computes the metafeatures.
    returns
        The names of the metafeatures that ``computer`` returns, specified
        in the same order as ``computer`` returns them.
    problem_type
        The type of ML problem ``computer``'s metafeatures can be computed
        for.
    groups
        The metafeature groups this computer's returned metafeatures belong
        to, e.g. statistical, info-theoretic, simple.
    argmap
        A custom map of ``computer``'s argument names to the global resource
        names that will be passed as ``computer``'s arguments when
        ``computer`` is called.
    """

    def __init__(
        self, computer: typing.Callable, returns: typing.Sequence[str], problem_type: ProblemType,
        groups: typing.Sequence[MetafeatureGroup], argmap: typing.Mapping[str, typing.Any] = None
    ) -> None:
        # TODO: Add support for passing a string to `returns`, not just a list?
        super().__init__(computer, returns, argmap)
        self.problem_type = problem_type
        self.groups = groups
class collectordict(abc.Mapping):
    """A partially mutable mapping in which keys can be set at most one time.

    A LookupError is raised if a key is set more than once. Keys cannot be
    deleted. For simplicity, all values must be set manually, not in
    __init__.
    """

    # Underlying mapping type; may be overridden by subclasses.
    dict_cls = dict

    def __init__(self):
        self._dict = self.dict_cls()

    def __repr__(self):
        # Readable form, e.g. "collectordict({'a': 1})"; resolves the old
        # TODO for a string representation (str() falls back to this).
        return f'{type(self).__name__}({self._dict!r})'

    def __getitem__(self, key):
        return self._dict[key]

    def __iter__(self):
        return iter(self._dict)

    def __len__(self):
        return len(self._dict)

    def __setitem__(self, key, value):
        """Set `key` to `value`, raising LookupError if `key` already exists."""
        if key in self._dict:
            raise LookupError(f'{key} already exists')
        self._dict[key] = value

    def update(self, mapping: typing.Mapping):
        """Set all pairs from `mapping`, enforcing the write-once rule per key."""
        for key, value in mapping.items():
            self[key] = value
def build_resources_info(*computers: ResourceComputer) -> collectordict:
    """Combine resource computers into a mapping of resource name to computer.

    Because the result is a collectordict, a LookupError is raised if two
    computers claim the same resource name.
    """
    info = collectordict()
    for comp in computers:
        for resource in comp.returns:
            info[resource] = comp
    return info
| mit | 8bd2d6d205e123baf61022d139a228e2 | 34.116279 | 118 | 0.645475 | 4.376812 | false | false | false | false |
byu-dml/metalearn | metalearn/metafeatures/text_metafeatures.py | 1 | 7959 | from collections import Counter
from itertools import chain
import numpy as np
from metalearn.metafeatures.common_operations import profile_distribution
from metalearn.metafeatures.base import build_resources_info, ResourceComputer, MetafeatureComputer
from metalearn.metafeatures.constants import ProblemType, MetafeatureGroup
def get_string_lengths_array_from_text_features(text_features_array):
    """Return a 1-tuple containing per-feature series of string lengths.

    Args:
        text_features_array: iterable of pandas Series of strings (the
            "NoNaNTextFeatures" resource).

    Returns:
        tuple: single-element tuple holding the list of length Series, one
        per text feature.
    """
    lengths = [feature.apply(len) for feature in text_features_array]
    return (lengths,)

# Rebind the function as a ResourceComputer so the metafeature framework can
# resolve its argument from the global resource "NoNaNTextFeatures".
get_string_lengths_array_from_text_features = ResourceComputer(
    get_string_lengths_array_from_text_features,
    ["ArrayOfStringLengthsOfTextFeatures"],
    { "text_features_array": "NoNaNTextFeatures" }
)
def get_string_length_means(string_lengths_array):
    """Profile the distribution of per-feature mean string lengths.

    Returns the nine summary statistics produced by profile_distribution,
    matching the metafeature names declared below.
    """
    means = [feature.mean() for feature in string_lengths_array]
    return profile_distribution(means)

# Rebind as a MetafeatureComputer, declaring the metafeature names, problem
# type, group and resource argument for the function above.
get_string_length_means = MetafeatureComputer(
    get_string_length_means,
    [
        "MeanMeansOfStringLengthOfTextFeatures",
        "StdevMeansOfStringLengthOfTextFeatures",
        "SkewMeansOfStringLengthOfTextFeatures",
        "KurtosisMeansOfStringLengthOfTextFeatures",
        "MinMeansOfStringLengthOfTextFeatures",
        "Quartile1MeansOfStringLengthOfTextFeatures",
        "Quartile2MeansOfStringLengthOfTextFeatures",
        "Quartile3MeansOfStringLengthOfTextFeatures",
        "MaxMeansOfStringLengthOfTextFeatures"
    ],
    ProblemType.ANY,
    [MetafeatureGroup.TEXT],
    {
        "string_lengths_array": "ArrayOfStringLengthsOfTextFeatures"
    }
)
def get_string_length_stdev(string_lengths_array):
    """Profile the distribution of per-feature string-length standard deviations."""
    stdevs = [feature.std() for feature in string_lengths_array]
    return profile_distribution(stdevs)

# Rebind as a MetafeatureComputer, declaring the metafeature names, problem
# type, group and resource argument for the function above.
get_string_length_stdev = MetafeatureComputer(
    get_string_length_stdev,
    [
        "MeanStdDevOfStringLengthOfTextFeatures",
        "StdevStdDevOfStringLengthOfTextFeatures",
        "SkewStdDevOfStringLengthOfTextFeatures",
        "KurtosisStdDevOfStringLengthOfTextFeatures",
        "MinStdDevOfStringLengthOfTextFeatures",
        "Quartile1StdDevOfStringLengthOfTextFeatures",
        "Quartile2StdDevOfStringLengthOfTextFeatures",
        "Quartile3StdDevOfStringLengthOfTextFeatures",
        "MaxStdDevOfStringLengthOfTextFeatures"
    ],
    ProblemType.ANY,
    [MetafeatureGroup.TEXT],
    {
        "string_lengths_array": "ArrayOfStringLengthsOfTextFeatures"
    }
)
def get_string_length_skewness(string_lengths_array):
    """Profile the distribution of per-feature string-length skewness values."""
    skews = [feature.skew() for feature in string_lengths_array]
    return profile_distribution(skews)

# Rebind as a MetafeatureComputer, declaring the metafeature names, problem
# type, group and resource argument for the function above.
get_string_length_skewness = MetafeatureComputer(
    get_string_length_skewness,
    [
        "MeanSkewnessOfStringLengthOfTextFeatures",
        "StdevSkewnessOfStringLengthOfTextFeatures",
        "SkewSkewnessOfStringLengthOfTextFeatures",
        "KurtosisSkewnessOfStringLengthOfTextFeatures",
        "MinSkewnessOfStringLengthOfTextFeatures",
        "Quartile1SkewnessOfStringLengthOfTextFeatures",
        "Quartile2SkewnessOfStringLengthOfTextFeatures",
        "Quartile3SkewnessOfStringLengthOfTextFeatures",
        "MaxSkewnessOfStringLengthOfTextFeatures"
    ],
    ProblemType.ANY,
    [MetafeatureGroup.TEXT],
    {
        "string_lengths_array": "ArrayOfStringLengthsOfTextFeatures"
    }
)
def get_string_length_kurtosis(string_lengths_array):
    """Profile the distribution of per-feature string-length kurtosis values."""
    kurtoses = [feature.kurtosis() for feature in string_lengths_array]
    return profile_distribution(kurtoses)

# Rebind as a MetafeatureComputer, declaring the metafeature names, problem
# type, group and resource argument for the function above.
get_string_length_kurtosis = MetafeatureComputer(
    get_string_length_kurtosis,
    [
        "MeanKurtosisOfStringLengthOfTextFeatures",
        "StdevKurtosisOfStringLengthOfTextFeatures",
        "SkewKurtosisOfStringLengthOfTextFeatures",
        "KurtosisKurtosisOfStringLengthOfTextFeatures",
        "MinKurtosisOfStringLengthOfTextFeatures",
        "Quartile1KurtosisOfStringLengthOfTextFeatures",
        "Quartile2KurtosisOfStringLengthOfTextFeatures",
        "Quartile3KurtosisOfStringLengthOfTextFeatures",
        "MaxKurtosisOfStringLengthOfTextFeatures"
    ],
    ProblemType.ANY,
    [MetafeatureGroup.TEXT],
    {
        "string_lengths_array": "ArrayOfStringLengthsOfTextFeatures"
    }
)
def get_mfs_for_tokens_split_by_space(text_features_array, most_common_limit):
    """Compute token-based metafeatures over whitespace-split text features.

    Args:
        text_features_array: iterable of pandas Series of strings (NaN-free).
        most_common_limit: currently unused; retained for the commented-out
            "most common token" metafeatures below.

    Returns:
        tuple: (number_of_tokens, number_of_distinct_tokens,
        number_of_tokens_containing_numeric_char, ratio_of_distinct_tokens,
        ratio_of_tokens_containing_numeric_char)
    """
    def isnumeric(token):
        # True if the token parses as a float (covers ints and decimals).
        try:
            float(token)
        except ValueError:
            return False
        return True

    def contains_numeric(token):
        return any([char.isdigit() for char in token])

    def flatten_nested_list(nested_list):
        return list(chain.from_iterable(nested_list))

    def filter_and_aggregate(tokens_series, f):
        # Keep, per row, only the tokens for which predicate `f` holds, then
        # flatten the per-row token lists into one flat list.
        filtered_tokens = tokens_series.combine(tokens_series.apply(f), lambda tokens, mask: np.array(tokens)[mask.astype(bool)].tolist())
        return flatten_nested_list(filtered_tokens)

    # Replace the scalar predicates with vectorized forms operating on
    # arrays of tokens (intentional rebinding of the names above).
    isnumeric = np.vectorize(isnumeric, otypes=[object])
    isalnum = np.vectorize(str.isalnum, otypes=[object])
    contains_numeric = np.vectorize(contains_numeric, otypes=[object])

    tokens = []
    numeric_tokens = []
    alphanumeric_tokens = []
    contains_numeric_tokens = []
    for feature in text_features_array:
        feature_tokens = feature.apply(str.split)
        tokens.extend(flatten_nested_list(feature_tokens))
        numeric_tokens.extend(filter_and_aggregate(feature_tokens, isnumeric))
        alphanumeric_tokens.extend(filter_and_aggregate(feature_tokens, isalnum))
        contains_numeric_tokens.extend(filter_and_aggregate(feature_tokens, contains_numeric))

    # Only token_counts feeds the returned metafeatures today; the other
    # counters are retained for the commented-out "most common token"
    # metafeatures below.
    token_counts = Counter(tokens)
    numeric_token_counts = Counter(numeric_tokens)
    alphanumeric_token_counts = Counter(alphanumeric_tokens)
    contains_numeric_token_counts = Counter(contains_numeric_tokens)

    number_of_tokens = len(tokens)
    number_of_distinct_tokens = len(token_counts)
    number_of_tokens_containing_numeric_char = len(contains_numeric_tokens)
    # Guard against division by zero when there are no tokens at all.
    ratio_of_distinct_tokens = 0 if number_of_tokens == 0 else (number_of_distinct_tokens / number_of_tokens)
    ratio_of_tokens_containing_numeric_char = 0 if number_of_tokens == 0 else (number_of_tokens_containing_numeric_char / number_of_tokens)

    return number_of_tokens, number_of_distinct_tokens, number_of_tokens_containing_numeric_char, ratio_of_distinct_tokens, ratio_of_tokens_containing_numeric_char
# todo: re-include these loops after deciding what to do with most_common_tokens,
# todo: most_common_alphanumeric_tokens, and most_common_numeric_tokens
# get most_common_alphanumeric_tokens
# most_common_alphanumeric_tokens = []
# for token in sorted(alphanumeric_token_counts, key=alphanumeric_token_counts.get, reverse=True):
# most_common_alphanumeric_tokens.append(
# {
# "token": token,
# "count": alphanumeric_token_counts[token],
# "ratio": alphanumeric_token_counts[token]/len(token_counts)
# }
# )
# if len(most_common_alphanumeric_tokens) == most_common_limit:
# break
#
# # get most_common_numeric_tokens
# most_common_numeric_tokens = []
# for token in sorted(numeric_token_counts, key=numeric_token_counts.get, reverse=True):
# most_common_numeric_tokens.append(
# {
# "token": token,
# "count": numeric_token_counts[token],
# "ratio": numeric_token_counts[token]/len(token_counts)
# }
# )
# if len(most_common_numeric_tokens) == most_common_limit:
# break
#
# # get most_common_tokens
# most_common_tokens = []
# for token in sorted(token_counts, key=token_counts.get, reverse=True):
# most_common_token_counts.append(
# {
# "token": token,
# "count": token_counts[token],
# "ratio": token_counts[token]/len(token_counts)
# }
# )
# if len(most_common_tokens) == most_common_limit:
# break
# Rebind as a MetafeatureComputer; note that `most_common_limit` is supplied
# as a literal value (10) through the argmap rather than as a resource name.
get_mfs_for_tokens_split_by_space = MetafeatureComputer(
    get_mfs_for_tokens_split_by_space,
    [
        "NumberOfTokens",
        "NumberOfDistinctTokens",
        "NumberOfTokensContainingNumericChar",
        "RatioOfDistinctTokens",
        "RatioOfTokensContainingNumericChar"
    ],
    ProblemType.ANY,
    [MetafeatureGroup.TEXT],
    {
        "text_features_array": "NoNaNTextFeatures",
        'most_common_limit': 10,
    }
)
"""
A list of all ResourceComputer
instances in this module.
"""
resources_info = build_resources_info(
get_string_lengths_array_from_text_features
)
"""
A list of all MetafeatureComputer
instances in this module.
"""
metafeatures_info = build_resources_info(
get_string_length_means,
get_string_length_stdev,
get_string_length_skewness,
get_string_length_kurtosis,
get_mfs_for_tokens_split_by_space
)
| mit | dacb922a8302ef32cd36740811cfa265 | 31.092742 | 160 | 0.773338 | 3.036627 | false | false | false | false |
staticafi/symbiotic | lib/symbioticpy/symbiotic/targets/testcomp.py | 1 | 2706 | """
BenchExec is a framework for reliable benchmarking.
This file is part of BenchExec.
Copyright (C) 2007-2015 Dirk Beyer
Copyright (C) 2016-2020 Marek Chalupa
All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from symbiotic.utils.utils import process_grep
from symbiotic.utils import dbg
from symbiotic.exceptions import SymbioticException
try:
import benchexec.util as util
import benchexec.result as result
from benchexec.tools.template import BaseTool
except ImportError:
# fall-back solution (at least for now)
import symbiotic.benchexec.util as util
import symbiotic.benchexec.result as result
from symbiotic.benchexec.tools.template import BaseTool
from . tool import SymbioticBaseTool
try:
from benchexec.tools.template import BaseTool
except ImportError:
# fall-back solution (at least for now)
from symbiotic.benchexec.tools.template import BaseTool
from . klee import SymbioticTool as KleeTool
class SymbioticTool(KleeTool):
    """
    Symbiotic tool info object

    Test-Comp (test generation) variant built on the KLEE-based tool.
    """

    def __init__(self, opts):
        super().__init__(opts)

    def name(self):
        return 'svcomp' # if renamed, adjust models in lib\ folder

    def executable(self):
        """
        Find the path to the executable file that will get executed.
        This method always needs to be overridden,
        and most implementations will look similar to this one.
        The path returned should be relative to the current directory.
        """
        return util.find_executable('kleetester.py', 'bin/kleetester.py',
                                    'scripts/kleetester.py')

    def cmdline(self, executable, options, tasks, propertyfile=None, rlimits={}):
        """Build the test-generator command line.

        NOTE(review): `options`, `propertyfile` and `rlimits` are ignored
        here; the property is taken from self._options instead -- confirm
        this is intentional for Test-Comp runs.
        """
        assert len(tasks) == 1

        # Default to coverage-guided test generation.
        prp = 'coverage'
        prop = self._options.property
        iserr = prop.errorcall()
        if iserr:
            # For error-call properties, target the single user-specified
            # call, excluding the standard error/assert functions.
            calls = [x for x in prop.getcalls() if x not in ['__VERIFIER_error', '__assert_fail']]
            if len(calls) == 1:
                prp = calls[0]
        return [executable, prp, self._options.testsuite_output] + tasks

    def determine_result(self, returncode, returnsignal, output, isTimeout):
        # Always report completion -- test generation produces a test suite
        # rather than a verdict (presumably scored externally; confirm).
        return result.RESULT_DONE
| mit | 12948e9d3a2e9b50ef3558ed33712443 | 30.835294 | 98 | 0.695861 | 3.973568 | false | false | false | false |
pricingassistant/mrq | mrq/logger.py | 1 | 3573 | from __future__ import print_function
from future.builtins import object
from future.utils import iteritems
from collections import defaultdict
import logging
import datetime
import sys
import pymongo
PY3 = sys.version_info > (3,)
def _encode_if_unicode(string):
    """Return *string* UTF-8-encoded on Python 2; unchanged on Python 3."""
    # On Python 3 all strings are text already -- nothing to do.
    if PY3:
        return string
    # Python 2: turn `unicode` into UTF-8 bytes, pass `str` through as-is.
    if not isinstance(string, unicode):  # pylint: disable=undefined-variable
        return string
    return string.encode("utf-8", "replace")
def _decode_if_str(string):
    """Coerce *string* to a text type (str on Py3, unicode on Py2)."""
    # On Python 3 simply coerce anything to `str`.
    if PY3:
        return str(string)
    # Python 2: decode byte strings, otherwise coerce to `unicode`.
    if isinstance(string, str):
        return string.decode("utf-8", "replace")
    return unicode(string)  # pylint: disable=undefined-variable
class MongoHandler(logging.Handler):

    """ Job/Worker-aware log handler.

    Buffers formatted log lines per worker and per job and flushes them
    in bulk into a capped MongoDB collection.

    We used the standard logging module before but it suffers from memory leaks
    when creating lots of logger objects.
    """

    def __init__(self, worker=None, mongodb_logs_size=16 * 1024 * 1024):
        super(MongoHandler, self).__init__()
        self.buffer = {}
        self.collection = None
        self.mongodb_logs_size = mongodb_logs_size
        self.reset()
        self.set_collection()

        # Import here to avoid import loop
        # pylint: disable=cyclic-import
        from .context import get_current_job
        self.get_current_job = get_current_job

        self.worker = worker

    def set_collection(self):
        """ Resolve the target MongoDB collection from the current config.

        Ensures the collection is capped at self.mongodb_logs_size,
        converting an existing uncapped collection or creating a new
        capped one as needed.
        """
        from .context import get_current_config, connections
        config = get_current_config()
        collection = config["mongodb_logs"]
        if collection == "1":
            self.collection = connections.mongodb_logs.mrq_logs

        if self.collection and self.mongodb_logs_size:
            if "mrq_logs" in connections.mongodb_logs.collection_names() and not self.collection.options().get("capped"):
                connections.mongodb_logs.command({"convertToCapped": "mrq_logs", "size": self.mongodb_logs_size})
            elif "mrq_logs" not in connections.mongodb_logs.collection_names():
                try:
                    connections.mongodb_logs.create_collection("mrq_logs", capped=True, size=self.mongodb_logs_size)
                except pymongo.errors.OperationFailure:  # The collection might have been created in the meantime
                    pass

    def reset(self):
        """ Empty the per-worker and per-job log buffers. """
        self.buffer = {
            "workers": defaultdict(list),
            "jobs": defaultdict(list)
        }

    def emit(self, record):
        """ Buffer one formatted record (called by the logging machinery). """
        log_entry = self.format(record)

        if self.collection is False:
            return

        log_entry = _decode_if_str(log_entry)

        if self.worker is not None:
            self.buffer["workers"][self.worker].append(log_entry)

        # "mrq.current" is the logger bound to the currently running job
        if record.name == "mrq.current":
            job_object = self.get_current_job()
            if job_object:
                self.buffer["jobs"][job_object.id].append(log_entry)

    def flush(self):
        """ Bulk-insert all buffered log lines into MongoDB and reset. """

        # We may log some stuff before we are even connected to Mongo!
        if not self.collection:
            return

        inserts = [{
            "worker": k,
            "logs": "\n".join(v) + "\n"
        } for k, v in iteritems(self.buffer["workers"])] + [{
            "job": k,
            "logs": "\n".join(v) + "\n"
        } for k, v in iteritems(self.buffer["jobs"])]

        if len(inserts) == 0:
            return

        self.reset()
        try:
            self.collection.insert(inserts)
        except Exception as e:  # pylint: disable=broad-except
            # BUG fix: Handler.emit() takes a LogRecord, not a string --
            # calling self.emit("...") here raised inside the error path.
            # Report the failure directly to stderr instead.
            print("Log insert failed: %s" % e, file=sys.stderr)
| mit | d3e447978660a1b13016c67489aa5b47 | 30.342105 | 121 | 0.599216 | 4.019123 | false | false | false | false |
staticafi/symbiotic | lib/symbioticpy/symbiotic/targets/predatorhp.py | 1 | 2536 | """
BenchExec is a framework for reliable benchmarking.
This file is part of BenchExec.
Copyright (C) 2007-2015 Dirk Beyer
All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
try:
from benchexec.tools.predatorhp import Tool as PredatorHPTool
except ImportError:
from .. benchexec.tools.predatorhp import Tool as PredatorHPTool
from . tool import SymbioticBaseTool
from symbiotic.utils.process import runcmd
from symbiotic.utils.watch import DbgWatch
try:
from symbiotic.versions import llvm_version
except ImportError:
# the default version
llvm_version='8.0.1'
class SymbioticTool(PredatorHPTool, SymbioticBaseTool):
    """
    PredatorHP integrated into Symbiotic.
    """

    REQUIRED_PATHS = PredatorHPTool.REQUIRED_PATHS

    def __init__(self, opts):
        SymbioticBaseTool.__init__(self, opts)
        self._memsafety = self._options.property.memsafety()

    def llvm_version(self):
        """Return the required version of LLVM."""
        return llvm_version

    def set_environment(self, symbiotic_dir, opts):
        """Set environment for the tool: do not link any functions."""
        opts.linkundef = []

    def passes_before_verification(self):
        """LLVM passes that should run before the verification itself."""
        # llvm2c has a bug with PHI nodes, so lower them away first
        extra_passes = ["-lowerswitch", "-simplifycfg", "-reg2mem", "-simplifycfg"]
        return super().passes_before_verification() + extra_passes

    def actions_before_verification(self, symbiotic):
        # Predator consumes C sources, so translate the bitcode back to C
        c_file = symbiotic.curfile + '.c'
        runcmd(['llvm2c', symbiotic.curfile, '--o', c_file], DbgWatch('all'))
        symbiotic.curfile = c_file

    def cmdline(self, executable, options, tasks, propertyfile=None, rlimits={}):
        """Extend PredatorHP's command line with the target architecture."""
        cmd = PredatorHPTool.cmdline(self, executable, options, tasks,
                                     propertyfile, rlimits)
        if self._options.is32bit:
            arch_opt = "--compiler-options=-m32"
        else:
            arch_opt = "--compiler-options=-m64"
        cmd.append(arch_opt)
        return cmd
| mit | f312e5a0506e908953a551c04e2eb4db | 29.926829 | 93 | 0.681388 | 3.807808 | false | false | false | false |
pricingassistant/mrq | mrq/basetasks/cleaning.py | 1 | 2460 | from future.builtins import str
from mrq.queue import Queue
from mrq.task import Task
from mrq.job import Job
from mrq.context import log, connections, run_task, get_current_config
import datetime
import time
class RequeueInterruptedJobs(Task):
    """ Requeue jobs that were marked as status=interrupt when a worker got a SIGTERM. """

    # Only one instance of this maintenance task may run at a time.
    max_concurrency = 1

    def run(self, params):
        action_params = {
            "status": "interrupt",
            "action": "requeue_retry"
        }
        return run_task("mrq.basetasks.utils.JobAction", action_params)
class RequeueRetryJobs(Task):
    """ Requeue jobs that were marked as retry and whose retry date has passed. """

    # Only one instance of this maintenance task may run at a time.
    max_concurrency = 1

    def run(self, params):
        # (a leftover debug print("IN") was removed here -- it polluted
        # the worker logs on every scheduled run)
        return run_task("mrq.basetasks.utils.JobAction", {
            "status": "retry",
            "dateretry": {"$lte": datetime.datetime.utcnow()},
            "action": "requeue_retry"
        })
class RequeueStartedJobs(Task):
    """ Requeue jobs that were marked as status=started and never finished.
        That may be because the worker got a SIGKILL or was terminated abruptly.
        The timeout parameter of this task is in addition to the task's own timeout.
    """

    # Only one instance of this maintenance task may run at a time.
    max_concurrency = 1

    def run(self, params):
        # Grace period (seconds) added on top of each job's own timeout
        # before a "started" job is considered lost.
        additional_timeout = params.get("timeout", 300)

        stats = {
            "requeued": 0,
            "started": 0
        }

        # There shouldn't be that much "started" jobs so we can quite safely
        # iterate over them.

        fields = {
            "_id": 1, "datestarted": 1, "queue": 1, "path": 1, "retry_count": 1, "worker": 1, "status": 1
        }
        for job_data in connections.mongodb_jobs.mrq_jobs.find(
                {"status": "started"}, projection=fields):
            job = Job(job_data["_id"])
            job.set_data(job_data)
            stats["started"] += 1

            # A job is expired once it has been running longer than its own
            # timeout plus the grace period.
            expire_date = datetime.datetime.utcnow(
            ) - datetime.timedelta(seconds=job.timeout + additional_timeout)

            requeue = job_data["datestarted"] < expire_date

            if not requeue:
                # Check that the supposedly running worker still exists
                requeue = not connections.mongodb_jobs.mrq_workers.find_one(
                    {"_id": job_data["worker"]}, projection={"_id": 1})

            if requeue:
                log.debug("Requeueing job %s" % job.id)
                job.requeue()
                stats["requeued"] += 1

        return stats
| mit | 5318eedd9a4bbb74ba060cce4642705e | 27.941176 | 105 | 0.579268 | 4.026187 | false | false | false | false |
staticafi/symbiotic | lib/symbioticpy/symbiotic/targets/slowbeast.py | 1 | 5050 | from os.path import abspath
from .. utils import dbg
from . tool import SymbioticBaseTool
try:
import benchexec.util as util
import benchexec.result as result
from benchexec.tools.template import BaseTool
except ImportError:
# fall-back solution (at least for now)
import symbiotic.benchexec.util as util
import symbiotic.benchexec.result as result
from symbiotic.benchexec.tools.template import BaseTool
try:
from symbiotic.versions import llvm_version
except ImportError:
# the default version
llvm_version='10.0.1'
class SymbioticTool(BaseTool, SymbioticBaseTool):
    """ Slowbeast symbolic executor integrated into Symbiotic. """

    REQUIRED_PATHS = ['sb', 'slowbeast']

    def __init__(self, opts):
        SymbioticBaseTool.__init__(self, opts)

    def name(self):
        return 'slowbeast'

    def llvm_version(self):
        # required LLVM version (falls back to the module default when
        # symbiotic.versions is unavailable)
        return llvm_version

    def executable(self):
        return util.find_executable('sb', 'slowbeast/sb')

    def cmdline(self, executable, options, tasks, propertyfile, rlimits):
        """Build the slowbeast invocation for a single task."""
        assert len(tasks) == 1
        prp = self._options.property
        exe = abspath(self.executable())
        # match the pointer width to the verified architecture
        arch = '-pointer-bitwidth={0}'.format(32 if self._options.is32bit else 64)
        cmd = [exe, '-se-exit-on-error', '-se-replay-errors', arch]
        if prp.unreachcall():
            # target the property's error functions
            funs = ','.join(prp.getcalls())
            cmd.append(f'-error-fn={funs}')
        return cmd + options + tasks

    def set_environment(self, env, opts):
        """ Set environment for the tool """
        # do not link any functions
        opts.linkundef = []
        env.prepend('LD_LIBRARY_PATH', '{0}/slowbeast/'.\
                    format(env.symbiotic_dir))
        env.reset('PYTHONOPTIMIZE', '1')

    def passes_before_slicing(self):
        # for the termination property run -find-exits first
        if self._options.property.termination():
            return ['-find-exits']

        return []

    def passes_before_verification(self):
        """
        Passes that should run before slowbeast
        """
        prp = self._options.property
        passes = []
        if prp.termination():
            passes.append('-instrument-nontermination')
            passes.append('-instrument-nontermination-mark-header')

        passes += ["-lowerswitch", "-simplifycfg", "-reg2mem",
                   "-simplifycfg", "-ainline"]
        passes.append("-ainline-noinline")
        # FIXME: get rid of the __VERIFIER_assert hack
        if prp.unreachcall():
            passes.append(",".join(prp.getcalls())+f",__VERIFIER_assert,__VERIFIER_assume,assume_abort_if_not")
        return passes +\
                ["-flatten-loops", "-O3", "-remove-constant-exprs", "-reg2mem"] +\
                super().passes_before_verification()

    # NOTE(review): this function lacks `self`, references an undefined
    # `gen` and `basename`, and asserts `path is None` after having used
    # `basename(path)` -- it looks unfinished/dead; verify before use.
    def generate_graphml(path, source, is_correctness_wit, opts, saveto):
        """ Generate trivial correctness witness for now """
        if saveto is None:
            saveto = '{0}.graphml'.format(basename(path))

        saveto = abspath(saveto)

        if is_correctness_wit:
            gen.createTrivialWitness()
            assert path is None

        gen.write(saveto)

    def determine_result(self, returncode, returnsignal, output, isTimeout):
        """Derive the verification verdict from slowbeast's output."""
        if isTimeout:
            return ''

        no_path_killed = False
        have_problem = False
        no_errors = False
        memerr = False
        asserterr = False
        for line in map(str, output):
            if 'assertion failed!' in line:
                asserterr = True
            elif 'assertion failure' in line:
                asserterr = True
            elif '[assertion error]' in line:
                asserterr = True
            elif 'None: __VERIFIER_error called!' in line:
                asserterr = True
            elif 'Error found.' in line:
                no_errors = False
            elif '[memory error]' in line:
                memerr = True
            elif 'Killed paths: 0' in line:
                no_path_killed = True
            elif 'Did not extend the path and reached entry of CFG' in line or\
                 'a problem was met' in line or\
                 'Failed deciding the result.' in line:
                have_problem = True
            elif 'Found errors: 0' in line:
                no_errors = True

        if not no_errors:
            if asserterr:
                if self._options.property.termination():
                    return result.RESULT_FALSE_TERMINATION
                return result.RESULT_FALSE_REACH
            elif memerr:
                return f"{result.RESULT_UNKNOWN}(uninit mem)"
                # we do not support memsafety yet...
                #return result.RESULT_FALSE_DEREF
            else:
                return f"{result.RESULT_UNKNOWN}(unknown-err)"
        # TRUE only when no errors, no killed paths and no solver problems
        if no_errors and no_path_killed and not have_problem:
            return result.RESULT_TRUE_PROP

        if returncode != 0:
            return f"{result.RESULT_ERROR}(returned {returncode})"
        if returnsignal:
            return f"{result.RESULT_ERROR}(signal {returnsignal})"
        return result.RESULT_UNKNOWN
| mit | 894e70921c34658d05497e8dc838e1c5 | 34.56338 | 111 | 0.578416 | 4.095702 | false | false | false | false |
staticafi/symbiotic | scripts/kleetester.py | 1 | 7693 | #!/usr/bin/python3
from subprocess import Popen, PIPE, STDOUT
from time import sleep
from sys import stderr
def runcmd(cmd):
    """Start *cmd* with stderr folded into stdout.

    Logs the command line to stderr and returns the Popen object,
    or None when the process could not be spawned.
    """
    print("[kleetester] {0}".format(" ".join(cmd)), file=stderr)
    stderr.flush()
    try:
        return Popen(cmd, stdout=PIPE, stderr=STDOUT)
    except OSError as e:
        print(str(e), file=stderr)
        return None
def gentest(bitcode, outdir, prp, suffix=None, params=None):
    """Spawn one KLEE instance generating tests for *bitcode* into *outdir*.

    *prp* is either 'coverage' or the name of an error function to target;
    *suffix* distinguishes test-case files of parallel instances and
    *params* appends extra KLEE options. Returns the Popen object
    (or None when KLEE could not be started).
    """
    options = ['-use-forked-solver=0', '--use-call-paths=0',
               '--output-stats=0', '-istats-write-interval=60s',
               '-timer-interval=10', '-external-calls=pure',
               '-write-testcases', '-malloc-symbolic-contents',
               '-max-memory=8000', '-output-source=false']
    if prp == 'coverage':
        options.append('-only-output-states-covering-new=1')
        options.append('-max-time=840')
    else:
        # stop at the first reached error call
        options.append(f'-error-fn={prp}')
        options.append('-exit-on-error-type=Assert')
        options.append('-dump-states-on-halt=0')
    if params:
        options.extend(params)

    cmd = ['klee', f'-output-dir={outdir}']
    if suffix:
        cmd.append(f'-testcases-suffix={suffix}')
    return runcmd(cmd + options + [bitcode])
def find_criterions(bitcode):
    """Run the 'get-test-targets' pass and collect the reported targets.

    Returns (path of the rewritten bitcode, generator of target names),
    or (None, None) when the pass fails or reports no targets.
    """
    newbitcode = f"{bitcode}.tpr.bc"
    # FIXME: generate it directly from slicer (multiple slices)
    # FIXME: modify the code also such that any path that avoids the criterion
    # is aborted
    p = runcmd(['opt', '-load', 'LLVMsbt.so', '-get-test-targets',
                '-o', newbitcode, bitcode])
    if p is None:
        return None, None

    out, errs = p.communicate()
    if p.poll() != 0:
        print(errs)
        print(out)
        return None, None
    if out:
        targets = (line.decode('utf-8', 'ignore') for line in out.splitlines())
        return newbitcode, targets
    return None, None
def constrain_to_target(bitcode, target):
    """Rewrite *bitcode* so that it is constrained to reaching *target*.

    Returns the path of the rewritten bitcode, or None on failure.
    """
    newbitcode = f"{bitcode}.opt.bc"
    p = runcmd(['opt', '-load', 'LLVMsbt.so', '-constraint-to-target',
                f'-ctt-target={target}', '-O3', '-o', newbitcode, bitcode])
    if p is None:
        return None
    if p.wait() != 0:
        out, errs = p.communicate()
        print(out, file=stderr)
        print(errs, file=stderr)
        return None
    return newbitcode
def sliceprocess(bitcode, crit):
    """Start slicing *bitcode* w.r.t. the criterion *crit* (120s timeout).

    Returns (Popen object or None, path where the slice will be written).
    """
    constrained = constrain_to_target(bitcode, crit)
    if constrained is None:
        return None, None
    slbitcode = f"{constrained}-{crit}.bc"
    cmd = ['timeout', '120', 'llvm-slicer', '-c', crit,
           '-o', slbitcode, constrained]
    return runcmd(cmd), slbitcode
def optimize(bitcode):
    """Run the optimization pipeline on *bitcode*.

    Returns the path of the optimized bitcode, or None when the
    optimizer could not be started or exited with an error.
    (The original unconditionally returned the path, which made the
    callers' existing `is None` checks dead code and crashed with
    AttributeError when runcmd() returned None.)
    """
    newbitcode = f"{bitcode}.opt.bc"
    cmd = ['opt', '-load', 'LLVMsbt.so', '-O3', '-remove-infinite-loops',
           '-O2', '-o', newbitcode, bitcode]
    p = runcmd(cmd)
    if p is None or p.wait() != 0:
        return None
    return newbitcode
def check_error(outs, errs):
    """Scan the (bytes) output of a finished KLEE instance for an error.

    *errs* is unused: runcmd() folds stderr into stdout. Returns True
    when an assertion failure was reported, False otherwise (the
    original implicitly returned None here; False is equivalent in all
    boolean contexts and clearer).
    """
    for line in outs.splitlines():
        if b'ASSERTION FAIL: ' in line:
            print('Found ERROR!', file=stderr)
            return True
    return False
def _kill_running(generators):
    """Kill every generator process that is still running.

    Used right before exiting once an error was found: the remaining
    KLEE instances are no longer needed.
    (BUG fix: the original killed processes whose poll() was NOT None,
    i.e. the already-finished ones, leaving the running ones alive.)
    """
    for gen in generators:
        if gen.poll() is None:
            gen.kill()


def _reap_generators(generators, prp):
    """Drop finished generator processes from *generators*.

    For error properties, inspect the output of every finished KLEE
    instance; if one found the error, kill the remaining instances and
    exit(0). Returns the list of still-running processes.
    """
    running = []
    for p in generators:
        if p.poll() is not None:
            if prp != 'coverage':
                if check_error(*p.communicate()):
                    _kill_running(generators)
                    exit(0)
        else:
            running.append(p)
    return running


def main(argv):
    if len(argv) != 4:
        exit(1)

    prp = argv[1]       # 'coverage' or the name of an error function
    outdir = argv[2]    # directory for the generated test cases
    bitcode = argv[3]   # the bitcode under test

    generators = []

    # run KLEE on the original bitcode
    print("\n--- Running the main KLEE --- ", file=stderr)
    maingen = gentest(bitcode, outdir, prp)
    if maingen:
        generators.append(maingen)

    bitcodewithcrits, crits = find_criterions(bitcode)
    if bitcodewithcrits:
        # The later crits are likely deeper in the code.
        # Since run use only part of them, use those.
        crits = list(crits)
        crits.reverse()

        for n, crit in enumerate(crits):
            print(f"\n--- Targeting at {crit} target --- ", file=stderr)
            if prp == 'coverage' and maingen and maingen.poll() is not None:
                break  # the main process finished, we can finish too

            # slice bitcode
            p, slicedcode = sliceprocess(bitcodewithcrits, crit)
            if p is None:
                print(f'Slicing w.r.t {crit} FAILED', file=stderr)
                continue

            print(f'Starget slicing w.r.t {crit}, waiting for the job...', file=stderr)
            ret = p.wait()
            if ret != 0:
                out, errs = p.communicate()
                print(f'Slicing w.r.t {crit} FAILED', file=stderr)
                if ret == 124:
                    break  # one timeouted, others will too...
                print(out, file=stderr)
                print(errs, file=stderr)
                continue
            print(f'Slicing w.r.t {crit} done', file=stderr)

            slicedcode = optimize(slicedcode)
            if slicedcode is None:
                print("Optimizing failed", file=stderr)
                continue

            # generate tests
            if prp == 'coverage' and maingen and maingen.poll() is not None:
                break  # the main process finished, we can finish too
            p = gentest(slicedcode, outdir, prp, suffix=str(n),
                        params=['--search=dfs', '--use-batching-search'])
            if p is None:
                continue
            generators.append(p)

            generators = _reap_generators(generators, prp)

            # run atmost 8 at once
            while len(generators) >= 8:
                if prp == 'coverage' and maingen and maingen.poll() is not None:
                    break  # the main process finished, we can finish too
                print("Got enough test generators, waiting for some to finish...",
                      file=stderr)
                sleep(2)  # sleep 2 seconds
                # BUG fix: the original accumulated survivors into a list
                # that was never reset between iterations of this loop,
                # so `generators` grew duplicate entries on every pass.
                generators = _reap_generators(generators, prp)

    print(f"\n--- All targets running --- ", file=stderr)
    stderr.flush()

    while generators:
        print(f"Have {len(generators)} test generators running", file=stderr)
        stderr.flush()
        generators = _reap_generators(generators, prp)
        if generators:
            sleep(2)  # sleep 2 seconds

    print(f"\n--- All KLEE finished --- ", file=stderr)

    if prp == 'coverage':
        # if all finished, then also the main KLEE finished,
        # and we can remove the files from side KLEE's -- those
        # are superfluous
        runcmd(['rm', '-f', f"{outdir}/test*.*.xml"])


if __name__ == "__main__":
    from sys import argv
    main(argv)
| mit | 71d97ff2a1721a3fc79aa6f6864b6515 | 33.34375 | 88 | 0.519693 | 3.915013 | false | false | false | false |
staticafi/symbiotic | lib/symbioticpy/symbiotic/targets/esbmc.py | 1 | 5971 | """
BenchExec is a framework for reliable benchmarking.
This file is part of BenchExec.
Copyright (C) 2007-2015 Dirk Beyer
Copyright (C) 2019-2021 Marek Chalupa
All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import logging
import xml.etree.ElementTree as ET
try:
import benchexec.util as util
import benchexec.result as result
from benchexec.tools.template import BaseTool
except ImportError:
# fall-back solution (at least for now)
import symbiotic.benchexec.util as util
import symbiotic.benchexec.result as result
from symbiotic.benchexec.tools.template import BaseTool
from symbiotic.utils.process import runcmd
from symbiotic.utils.watch import DbgWatch
from . tool import SymbioticBaseTool
try:
from symbiotic.versions import llvm_version
except ImportError:
# the default version
llvm_version='10.0.1'
class SymbioticTool(BaseTool, SymbioticBaseTool):
    """ ESBMC integrated into Symbiotic (driven through esbmc-wrapper.py). """

    def __init__(self, opts, only_results=None):
        """ only_results = if not none, report only these results as real,
            otherwise report 'unknown'. Used to implement incremental BMC.
        """
        SymbioticBaseTool.__init__(self, opts)
        opts.explicit_symbolic = True
        # stored for callers implementing incremental BMC; not consulted
        # inside this class itself
        self._only_results = only_results

    def executable(self):
        return util.find_executable("esbmc-wrapper.py")

    def version(self, executable):
        return self._version_from_tool(executable, "-v")

    def name(self):
        return "ESBMC"

    def slicer_options(self):
        """ Override slicer options: do not slice bodies of funs
        that are slicing criteria. CBMC uses the assertions inside,
        not the calls themselves.
        """
        prp = self._options.property
        if not self._options.full_instrumentation and prp.signedoverflow():
            return (['__symbiotic_check_overflow'], ['-criteria-are-next-instr'])

        sc, opts = super().slicer_options()
        # keep the criteria functions' bodies intact while slicing
        return (sc, opts + ['--preserved-functions={0}'.format(','.join(sc))])

    def instrumentation_options(self):
        """
        Returns a triple (d, c, l, x) where d is the directory
        with configuration files, c is the configuration
        file for instrumentation (or None if no instrumentation
        should be performed), l is the
        file with definitions of the instrumented functions
        and x is True if the definitions should be linked after
        instrumentation (and False otherwise)
        """
        if not self._options.full_instrumentation and\
           self._options.property.signedoverflow():
            return ('int_overflows',
                    self._options.overflow_config_file or 'config-marker.json',
                    'overflows-marker.c', False)

        return super().instrumentation_options()

    def cmdline(self, executable, options, tasks, propertyfile, rlimits):
        """Build the esbmc-wrapper.py invocation for the given tasks."""
        # default the architecture to the verified bit-width unless the
        # caller already set it explicitly
        if "--arch" not in options:
            options += ["--arch", "32" if self._options.is32bit else "64"]
        return (
            [executable]
            + ["-p", propertyfile]
            + options
            + tasks
        )

    def determine_result(self, returncode, returnsignal, output, isTimeout):
        """Map the wrapper's textual verdict onto a benchexec result.

        NOTE: the elif ordering is load-bearing -- the specific
        "FALSE_*" markers must be tested before the bare "FALSE"
        substring, which would otherwise match them all.
        """
        status = result.RESULT_UNKNOWN
        unknw = False
        for line in map(str, output):
            if "FALSE_DEREF" in line:
                status = result.RESULT_FALSE_DEREF
            elif "FALSE_FREE" in line:
                status = result.RESULT_FALSE_FREE
            elif "FALSE_MEMTRACK" in line:
                status = result.RESULT_FALSE_MEMTRACK
            elif "FALSE_OVERFLOW" in line:
                status = result.RESULT_FALSE_OVERFLOW
            elif "FALSE_TERMINATION" in line:
                status = result.RESULT_FALSE_TERMINATION
            elif "FALSE" in line:
                status = result.RESULT_FALSE_REACH
            elif "TRUE" in line:
                status = result.RESULT_TRUE_PROP
            elif "DONE" in line:
                status = result.RESULT_DONE

            # an explicit "Unknown" verdict is not an internal error
            if "Unknown" in line:
                unknw = True;

        if status == result.RESULT_UNKNOWN:
            if isTimeout:
                status = "TIMEOUT"
            elif not unknw:
                status = "ERROR"
        return status

    def llvm_version(self):
        """
        Return required version of LLVM
        """
        return llvm_version

    def set_environment(self, symbiotic_dir, opts):
        """
        Set environment for the tool
        """
        # do not link any functions
        opts.linkundef = []

    def passes_before_verification(self):
        """
        Passes that should run before the verifier
        """
        # demote registers and lower switch instructions so that the
        # later llvm2c translation gets simpler input
        return super().passes_before_verification() + ["-reg2mem", "-lowerswitch", "-simplifycfg"]

    def passes_before_slicing(self):
        if self._options.property.termination():
            return ['-find-exits', '-use-exit']

        return []

    def actions_before_verification(self, symbiotic):
        # link our specific funs
        self._options.linkundef = ['verifier']
        symbiotic.link_undefined(only_func=['__VERIFIER_silent_exit','__VERIFIER_exit'])
        self._options.linkundef = []

        # translate to C
        output = symbiotic.curfile + '.c'
        runcmd(['llvm2c', symbiotic.curfile, '--add-includes', '--o', output],
               DbgWatch('all'))
        symbiotic.curfile = output
byu-dml/metalearn | metalearn/metafeatures/simple_metafeatures.py | 1 | 5630 | import numpy as np
from metalearn.metafeatures.common_operations import *
from metalearn.metafeatures.base import build_resources_info, MetafeatureComputer
from metalearn.metafeatures.constants import ProblemType, MetafeatureGroup
def get_dataset_stats(X, column_types):
    """Basic counts and ratios of instances and feature types in X."""
    n_instances, n_features = X.shape
    n_numeric = len(get_numeric_features(X, column_types))
    n_categorical = n_features - n_numeric
    return (n_instances, n_features, n_numeric, n_categorical,
            n_numeric / n_features, n_categorical / n_features)

get_dataset_stats = MetafeatureComputer(
    get_dataset_stats,
    [
        "NumberOfInstances",
        "NumberOfFeatures",
        "NumberOfNumericFeatures",
        "NumberOfCategoricalFeatures",
        "RatioOfNumericFeatures",
        "RatioOfCategoricalFeatures"
    ],
    ProblemType.ANY,
    [MetafeatureGroup.SIMPLE],
    { "X": "X_raw" }
)
def get_dimensionality(number_of_features, number_of_instances):
    """Ratio of the number of features to the number of instances."""
    return (number_of_features / number_of_instances,)

get_dimensionality = MetafeatureComputer(
    get_dimensionality,
    ["Dimensionality"],
    ProblemType.ANY,
    [MetafeatureGroup.SIMPLE],
    {
        "number_of_features": "NumberOfFeatures",
        "number_of_instances": "NumberOfInstances"
    }
)
def get_missing_values(X):
    """Counts and ratios of missing values per cell, instance, and feature."""
    n_rows, n_cols = X.shape
    missing_per_row = n_cols - X.count(axis=1)
    missing_per_col = n_rows - X.count(axis=0)
    total_missing = int(np.sum(missing_per_row))  # int for json compatibility
    rows_with_missing = int(np.sum(missing_per_row != 0))  # int for json compatibility
    cols_with_missing = int(np.sum(missing_per_col != 0))
    return (
        total_missing,
        total_missing / (n_rows * n_cols),
        rows_with_missing,
        rows_with_missing / n_rows,
        cols_with_missing,
        cols_with_missing / n_cols
    )

get_missing_values = MetafeatureComputer(
    get_missing_values,
    [
        "NumberOfMissingValues",
        "RatioOfMissingValues",
        "NumberOfInstancesWithMissingValues",
        "RatioOfInstancesWithMissingValues",
        "NumberOfFeaturesWithMissingValues",
        "RatioOfFeaturesWithMissingValues"
    ],
    ProblemType.ANY,
    [MetafeatureGroup.SIMPLE],
    { "X": "X_raw" }
)
def get_class_stats(Y):
    """Class count plus distribution statistics of the class probabilities."""
    labels = Y.unique()
    n_classes = labels.shape[0]
    n_instances = Y.shape[0]
    class_sizes = [sum(Y == label) for label in labels]
    class_probs = [size / n_instances for size in class_sizes]
    return (n_classes, *profile_distribution(class_probs),
            min(class_sizes), max(class_sizes))

get_class_stats = MetafeatureComputer(
    computer=get_class_stats,
    returns=[
        "NumberOfClasses",
        "MeanClassProbability",
        "StdevClassProbability",
        "SkewClassProbability",
        "KurtosisClassProbability",
        "MinClassProbability",
        "Quartile1ClassProbability",
        "Quartile2ClassProbability",
        "Quartile3ClassProbability",
        "MaxClassProbability",
        "MinorityClassSize",
        "MajorityClassSize"
    ],
    problem_type=ProblemType.CLASSIFICATION,
    groups=[
        MetafeatureGroup.SIMPLE,
        MetafeatureGroup.TARGET_DEPENDENT
    ]
)
def get_categorical_cardinalities(X, column_types):
    """Distribution statistics over the number of distinct values per categorical feature."""
    cardinalities = []
    for feature in get_categorical_features(X, column_types):
        cardinalities.append(X[feature].unique().shape[0])
    return profile_distribution(cardinalities)

get_categorical_cardinalities = MetafeatureComputer(
    get_categorical_cardinalities,
    [
        "MeanCardinalityOfCategoricalFeatures",
        "StdevCardinalityOfCategoricalFeatures",
        "SkewCardinalityOfCategoricalFeatures",
        "KurtosisCardinalityOfCategoricalFeatures",
        "MinCardinalityOfCategoricalFeatures",
        "Quartile1CardinalityOfCategoricalFeatures",
        "Quartile2CardinalityOfCategoricalFeatures",
        "Quartile3CardinalityOfCategoricalFeatures",
        "MaxCardinalityOfCategoricalFeatures"
    ],
    ProblemType.ANY,
    [MetafeatureGroup.SIMPLE],
)
def get_numeric_cardinalities(X, column_types):
    """Distribution statistics over the number of distinct values per numeric feature."""
    counts = [X[col].unique().shape[0]
              for col in get_numeric_features(X, column_types)]
    return profile_distribution(counts)

get_numeric_cardinalities = MetafeatureComputer(
    get_numeric_cardinalities,
    [
        "MeanCardinalityOfNumericFeatures",
        "StdevCardinalityOfNumericFeatures",
        "SkewCardinalityOfNumericFeatures",
        "KurtosisCardinalityOfNumericFeatures",
        "MinCardinalityOfNumericFeatures",
        "Quartile1CardinalityOfNumericFeatures",
        "Quartile2CardinalityOfNumericFeatures",
        "Quartile3CardinalityOfNumericFeatures",
        "MaxCardinalityOfNumericFeatures"
    ],
    ProblemType.ANY,
    [MetafeatureGroup.SIMPLE]
)
"""
A list of all MetafeatureComputer
instances in this module.
"""
metafeatures_info = build_resources_info(
get_dataset_stats,
get_class_stats,
get_dimensionality,
get_missing_values,
get_categorical_cardinalities,
get_numeric_cardinalities
)
| mit | 00ee03a0daf60e399d20565ca1553369 | 32.511905 | 150 | 0.709591 | 3.758344 | false | false | false | false |
chrisdev/wagtail-cookiecutter-foundation | {{cookiecutter.project_slug}}/pages/migrations/0020_auto_20180501_1341.py | 2 | 1450 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2018-05-01 13:41
from __future__ import unicode_literals
from django.db import migrations, models
import wagtail.core.blocks
import wagtail.core.fields
import wagtail.images.blocks
class Migration(migrations.Migration):
    """Auto-generated schema migration for the pages app.

    Alters the template choices of StandardIndexPage, switches
    StandardPage.body to a StreamField, and changes two fields of the
    video gallery carousel item to plain URL fields.
    """

    dependencies = [
        ('pages', '0019_auto_20171102_1856'),
    ]

    operations = [
        migrations.AlterField(
            model_name='standardindexpage',
            name='template_string',
            field=models.CharField(choices=[('pages/standard_index_page.html', 'Default Template'), ('pages/standard_index_page_grid.html', 'Grid Also In This Section')], default='pages/standard_index_page.html', max_length=255),
        ),
        migrations.AlterField(
            model_name='standardpage',
            name='body',
            field=wagtail.core.fields.StreamField((('paragraph', wagtail.core.blocks.RichTextBlock()), ('image', wagtail.images.blocks.ImageChooserBlock()), ('html', wagtail.core.blocks.RawHTMLBlock()))),
        ),
        migrations.AlterField(
            model_name='videogallerypagecarouselitem',
            name='embed_url',
            field=models.URLField(blank=True, verbose_name='Embed URL'),
        ),
        migrations.AlterField(
            model_name='videogallerypagecarouselitem',
            name='link_external',
            field=models.URLField(blank=True, verbose_name='External link'),
        ),
    ]
| mit | 1c023dbfb15e7dae910e383f37f581be | 37.157895 | 229 | 0.637241 | 4.142857 | false | false | false | false |
chrisdev/wagtail-cookiecutter-foundation | {{cookiecutter.project_slug}}/people/migrations/0006_auto_20180607_1804.py | 2 | 1100 | # Generated by Django 2.0 on 2018-06-07 18:04
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration for the people app.

    Re-declares the page-pointer OneToOne fields and the tag foreign key
    with explicit on_delete/related settings.
    """

    dependencies = [
        ('people', '0005_personindexpage_feed_image'),
    ]

    operations = [
        migrations.AlterField(
            model_name='personindexpage',
            name='page_ptr',
            field=models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page'),
        ),
        migrations.AlterField(
            model_name='personpage',
            name='page_ptr',
            field=models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page'),
        ),
        migrations.AlterField(
            model_name='personpagetag',
            name='tag',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='people_personpagetag_items', to='taggit.Tag'),
        ),
    ]
| mit | 346be093b1ede9d91c0b2b3f9c608a46 | 36.931034 | 179 | 0.647273 | 3.956835 | false | false | false | false |
chrisdev/wagtail-cookiecutter-foundation | {{cookiecutter.project_slug}}/pages/migrations/0022_auto_20180607_1804.py | 2 | 1975 | # Generated by Django 2.0 on 2018-06-07 18:04
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration for the pages app.

    Re-declares the page-pointer OneToOne fields of several page models
    and the link FKs of the video gallery carousel item with explicit
    on_delete/related settings.
    """

    dependencies = [
        ('pages', '0021_auto_20180607_1434'),
    ]

    operations = [
        migrations.AlterField(
            model_name='faqspage',
            name='page_ptr',
            field=models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page'),
        ),
        migrations.AlterField(
            model_name='homepage',
            name='page_ptr',
            field=models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page'),
        ),
        migrations.AlterField(
            model_name='standardindexpage',
            name='page_ptr',
            field=models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page'),
        ),
        migrations.AlterField(
            model_name='standardpage',
            name='page_ptr',
            field=models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page'),
        ),
        migrations.AlterField(
            model_name='videogallerypagecarouselitem',
            name='link_document',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='wagtaildocs.Document'),
        ),
        migrations.AlterField(
            model_name='videogallerypagecarouselitem',
            name='link_page',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='wagtailcore.Page'),
        ),
    ]
| mit | baa8db72626cf7817864860679954009 | 43.886364 | 179 | 0.644051 | 3.934263 | false | false | false | false |
chrisdev/wagtail-cookiecutter-foundation | {{cookiecutter.project_slug}}/utils/models.py | 2 | 3021 | from django.db import models
from wagtail.admin.edit_handlers import (
FieldPanel, MultiFieldPanel, PageChooserPanel
)
from wagtail.core.fields import RichTextField
from wagtail.documents.edit_handlers import DocumentChooserPanel
from wagtail.images.edit_handlers import ImageChooserPanel
class LinkFields(models.Model):
    """Abstract mixin providing a link that may point to an external URL,
    an internal page, or a document.

    The ``link`` property resolves the three alternatives with precedence
    page > document > external URL.
    """

    link_external = models.URLField("External link", blank=True)
    link_page = models.ForeignKey(
        'wagtailcore.Page',
        null=True,
        blank=True,
        on_delete=models.SET_NULL,
        related_name='+'
    )
    link_document = models.ForeignKey(
        'wagtaildocs.Document',
        null=True,
        blank=True,
        on_delete=models.SET_NULL,
        related_name='+'
    )

    @property
    def link(self):
        # Precedence: internal page, then document, then the raw URL.
        if self.link_page:
            return self.link_page.url
        elif self.link_document:
            return self.link_document.url
        else:
            return self.link_external

    panels = [
        FieldPanel('link_external'),
        PageChooserPanel('link_page'),
        DocumentChooserPanel('link_document'),
    ]

    class Meta:
        abstract = True
class ContactFields(models.Model):
    """Abstract mixin with common contact-information fields for a person
    or organisation (name, phones, emails, postal address)."""

    name_organization = models.CharField(max_length=255, blank=True)
    telephone = models.CharField(max_length=20, blank=True)
    telephone_2 = models.CharField(max_length=20, blank=True)
    email = models.EmailField(blank=True)
    email_2 = models.EmailField(blank=True)
    address_1 = models.CharField(max_length=255, blank=True)
    address_2 = models.CharField(max_length=255, blank=True)
    city = models.CharField(max_length=255, blank=True)
    country = models.CharField(max_length=255, blank=True)
    post_code = models.CharField(max_length=10, blank=True)

    panels = [
        # BUGFIX: the descriptive string used to be passed as FieldPanel's
        # second positional argument, which is ``classname`` (a CSS class),
        # not a label — it was emitted as a bogus CSS class instead of being
        # shown to editors.  Pass it as ``heading`` instead.
        FieldPanel(
            'name_organization',
            heading='The full/formatted name of the person or organisation',
        ),
        FieldPanel('telephone'),
        FieldPanel('telephone_2'),
        FieldPanel('email'),
        FieldPanel('email_2'),
        FieldPanel('address_1'),
        FieldPanel('address_2'),
        FieldPanel('city'),
        FieldPanel('country'),
        FieldPanel('post_code'),
    ]

    class Meta:
        abstract = True
# Carousel items
class CarouselItem(LinkFields):
    """Abstract carousel slide: an image or an embed URL with a caption,
    plus the link fields inherited from :class:`LinkFields`."""

    image = models.ForeignKey(
        'wagtailimages.Image',
        null=True,
        blank=True,
        on_delete=models.SET_NULL,
        related_name='+'
    )
    embed_url = models.URLField("Embed URL", blank=True)
    caption = RichTextField(blank=True)

    panels = [
        ImageChooserPanel('image'),
        FieldPanel('embed_url'),
        FieldPanel('caption'),
        MultiFieldPanel(LinkFields.panels, "Link"),
    ]

    class Meta:
        abstract = True
# Related links
class RelatedLink(LinkFields):
    """Abstract titled link, used for "related links" sections."""

    title = models.CharField(max_length=255, help_text="Link title")

    panels = [
        FieldPanel('title'),
        MultiFieldPanel(LinkFields.panels, "Link"),
    ]

    class Meta:
        abstract = True
chrisdev/wagtail-cookiecutter-foundation | {{cookiecutter.project_slug}}/pages/migrations/0013_videopage_videopagecarouselitem.py | 2 | 2701 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-08-08 13:59
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import modelcluster.fields
import wagtail.core.fields
class Migration(migrations.Migration):
    """Create the ``VideoPage`` page type and its orderable carousel-item
    child model (``VideoPageCarouselItem``)."""

    dependencies = [
        ('wagtailimages', '0019_delete_filter'),
        ('wagtaildocs', '0007_merge'),
        ('wagtailcore', '0039_collectionviewrestriction'),
        ('pages', '0012_auto_20170606_1319'),
    ]

    operations = [
        migrations.CreateModel(
            name='VideoPage',
            fields=[
                ('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
                ('intro', wagtail.core.fields.RichTextField(blank=True)),
                ('template_string', models.CharField(choices=[(b'pages/video_gallery_page.html', b'Videos Page')], default=b'pages/video_gallery_page.html', max_length=255)),
                ('feed_image', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='wagtailimages.Image')),
            ],
            options={
                'abstract': False,
            },
            bases=('wagtailcore.page',),
        ),
        migrations.CreateModel(
            name='VideoPageCarouselItem',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # sort_order comes from wagtail's Orderable pattern.
                ('sort_order', models.IntegerField(blank=True, editable=False, null=True)),
                ('link_external', models.URLField(blank=True, verbose_name=b'External link')),
                ('embed_url', models.URLField(blank=True, verbose_name=b'Embed URL')),
                ('caption', wagtail.core.fields.RichTextField(blank=True)),
                ('image', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='wagtailimages.Image')),
                ('link_document', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='+', to='wagtaildocs.Document')),
                ('link_page', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='+', to='wagtailcore.Page')),
                ('page', modelcluster.fields.ParentalKey(on_delete=django.db.models.deletion.CASCADE, related_name='carousel_items', to='pages.VideoPage')),
            ],
            options={
                'ordering': ['sort_order'],
                'abstract': False,
            },
        ),
    ]
| mit | 75b7bba8ee04f700ecc968c92c7ae205 | 50.942308 | 191 | 0.612366 | 3.931587 | false | false | false | false |
chrisdev/wagtail-cookiecutter-foundation | {{cookiecutter.project_slug}}/products/migrations/0008_auto_20180607_1804.py | 2 | 1109 | # Generated by Django 2.0 on 2018-06-07 18:04
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Make ``on_delete`` explicit for the product page parent links and
    the product tag foreign key."""

    dependencies = [
        ('products', '0007_productindexpage_feed_image'),
    ]

    operations = [
        migrations.AlterField(
            model_name='productindexpage',
            name='page_ptr',
            field=models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page'),
        ),
        migrations.AlterField(
            model_name='productpage',
            name='page_ptr',
            field=models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page'),
        ),
        migrations.AlterField(
            model_name='productpagetag',
            name='tag',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='products_productpagetag_items', to='taggit.Tag'),
        ),
    ]
| mit | 5c7793c2697aa735edad5e6a365460c0 | 37.241379 | 179 | 0.650135 | 3.989209 | false | false | false | false |
chrisdev/wagtail-cookiecutter-foundation | {{cookiecutter.project_slug}}/gallery/models.py | 2 | 4587 | import logging
from django.conf import settings
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.db import models
from django.utils.translation import ugettext_lazy as _
from wagtail.admin.edit_handlers import FieldPanel, MultiFieldPanel
from wagtail.core.fields import RichTextField
from wagtail.core.models import Page
from wagtail.images.models import Image
from wagtail.images.edit_handlers import ImageChooserPanel
class PhotoGalleryIndexPage(Page):
    """Index page listing the live :class:`GalleryIndex` pages beneath it,
    paginated 16 per page."""

    intro = RichTextField(blank=True)
    feed_image = models.ForeignKey(
        Image,
        help_text="An optional image to represent the page",
        null=True,
        blank=True,
        on_delete=models.SET_NULL,
        related_name='+'
    )

    # BUGFIX: this was ``('intro')`` which, lacking a comma, is just the
    # string 'intro' rather than a one-element tuple.
    indexed_fields = ('intro',)

    @property
    def galleries(self):
        """Live gallery pages under this index, newest first."""
        galleries = GalleryIndex.objects.live().descendant_of(self)
        galleries = galleries.order_by('-first_published_at')
        return galleries

    def get_context(self, request):
        """Add a paginated ``galleries`` queryset to the template context.

        A non-numeric ``?page`` falls back to the first page; an
        out-of-range one falls back to the last page.
        """
        galleries = self.galleries

        page = request.GET.get('page')
        paginator = Paginator(galleries, 16)
        try:
            galleries = paginator.page(page)
        except PageNotAnInteger:
            galleries = paginator.page(1)
        except EmptyPage:
            galleries = paginator.page(paginator.num_pages)

        context = super(PhotoGalleryIndexPage, self).get_context(request)
        context['galleries'] = galleries
        return context

    class Meta:
        verbose_name = _('Photo Gallery Index')
# Panel configuration is attached after the class definition, matching the
# convention used throughout this module.
PhotoGalleryIndexPage.content_panels = [
    FieldPanel('title', classname="full title"),
    FieldPanel('intro', classname="full"),
]

PhotoGalleryIndexPage.promote_panels = [
    MultiFieldPanel(Page.promote_panels, "Common page configuration"),
    ImageChooserPanel('feed_image'),
]

# Choices for GalleryIndex.order_images_by.
IMAGE_ORDER_TYPES = (
    (1, 'Image title'),
    (2, 'Newest image first'),
)
class GalleryIndex(Page):
    """Gallery page rendering the images of one Wagtail collection,
    paginated and ordered per ``order_images_by``
    (IMAGE_ORDER_TYPES: 1 = 'Image title', 2 = 'Newest image first').
    """

    intro = RichTextField(
        blank=True,
        verbose_name=_('Intro text'),
        help_text=_('Optional text to go with the intro text.')
    )
    collection = models.ForeignKey(
        'wagtailcore.Collection',
        verbose_name=_('Collection'),
        null=True,
        blank=False,
        on_delete=models.SET_NULL,
        related_name='+',
        help_text=_('Show images in this collection in the gallery view.')
    )
    images_per_page = models.IntegerField(
        default=20,
        verbose_name=_('Images per page'),
        help_text=_('How many images there should be on one page.')
    )
    order_images_by = models.IntegerField(choices=IMAGE_ORDER_TYPES, default=1)
    feed_image = models.ForeignKey(
        Image,
        help_text="An optional image to represent the page",
        null=True,
        blank=True,
        on_delete=models.SET_NULL,
        related_name='+'
    )

    content_panels = Page.content_panels + [
        FieldPanel('intro', classname='full title'),
        FieldPanel('collection'),
        FieldPanel('images_per_page', classname='full title'),
        FieldPanel('order_images_by'),
    ]

    promote_panels = [
        MultiFieldPanel(Page.promote_panels, "Common page configuration"),
        ImageChooserPanel('feed_image'),
    ]

    @property
    def images(self):
        # NOTE(review): ``collection`` is nullable (on_delete=SET_NULL), so
        # this raises AttributeError when no collection is set — confirm
        # that admin-side validation (blank=False) is considered sufficient.
        return get_gallery_images(self.collection.name, self)

    def get_context(self, request):
        # Paginate the collection's images; a non-numeric ?page falls back
        # to page 1 and an out-of-range one to the last page.
        images = self.images
        page = request.GET.get('page')
        paginator = Paginator(images, self.images_per_page)
        try:
            images = paginator.page(page)
        except PageNotAnInteger:
            images = paginator.page(1)
        except EmptyPage:
            images = paginator.page(paginator.num_pages)

        context = super(GalleryIndex, self).get_context(request)
        context['gallery_images'] = images
        return context

    class Meta:
        verbose_name = _('Photo Gallery')
        verbose_name_plural = _('Photo Galleries')

    # Template can be overridden project-wide via settings.GALLERY_TEMPLATE.
    template = getattr(settings, 'GALLERY_TEMPLATE', 'gallery/gallery_index.html')
def get_gallery_images(collection, page=None, tags=None):
    """Return the images in *collection*, optionally ordered and tag-filtered.

    :param collection: name of the Wagtail collection to pull images from.
    :param page: optional GalleryIndex whose ``order_images_by`` selects the
        ordering (see IMAGE_ORDER_TYPES).
    :param tags: optional iterable of tag names to filter by.
    :return: an Image queryset, or ``None`` if the lookup failed.
    """
    images = None
    try:
        images = Image.objects.filter(collection__name=collection)
        if page:
            # BUGFIX: compare against the values actually declared in
            # IMAGE_ORDER_TYPES (1 = 'Image title', 2 = 'Newest image
            # first').  The old code tested 0/1, so choosing "Newest image
            # first" had no effect and "Image title" sorted by date.
            if page.order_images_by == 1:
                images = images.order_by('title')
            elif page.order_images_by == 2:
                images = images.order_by('-created_at')
    except Exception as e:
        # Best effort: log and fall through (caller receives None).
        logging.exception(e)
    if images and tags:
        images = images.filter(tags__name__in=tags).distinct()
    return images
| mit | 09d86bf895cc8249d388b78b8e6dbf4d | 29.377483 | 82 | 0.63789 | 4.04141 | false | false | false | false |
chrisdev/wagtail-cookiecutter-foundation | {{cookiecutter.project_slug}}/users/tests/test_views.py | 2 | 1800 | from django.test import RequestFactory, TestCase
from ..views import UserRedirectView, UserUpdateView
class BaseUserTestCase(TestCase):
    """Shared fixture: a user instance plus a RequestFactory.

    NOTE(review): ``make_user`` is not defined on django.test.TestCase —
    presumably provided by a helper base class (e.g. django-test-plus);
    confirm against the project's test configuration.
    """

    def setUp(self):
        self.user = self.make_user()
        self.factory = RequestFactory()
class TestUserRedirectView(BaseUserTestCase):
    """UserRedirectView should always redirect to the shared dashboard."""

    def test_get_redirect_url(self):
        # Instantiate the view directly. Never do this outside a test!
        view = UserRedirectView()
        # Generate a fake request
        request = self.factory.get('/fake-url')
        # Attach the user to the request
        request.user = self.user
        # Attach the request to the view
        view.request = request
        # Every user is redirected to the common dashboard URL.
        # (A previous comment claimed '/users/<username>/'; the assertion
        # below is the actual contract.)
        self.assertEqual(
            view.get_redirect_url(),
            '/users/dashboard/'
        )
class TestUserUpdateView(BaseUserTestCase):
    """Tests for UserUpdateView's success URL and object resolution."""

    def setUp(self):
        # call BaseUserTestCase.setUp()
        super(TestUserUpdateView, self).setUp()
        # Instantiate the view directly. Never do this outside a test!
        self.view = UserUpdateView()
        # Generate a fake request
        request = self.factory.get('/fake-url')
        # Attach the user to the request
        request.user = self.user
        # Attach the request to the view
        self.view.request = request

    def test_get_success_url(self):
        # A successful update lands on the shared dashboard URL.
        self.assertEqual(
            self.view.get_success_url(),
            '/users/dashboard/'
        )

    def test_get_object(self):
        # Expect: self.user, as that is the request's user object
        self.assertEqual(
            self.view.get_object(),
            self.user
        )
chrisdev/wagtail-cookiecutter-foundation | {{cookiecutter.project_slug}}/users/migrations/0001_initial.py | 1 | 3721 | # Generated by Django 2.1.7 on 2019-06-14 14:44
import django.contrib.auth.models
import django.contrib.auth.validators
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
    """Initial schema for the custom ``User`` model: the standard
    AbstractUser columns plus project-specific profile fields (full name,
    address, occupation, organisation, terms-of-service acceptance)."""

    initial = True

    dependencies = [
        ('auth', '0009_alter_user_last_name_max_length'),
    ]

    operations = [
        migrations.CreateModel(
            name='User',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('password', models.CharField(max_length=128, verbose_name='password')),
                ('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
                ('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
                ('username', models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=150, unique=True, validators=[django.contrib.auth.validators.UnicodeUsernameValidator()], verbose_name='username')),
                ('first_name', models.CharField(blank=True, max_length=30, verbose_name='first name')),
                ('last_name', models.CharField(blank=True, max_length=150, verbose_name='last name')),
                ('email', models.EmailField(blank=True, max_length=254, verbose_name='email address')),
                ('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')),
                ('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')),
                ('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),
                # Project-specific profile fields start here.
                ('name', models.CharField(default='', max_length=255, verbose_name='Full Name')),
                ('address', models.TextField(default='', verbose_name='Address')),
                ('city', models.CharField(default='', max_length=255, verbose_name='City or Town')),
                ('state', models.CharField(blank=True, default='', max_length=255, verbose_name='State/Province')),
                ('country_of_residence', models.CharField(default='', max_length=255, verbose_name='Country')),
                ('job', models.CharField(default='', max_length=255, verbose_name='Job Title or Occupation')),
                ('organisation', models.CharField(default='', max_length=255, verbose_name='Organisation')),
                ('tos', models.BooleanField(default=True, verbose_name='I have read and agree with the terms of Service')),
                ('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
                ('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
            ],
            options={
                'verbose_name': 'user',
                'verbose_name_plural': 'users',
                'abstract': False,
            },
            managers=[
                ('objects', django.contrib.auth.models.UserManager()),
            ],
        ),
    ]
| mit | 6f5670d48fcbbb7fe969633669bad03d | 71.960784 | 329 | 0.637463 | 4.331781 | false | false | false | false |
chrisdev/wagtail-cookiecutter-foundation | {{cookiecutter.project_slug}}/pages/migrations/0008_auto_20161220_1345.py | 3 | 2161 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2016-12-20 13:45
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Retype the social-media setting fields as optional URLFields with
    help text, and extend the standard-page template choices."""

    dependencies = [
        ('pages', '0007_socialmediasettings'),
    ]

    operations = [
        migrations.AlterField(
            model_name='socialmediasettings',
            name='facebook',
            field=models.URLField(blank=True, help_text='Your Facebook page URL', null=True),
        ),
        migrations.AlterField(
            model_name='socialmediasettings',
            name='facebook_appid',
            field=models.CharField(blank=True, help_text='Your Facbook AppID', max_length=255, null=True),
        ),
        migrations.AlterField(
            model_name='socialmediasettings',
            name='github',
            field=models.URLField(blank=True, help_text='Your Github URL', max_length=255, null=True),
        ),
        migrations.AlterField(
            model_name='socialmediasettings',
            name='instagram',
            field=models.URLField(blank=True, help_text='Your Instagram URL', max_length=255, null=True),
        ),
        migrations.AlterField(
            model_name='socialmediasettings',
            name='linkedin',
            field=models.URLField(blank=True, help_text='Your Linkedin URL', max_length=255, null=True),
        ),
        migrations.AlterField(
            model_name='socialmediasettings',
            name='twitter_name',
            field=models.URLField(blank=True, help_text='Your Twitter URL', max_length=255, null=True),
        ),
        migrations.AlterField(
            model_name='socialmediasettings',
            name='youtube',
            field=models.URLField(blank=True, help_text='Your YouTube Channel URL', null=True),
        ),
        migrations.AlterField(
            model_name='standardpage',
            name='template_string',
            field=models.CharField(choices=[('pages/standard_page.html', 'Default Template'), ('pages/standard_page_full.html', 'Standard Page Full')], default='pages/standard_page.html', max_length=255),
        ),
    ]
| mit | 3ce9de2f1160bbeb6a2ec93711a870ab | 38.290909 | 204 | 0.603887 | 4.253937 | false | false | false | false |
chrisdev/wagtail-cookiecutter-foundation | {{cookiecutter.project_slug}}/pages/migrations/0016_sitebranding.py | 2 | 1105 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2017-10-10 14:02
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Add the per-site ``SiteBranding`` setting (site name and logo)."""

    dependencies = [
        ('wagtailimages', '0019_delete_filter'),
        ('wagtailcore', '0040_page_draft_title'),
        ('pages', '0015_advert_button_text'),
    ]

    operations = [
        migrations.CreateModel(
            name='SiteBranding',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('site_name', models.CharField(blank=True, max_length=250, null=True)),
                ('logo', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='wagtailimages.Image')),
                # One settings row per wagtail Site.
                ('site', models.OneToOneField(editable=False, on_delete=django.db.models.deletion.CASCADE, to='wagtailcore.Site')),
            ],
            options={
                'abstract': False,
            },
        ),
    ]
| mit | a42833194fa2ec259d73061b6a0acf10 | 35.833333 | 157 | 0.59638 | 3.904594 | false | false | false | false |
chubin/cheat.sh | lib/cheat_wrapper.py | 1 | 3573 | """
Main cheat.sh wrapper.
Parse the query, get answers from getters (using get_answer),
visualize it using frontends and return the result.
Exports:
cheat_wrapper()
"""
import re
import json
from routing import get_answers, get_topics_list
from search import find_answers_by_keyword
from languages_data import LANGUAGE_ALIAS, rewrite_editor_section_name
import postprocessing
import frontend.html
import frontend.ansi
def _add_section_name(query):
# temporary solution before we don't find a fixed one
if ' ' not in query and '+' not in query:
return query
if '/' in query:
return query
if ' ' in query:
return re.sub(r' +', '/', query, count=1)
if '+' in query:
# replace only single + to avoid catching g++ and friends
return re.sub(r'([^\+])\+([^\+])', r'\1/\2', query, count=1)
def cheat_wrapper(query, request_options=None, output_format='ansi'):
    """
    Function that delivers cheat sheet for `query`.

    `output_format` selects the frontend: 'ansi' (default), 'html', or
    'json'.  Additional request options specified in `request_options`.
    """

    def _rewrite_aliases(word):
        # One-off alias that cannot be expressed via LANGUAGE_ALIAS.
        if word == ':bash.completion':
            return ':bash_completion'
        return word

    def _rewrite_section_name(query):
        """
        Rewriting special section names:
        * EDITOR:NAME => emacs:go-mode
        """
        if '/' not in query:
            return query

        section_name, rest = query.split('/', 1)

        if ':' in section_name:
            # EDITOR:NAME form — map to the editor-specific section name.
            section_name = rewrite_editor_section_name(section_name)
        section_name = LANGUAGE_ALIAS.get(section_name, section_name)

        return "%s/%s" % (section_name, rest)

    def _sanitize_query(query):
        # Strip characters usable for HTML injection.
        return re.sub('[<>"]', '', query)

    def _strip_hyperlink(query):
        # NOTE: currently unused (see the commented-out call below).
        return re.sub('(,[0-9]+)+$', '', query)

    def _parse_query(query):
        # Split "topic~keyword/options" into (topic, keyword, options);
        # without '~' the keyword is None and options are empty.
        topic = query
        keyword = None
        search_options = ""

        keyword = None
        if '~' in query:
            topic = query
            pos = topic.index('~')
            keyword = topic[pos+1:]
            topic = topic[:pos]

            if '/' in keyword:
                # Options trail the last '/': reverse, cut at the first
                # '/' of the reversed string, reverse back.
                search_options = keyword[::-1]
                search_options = search_options[:search_options.index('/')]
                keyword = keyword[:-len(search_options)-1]

        return topic, keyword, search_options

    # Normalisation pipeline: sanitize, insert section separator,
    # resolve aliases, rewrite editor sections.
    query = _sanitize_query(query)
    query = _add_section_name(query)
    query = _rewrite_aliases(query)
    query = _rewrite_section_name(query)

    # at the moment, we just remove trailing slashes
    # so queries python/ and python are equal
    # query = _strip_hyperlink(query.rstrip('/'))
    topic, keyword, search_options = _parse_query(query)

    # Keyword queries go through search; plain queries use direct routing.
    if keyword:
        answers = find_answers_by_keyword(
            topic, keyword, options=search_options, request_options=request_options)
    else:
        answers = get_answers(topic, request_options=request_options)

    answers = [
        postprocessing.postprocess(
            answer, keyword, search_options, request_options=request_options)
        for answer in answers
    ]

    answer_data = {
        'query': query,
        'keyword': keyword,
        'answers': answers,
        }

    if output_format == 'html':
        answer_data['topics_list'] = get_topics_list()
        return frontend.html.visualize(answer_data, request_options)
    elif output_format == 'json':
        return json.dumps(answer_data, indent=4)
    return frontend.ansi.visualize(answer_data, request_options)
| mit | 4954f6f7e888d03d8ccd70f599f85438 | 28.528926 | 84 | 0.604534 | 3.965594 | false | false | false | false |
pyvisa/pyvisa-py | pyvisa_py/protocols/usbtmc.py | 2 | 14241 | # -*- coding: utf-8 -*-
"""Implements Session to control USBTMC instruments
Loosely based on PyUSBTMC:python module to handle USB-TMC(Test and
Measurement class) devices. by Noboru Yamamot, Accl. Lab, KEK, JAPAN
This file is an offspring of the Lantz Project.
:copyright: 2014-2020 by PyVISA-py Authors, see AUTHORS for more details.
:license: MIT, see LICENSE for more details.
"""
import enum
import struct
import time
import warnings
from collections import namedtuple
import usb
from .usbutil import find_devices, find_endpoint, find_interfaces, usb_find_desc
class MsgID(enum.IntEnum):
    """USBTMC MsgID values (USBTMC spec., Table 2).

    OUT and IN message identifiers deliberately share numeric values, so
    ``dev_dep_msg_in`` and ``vendor_specific_in`` are enum aliases of the
    corresponding ``request_*`` members.
    """

    dev_dep_msg_out = 1
    request_dev_dep_msg_in = 2
    dev_dep_msg_in = 2  # alias of request_dev_dep_msg_in
    vendor_specific_out = 126
    request_vendor_specific_in = 127
    vendor_specific_in = 127  # alias of request_vendor_specific_in

    # USB488 subclass extension
    trigger = 128
class Request(enum.IntEnum):
    """bRequest values for USBTMC class-specific control transfers
    (values >= 128 belong to the USB488 subclass)."""

    initiate_abort_bulk_out = 1
    check_abort_bulk_out_status = 2
    initiate_abort_bulk_in = 3
    check_abort_bulk_in_status = 4
    initiate_clear = 5
    check_clear_status = 6
    get_capabilities = 7
    indicator_pulse = 64

    # USB488
    read_status_byte = 128
    ren_control = 160
    go_to_local = 161
    local_lockout = 162
class UsbTmcStatus(enum.IntEnum):
    """USBTMC_status codes returned by class-specific control requests."""

    success = 1
    pending = 2
    failed = 0x80
    transfer_not_in_progress = 0x81
    split_not_in_progress = 0x82
    split_in_progress = 0x83
# Capability flags decoded from the GET_CAPABILITIES response (USB488 bits).
UsbTmcCapabilities = namedtuple("UsbTmcCapabilities", "usb488 ren_control trigger")
def find_tmc_devices(
    vendor=None, product=None, serial_number=None, custom_match=None, **kwargs
):
    """Find connected USBTMC devices. See usbutil.find_devices for more info."""

    def is_usbtmc(dev):
        # Honour any caller-supplied predicate first, then require at least
        # one interface with the USBTMC class/subclass pair (0xFE / 3).
        if custom_match and not custom_match(dev):
            return False
        interfaces = find_interfaces(dev, bInterfaceClass=0xFE, bInterfaceSubClass=3)
        return bool(interfaces)

    return find_devices(vendor, product, serial_number, is_usbtmc, **kwargs)
class BulkOutMessage(object):
    """The Host uses the Bulk-OUT endpoint to send USBTMC command messages to
    the device.
    """

    @staticmethod
    def build_array(btag, eom, chunk):
        """Frame *chunk* as a DEV_DEP_MSG_OUT transfer: a 12-byte header
        (MsgID, bTag, ~bTag, transfer size, EOM flag) followed by the
        payload, zero-padded to a 4-byte boundary."""
        size = len(chunk)
        header = struct.pack("BBBx", MsgID.dev_dep_msg_out, btag, ~btag & 0xFF)
        header += struct.pack("<LBxxx", size, eom)
        padding = b"\0" * ((4 - size) % 4)
        return header + chunk + padding
class BulkInMessage(
    namedtuple(
        "BulkInMessage",
        "msgid btag btaginverse " "transfer_size transfer_attributes data",
    )
):
    """The Host uses the Bulk-IN endpoint to read USBTMC response messages from
    the device.

    The Host must first send a USBTMC command message that expects a response
    before attempting to read a USBTMC response message.
    """

    @classmethod
    def from_bytes(cls, data):
        """Parse a DEV_DEP_MSG_IN transfer, falling back to the quirky
        parser when the device reports an unexpected MsgID."""
        msgid, btag, btaginverse = struct.unpack_from("BBBx", data)
        if msgid != MsgID.dev_dep_msg_in:
            warnings.warn(
                "Unexpected MsgID format. Consider updating the device's firmware. See https://github.com/pyvisa/pyvisa-py/issues/20"
            )
            return BulkInMessage.from_quirky(data)

        transfer_size, transfer_attributes = struct.unpack_from("<LBxxx", data, 4)

        # Drop the 12-byte header and any alignment padding past transfer_size.
        payload = data[12 : 12 + transfer_size]
        return cls(msgid, btag, btaginverse, transfer_size, transfer_attributes, payload)

    @classmethod
    def from_quirky(cls, data):
        """Constructs a correct response for quirky devices."""
        msgid, btag, btaginverse = struct.unpack_from("BBBx", data)
        payload = data.rstrip(b"\x00")
        # If a ';' is present, assume a regular 12-byte header precedes the
        # payload; otherwise treat the whole buffer as a bare EOM payload.
        if b";" not in payload:
            transfer_size = 0
            transfer_attributes = 1
        else:
            transfer_size, transfer_attributes = struct.unpack_from("<LBxxx", payload, 4)
            payload = payload[12:]
        return cls(msgid, btag, btaginverse, transfer_size, transfer_attributes, payload)

    @staticmethod
    def build_array(btag, transfer_size, term_char=None):
        """Build a REQUEST_DEV_DEP_MSG_IN transfer asking for up to
        *transfer_size* bytes, optionally terminated at *term_char*.

        :param transfer_size: maximum number of bytes to read back.
        :param btag: transfer identifier (1..255).
        :param term_char: optional termination character.
        :return: the 12-byte request as bytes.
        """
        if term_char is None:
            transfer_attributes = 0
            term_char = 0
        else:
            transfer_attributes = 2
        return struct.pack(
            "BBBx", MsgID.request_dev_dep_msg_in, btag, ~btag & 0xFF
        ) + struct.pack("<LBBxx", transfer_size, transfer_attributes, term_char)
class USBRaw(object):
    """Base class for drivers that communicate with instruments
    via usb port using pyUSB
    """

    #: Configuration number to be used. If None, the default will be used.
    CONFIGURATION = None

    #: Interface index it be used
    INTERFACE = (0, 0)

    #: Receive and Send endpoints to be used. If None the first IN (or OUT)
    #: BULK endpoint will be used.
    ENDPOINTS = (None, None)

    find_devices = staticmethod(find_devices)

    def __init__(
        self,
        vendor=None,
        product=None,
        serial_number=None,
        device_filters=None,
        timeout=None,
        **kwargs
    ):
        super(USBRaw, self).__init__()

        # Timeout expressed in ms as an integer and limited to 2**32-1
        # If left to None pyusb will use its default value
        self.timeout = timeout

        device_filters = device_filters or {}
        devices = list(
            self.find_devices(vendor, product, serial_number, None, **device_filters)
        )

        # Exactly one matching device is required; ambiguity is an error.
        if not devices:
            raise ValueError("No device found.")
        elif len(devices) > 1:
            desc = "\n".join(str(dev) for dev in devices)
            raise ValueError(
                "{} devices found:\n{}\nPlease narrow the search"
                " criteria".format(len(devices), desc)
            )

        self.usb_dev = devices[0]

        # Detach the kernel driver if it holds interface 0; not supported
        # on all platforms, hence NotImplementedError is tolerated.
        try:
            if self.usb_dev.is_kernel_driver_active(0):
                self.usb_dev.detach_kernel_driver(0)
        except (usb.core.USBError, NotImplementedError):
            pass

        try:
            self.usb_dev.set_configuration()
        except usb.core.USBError as e:
            raise Exception("failed to set configuration\n %s" % e)

        # Selecting the alt setting can fail on some devices; best effort.
        try:
            self.usb_dev.set_interface_altsetting()
        except usb.core.USBError:
            pass

        self.usb_intf = self._find_interface(self.usb_dev, self.INTERFACE)

        self.usb_recv_ep, self.usb_send_ep = self._find_endpoints(
            self.usb_intf, self.ENDPOINTS
        )

    def _find_interface(self, dev, setting):
        # NOTE(review): both parameters are unused here; subclasses override
        # this with real interface discovery.
        return self.usb_dev.get_active_configuration()[self.INTERFACE]

    def _find_endpoints(self, interface, setting):
        # Resolve (recv, send) endpoints: fall back to the first BULK IN/OUT
        # endpoint when no explicit address is configured.
        recv, send = setting
        if recv is None:
            recv = find_endpoint(interface, usb.ENDPOINT_IN, usb.ENDPOINT_TYPE_BULK)
        else:
            recv = usb_find_desc(interface, bEndpointAddress=recv)

        if send is None:
            send = find_endpoint(interface, usb.ENDPOINT_OUT, usb.ENDPOINT_TYPE_BULK)
        else:
            send = usb_find_desc(interface, bEndpointAddress=send)

        return recv, send

    def write(self, data):
        """Send raw bytes to the instrument.

        :param data: bytes to be sent to the instrument
        :type data: bytes
        :return: number of bytes written.
        :raises ValueError: on a USB-level error.
        """

        try:
            return self.usb_send_ep.write(data)
        except usb.core.USBError as e:
            raise ValueError(str(e))

    def read(self, size):
        """Receive raw bytes to the instrument.

        :param size: number of bytes to receive (coerced to at least 1)
        :return: received bytes
        :return type: bytes
        """

        if size <= 0:
            size = 1

        data = self.usb_recv_ep.read(size, self.timeout).tobytes()

        return data

    def close(self):
        # Release all pyusb resources held for this device.
        return usb.util.dispose_resources(self.usb_dev)
class USBTMC(USBRaw):
# Maximum number of bytes per transfer (for sending and receiving).
RECV_CHUNK = 1024**2
find_devices = staticmethod(find_tmc_devices)
def __init__(self, vendor=None, product=None, serial_number=None, **kwargs):
super(USBTMC, self).__init__(vendor, product, serial_number, **kwargs)
self.usb_intr_in = find_endpoint(
self.usb_intf, usb.ENDPOINT_IN, usb.ENDPOINT_TYPE_INTERRUPT
)
self.usb_dev.reset()
self.usb_dev.set_configuration()
time.sleep(0.01)
self._capabilities = self._get_capabilities()
self._btag = 0
if not (self.usb_recv_ep and self.usb_send_ep):
msg = "TMC device must have both Bulk-In and Bulk-out endpoints."
raise ValueError(msg)
self._enable_remote_control()
def _enable_remote_control(self):
if not self._capabilities.ren_control:
return
self.usb_dev.ctrl_transfer(
usb.util.build_request_type(
usb.util.CTRL_IN,
usb.util.CTRL_TYPE_CLASS,
usb.util.CTRL_RECIPIENT_INTERFACE,
),
Request.ren_control,
1,
self.usb_intf.index,
1,
timeout=self.timeout,
)
def _get_capabilities(self):
c = self.usb_dev.ctrl_transfer(
usb.util.build_request_type(
usb.util.CTRL_IN,
usb.util.CTRL_TYPE_CLASS,
usb.util.CTRL_RECIPIENT_INTERFACE,
),
Request.get_capabilities,
0x0000,
self.usb_intf.index,
0x0018,
timeout=self.timeout,
)
usb488_capabilities = c[0xE]
# bit #2: The interface is a 488.2 USB488 interface.
# bit #1: The interface accepts REN_CONTROL, GO_TO_LOCAL,
# and LOCAL_LOCKOUT requests.
# bit #0: The interface accepts the MsgID = TRIGGER
# USBTMC command message and forwards
# TRIGGER requests to the Function Layer.
return UsbTmcCapabilities(
usb488=bool(usb488_capabilities & (1 << 2)),
ren_control=bool(usb488_capabilities & (1 << 1)),
trigger=bool(usb488_capabilities & (1 << 0)),
)
def _find_interface(self, dev, setting):
interfaces = find_interfaces(dev, bInterfaceClass=0xFE, bInterfaceSubClass=3)
if not interfaces:
raise ValueError("USB TMC interface not found.")
elif len(interfaces) > 1:
pass
return interfaces[0]
def _abort_bulk_in(self, btag):
"""Request that the device abort a pending Bulk-IN operation."""
abort_timeout_ms = 5000
# Send INITIATE_ABORT_BULK_IN.
# According to USBTMC 1.00 4.2.1.4:
# wValue = bTag value of transfer to be aborted
# wIndex = Bulk-IN endpoint
# wLength = 0x0002 (length of device response)
data = self.usb_dev.ctrl_transfer(
usb.util.build_request_type(
usb.util.CTRL_IN,
usb.util.CTRL_TYPE_CLASS,
usb.util.CTRL_RECIPIENT_ENDPOINT,
),
Request.initiate_abort_bulk_in,
btag,
self.usb_recv_ep.bEndpointAddress,
0x0002,
timeout=abort_timeout_ms,
)
if data[0] != UsbTmcStatus.success:
# Abort Bulk-IN failed. Ignore it.
return
# Read remaining data from Bulk-IN endpoint.
self.usb_recv_ep.read(self.RECV_CHUNK, abort_timeout_ms)
# Send CHECK_ABORT_BULK_IN_STATUS until it completes.
# According to USBTMC 1.00 4.2.1.5:
# wValue = 0x0000
# wIndex = Bulk-IN endpoint
# wLength = 0x0008 (length of device response)
for retry in range(100):
data = self.usb_dev.ctrl_transfer(
usb.util.build_request_type(
usb.util.CTRL_IN,
usb.util.CTRL_TYPE_CLASS,
usb.util.CTRL_RECIPIENT_ENDPOINT,
),
Request.check_abort_bulk_in_status,
0x0000,
self.usb_recv_ep.bEndpointAddress,
0x0008,
timeout=abort_timeout_ms,
)
if data[0] != UsbTmcStatus.pending:
break
time.sleep(0.05)
def write(self, data):
"""Send raw bytes to the instrument.
:param data: bytes to be sent to the instrument
:type data: bytes
"""
begin, end, size = 0, 0, len(data)
bytes_sent = 0
raw_write = super(USBTMC, self).write
# Send all data via one or more Bulk-OUT transfers.
# Set the EOM flag on the last transfer only.
# Send at least one transfer (possibly empty).
while (end == 0) or (end < size):
begin, end = end, begin + self.RECV_CHUNK
self._btag = (self._btag % 255) + 1
eom = end >= size
data = BulkOutMessage.build_array(self._btag, eom, data[begin:end])
bytes_sent += raw_write(data)
return size
def read(self, size):
    """Read one complete USBTMC message from the instrument.

    Repeatedly requests Bulk-IN transfers and accumulates their payload
    until the device sets the EOM (end of message) flag.

    :param size: requested chunk size; if positive and smaller than
        RECV_CHUNK it caps the per-transfer payload size.
        NOTE(review): reading continues until EOM regardless of ``size``,
        so the returned payload may exceed ``size`` -- confirm intended.
    :return: the accumulated payload as ``bytes``.
    """
    recv_chunk = self.RECV_CHUNK
    if size > 0 and size < recv_chunk:
        recv_chunk = size
    # Size the raw read to hold the 12-byte Bulk-IN header plus the
    # payload plus a generous allowance for device padding.
    header_size = 12
    max_padding = 511
    eom = False
    raw_read = super(USBTMC, self).read
    raw_write = super(USBTMC, self).write
    received = bytearray()
    while not eom:
        self._btag = (self._btag % 255) + 1
        # Ask the device for up to recv_chunk payload bytes.
        req = BulkInMessage.build_array(self._btag, recv_chunk, None)
        raw_write(req)
        try:
            resp = raw_read(recv_chunk + header_size + max_padding)
            response = BulkInMessage.from_bytes(resp)
        except (usb.core.USBError, ValueError):
            # Abort failed Bulk-IN operation.
            self._abort_bulk_in(self._btag)
            raise
        received.extend(response.data)
        # Detect EOM only when device sends all expected bytes.
        if len(response.data) >= response.transfer_size:
            eom = response.transfer_attributes & 1
    return bytes(received)
| mit | db8f27c5a101d4f383e411c266161ac2 | 28.851153 | 133 | 0.58115 | 3.806202 | false | false | false | false |
coleifer/huey | huey/signals.py | 3 | 1190 | import itertools
# Names of the events emitted during a task's lifecycle.  Handlers
# registered on a Signal receive one of these strings as their first
# argument.
SIGNAL_CANCELED = 'canceled'
SIGNAL_COMPLETE = 'complete'
SIGNAL_ERROR = 'error'
SIGNAL_EXECUTING = 'executing'
SIGNAL_EXPIRED = 'expired'
SIGNAL_INTERRUPTED = 'interrupted'
SIGNAL_LOCKED = 'locked'
SIGNAL_RETRYING = 'retrying'
SIGNAL_REVOKED = 'revoked'
SIGNAL_SCHEDULED = 'scheduled'
class Signal(object):
    """Minimal synchronous pub/sub dispatcher for task lifecycle events.

    Receivers registered without an explicit signal name go into the
    special ``'any'`` bucket and are invoked for every signal, after the
    receivers registered for the specific signal name.
    """
    __slots__ = ('receivers',)

    def __init__(self):
        # Maps signal name -> list of receiver callables.  The 'any'
        # bucket always exists.
        self.receivers = {'any': []}

    def connect(self, receiver, *signals):
        """Register ``receiver`` for the given signals.

        :param receiver: callable accepting ``(signal, task, *args, **kwargs)``.
        :param signals: signal names; when omitted the receiver is
            registered for every signal ('any').
        """
        if not signals:
            signals = ('any',)
        for signal in signals:
            self.receivers.setdefault(signal, [])
            self.receivers[signal].append(receiver)

    def disconnect(self, receiver, *signals):
        """Unregister ``receiver`` from the given signals (or from all
        signals when none are given).

        Unknown signal names and receivers that were never connected are
        silently ignored.
        """
        if not signals:
            signals = list(self.receivers)
        for signal in signals:
            try:
                self.receivers[signal].remove(receiver)
            except (KeyError, ValueError):
                # KeyError: signal name was never registered (previously
                # escaped uncaught).  ValueError: receiver not connected
                # for this signal.
                pass

    def send(self, signal, task, *args, **kwargs):
        """Invoke the receivers for ``signal``, then the 'any' receivers."""
        receivers = itertools.chain(self.receivers.get(signal, ()),
                                    self.receivers['any'])
        for receiver in receivers:
            receiver(signal, task, *args, **kwargs)
| mit | fcd2739e186fe845e68451ee1e0cd80f | 27.333333 | 67 | 0.591597 | 4.089347 | false | false | false | false |
coleifer/huey | huey/tests/test_signals.py | 3 | 5612 | import datetime
from huey.signals import *
from huey.tests.base import BaseTestCase
class TestSignals(BaseTestCase):
    """Exercises huey's signal emission during task execution.

    setUp installs a catch-all handler that records every
    (signal, task, args) tuple in ``self._state``; ``assertSignals``
    compares the recorded signal names against an expected sequence and
    clears the log, so consecutive assertions check only the signals
    emitted since the previous one.
    """
    def setUp(self):
        super(TestSignals, self).setUp()
        self._state = []

        # Catch-all handler: no signal names given, so it fires on every
        # signal emitted by the huey instance under test.
        @self.huey.signal()
        def signal_handle(signal, task, *args):
            self._state.append((signal, task, args))

    def assertSignals(self, expected):
        # Compare only the signal names, then reset the log.
        self.assertEqual([s[0] for s in self._state], expected)
        self._state = []

    def test_signals_simple(self):
        @self.huey.task()
        def task_a(n):
            return n + 1

        r = task_a(3)
        self.assertSignals([])
        self.assertEqual(self.execute_next(), 4)
        self.assertSignals([SIGNAL_EXECUTING, SIGNAL_COMPLETE])

        r = task_a.schedule((2,), delay=60)
        self.assertSignals([])
        self.assertTrue(self.execute_next() is None)
        self.assertSignals([SIGNAL_SCHEDULED])

        # None + 1 raises, producing the ERROR signal.
        r = task_a(None)
        self.assertSignals([])
        self.assertTrue(self.execute_next() is None)
        self.assertSignals([SIGNAL_EXECUTING, SIGNAL_ERROR])

    def test_signal_complete_result_ready(self):
        @self.huey.task()
        def task_a(n):
            return n + 1

        results = []

        # The task result must already be readable when COMPLETE fires.
        @self.huey.signal(SIGNAL_COMPLETE)
        def on_complete(sig, task, *_):
            results.append(self.huey.result(task.id))

        r = task_a(2)
        self.assertEqual(self.execute_next(), 3)
        self.assertEqual(results, [3])

    def test_signals_on_retry(self):
        @self.huey.task(retries=1)
        def task_a(n):
            return n + 1

        r = task_a(None)
        self.assertSignals([])
        self.assertTrue(self.execute_next() is None)
        self.assertSignals([SIGNAL_EXECUTING, SIGNAL_ERROR, SIGNAL_RETRYING])
        self.assertTrue(self.execute_next() is None)
        self.assertSignals([SIGNAL_EXECUTING, SIGNAL_ERROR])

        # With a retry delay the retry goes through the schedule,
        # adding a SCHEDULED signal after RETRYING.
        @self.huey.task(retries=1, retry_delay=60)
        def task_b(n):
            return n + 1

        r = task_b(None)
        self.assertSignals([])
        self.assertTrue(self.execute_next() is None)
        self.assertSignals([SIGNAL_EXECUTING, SIGNAL_ERROR, SIGNAL_RETRYING,
                            SIGNAL_SCHEDULED])

    def test_signals_revoked(self):
        @self.huey.task()
        def task_a(n):
            return n + 1

        task_a.revoke(revoke_once=True)
        r = task_a(2)
        self.assertSignals([])
        self.assertTrue(self.execute_next() is None)
        self.assertSignals([SIGNAL_REVOKED])

        # revoke_once: the second invocation runs normally.
        r = task_a(3)
        self.assertEqual(self.execute_next(), 4)
        self.assertSignals([SIGNAL_EXECUTING, SIGNAL_COMPLETE])

    def test_signals_locked(self):
        @self.huey.task()
        @self.huey.lock_task('lock-a')
        def task_a(n):
            return n + 1

        r = task_a(1)
        self.assertSignals([])
        self.assertEqual(self.execute_next(), 2)
        self.assertSignals([SIGNAL_EXECUTING, SIGNAL_COMPLETE])

        # Holding the lock externally makes the task emit LOCKED.
        with self.huey.lock_task('lock-a'):
            r = task_a(2)
            self.assertSignals([])
            self.assertTrue(self.execute_next() is None)
            self.assertSignals([SIGNAL_EXECUTING, SIGNAL_LOCKED])

    def test_signal_expired(self):
        @self.huey.task(expires=10)
        def task_a(n):
            return n + 1

        now = datetime.datetime.now()
        expires = now + datetime.timedelta(seconds=15)
        r = task_a(2)
        self.assertSignals([])
        # Executing "15 seconds later" is past the 10-second expiry.
        self.assertTrue(self.execute_next(expires) is None)
        self.assertSignals([SIGNAL_EXPIRED])

        r = task_a(3)
        self.assertSignals([])
        # NOTE(review): assertTrue(x, 4) treats 4 as the failure message,
        # so this always passes; assertEqual was probably intended.
        self.assertTrue(self.execute_next(), 4)
        self.assertSignals([SIGNAL_EXECUTING, SIGNAL_COMPLETE])

    def test_specific_handler(self):
        extra_state = []

        # Handler registered for EXECUTING only.
        @self.huey.signal(SIGNAL_EXECUTING)
        def extra_handler(signal, task):
            extra_state.append(task.args[0])

        @self.huey.task()
        def task_a(n):
            return n + 1

        r = task_a(3)
        self.assertEqual(extra_state, [])
        self.assertEqual(self.execute_next(), 4)
        self.assertEqual(extra_state, [3])
        self.assertSignals([SIGNAL_EXECUTING, SIGNAL_COMPLETE])

        r2 = task_a(1)
        self.assertEqual(self.execute_next(), 2)
        self.assertEqual(extra_state, [3, 1])
        self.assertSignals([SIGNAL_EXECUTING, SIGNAL_COMPLETE])

        # After disconnecting, extra_state stops growing.
        self.huey.disconnect_signal(extra_handler, SIGNAL_EXECUTING)
        r3 = task_a(2)
        self.assertEqual(self.execute_next(), 3)
        self.assertEqual(extra_state, [3, 1])
        self.assertSignals([SIGNAL_EXECUTING, SIGNAL_COMPLETE])

    def test_multi_handlers(self):
        state1 = []
        state2 = []

        @self.huey.signal(SIGNAL_EXECUTING, SIGNAL_COMPLETE)
        def handler1(signal, task):
            state1.append(signal)

        @self.huey.signal(SIGNAL_EXECUTING, SIGNAL_COMPLETE)
        def handler2(signal, task):
            state2.append(signal)

        @self.huey.task()
        def task_a(n):
            return n + 1

        r = task_a(1)
        self.assertEqual(self.execute_next(), 2)
        self.assertEqual(state1, ['executing', 'complete'])
        self.assertEqual(state2, ['executing', 'complete'])

        # handler1 keeps EXECUTING only; handler2 is removed entirely.
        self.huey.disconnect_signal(handler1, SIGNAL_COMPLETE)
        self.huey.disconnect_signal(handler2)

        r2 = task_a(2)
        self.assertEqual(self.execute_next(), 3)
        self.assertEqual(state1, ['executing', 'complete', 'executing'])
        self.assertEqual(state2, ['executing', 'complete'])
| mit | 2ba96f948eb0a1ae35b72b9676ec1c0c | 30.005525 | 77 | 0.585175 | 3.682415 | false | true | false | false |
coleifer/huey | examples/django_ex/djangoex/test_app/tasks.py | 3 | 1139 | import time
from huey import crontab
from huey.contrib.djhuey import task, periodic_task, db_task
def tprint(s, c=32):
    """Print *s* wrapped in a bold ANSI color escape (color code *c*),
    so messages from the example tasks stand out in the console."""
    colored = '\x1b[1;%sm%s\x1b[0m' % (c, s)
    print(colored)
# Tasks used in examples.
@task()
def add(a, b):
    """Task: return the sum of its two arguments."""
    total = a + b
    return total
@task()
def mul(a, b):
    """Task: return the product of its two arguments."""
    product = a * b
    return product
@db_task()  # Opens DB connection for duration of task.
def slow(n):
    """Task: sleep for *n* seconds, logging before and after."""
    start_msg = 'going to sleep for %s seconds' % n
    tprint(start_msg)
    time.sleep(n)
    done_msg = 'finished sleeping for %s seconds' % n
    tprint(done_msg)
    return n
@task(retries=1, retry_delay=5, context=True)
def flaky_task(task=None):
    """Task that fails on the first attempt and succeeds on retry.

    With ``context=True`` huey passes the task instance, whose
    ``retries`` counter distinguishes the first attempt from the retry
    (it reaches 0 on the retry).
    """
    on_retry = task is not None and task.retries == 0
    if on_retry:
        tprint('flaky task succeeded on retry.')
        return 'succeeded on retry.'
    tprint('flaky task is about to raise an exception.', 31)
    raise Exception('flaky task failed!')
# Periodic tasks.
@periodic_task(crontab(minute='*/2'))
def every_other_minute():
    """Periodic task scheduled by crontab for every second minute."""
    message = 'This task runs every 2 minutes.'
    tprint(message, 35)
@periodic_task(crontab(minute='*/5'))
def every_five_mins():
    """Periodic task scheduled by crontab for every fifth minute."""
    message = 'This task runs every 5 minutes.'
    tprint(message, 34)
| mit | cab19176462cc531c97718369d76f0b7 | 21.333333 | 74 | 0.656716 | 3.112022 | false | false | false | false |
mapbox/mapbox-sdk-py | mapbox/services/matrix.py | 1 | 5455 | """Matrix API V1"""
import re
import warnings

from mapbox.encoding import encode_waypoints
from mapbox.errors import InvalidParameterError
from mapbox.errors import InvalidProfileError, MapboxDeprecationWarning
from mapbox.services.base import Service
class DirectionsMatrix(Service):
    """Access to the Matrix API V1"""

    api_name = 'directions-matrix'
    api_version = 'v1'

    valid_profiles = [
        'mapbox/driving', 'mapbox/cycling', 'mapbox/walking',
        'mapbox/driving-traffic']
    valid_annotations = ['duration', 'distance']

    @property
    def baseuri(self):
        """Base URI of the Matrix API endpoint."""
        return 'https://{0}/{1}/{2}'.format(
            self.host, self.api_name, self.api_version)

    def _validate_profile(self, profile):
        """Normalize and validate a travel profile name.

        Deprecated Distance v1 / Directions v4 profile names are mapped
        to their modern equivalents with a deprecation warning.

        :raises InvalidProfileError: if the profile is not recognized.
        """
        # Support for Distance v1 and Directions v4 profiles
        profiles_map = {
            'mapbox.driving': 'mapbox/driving',
            'mapbox.cycling': 'mapbox/cycling',
            'mapbox.walking': 'mapbox/walking',
            'driving': 'mapbox/driving',
            'cycling': 'mapbox/cycling',
            'walking': 'mapbox/walking'}
        if profile in profiles_map:
            profile = profiles_map[profile]
            warnings.warn("Converting deprecated profile, use {} instead".format(profile),
                          MapboxDeprecationWarning)
        if profile not in self.valid_profiles:
            raise InvalidProfileError(
                "{0} is not a valid profile".format(profile))
        return profile

    def _validate_annotations(self, annotations):
        """Validate the requested matrix annotations.

        :raises InvalidParameterError: if an annotation is not one of
            ``valid_annotations``.
        """
        if annotations is None:
            return None
        results = []
        for annotation in annotations:
            if annotation not in self.valid_annotations:
                # Bug fix: this previously raised via the unimported name
                # ``errors.InvalidParameterError``, which crashed with
                # NameError instead of the intended exception.
                raise InvalidParameterError(
                    "{0} is not a valid annotation".format(annotation))
            results.append(annotation)
        return results

    def _make_query(self, srcindexes, dstindexes):
        """Build the 'sources'/'destinations' query parameters from
        optional lists of coordinate indices."""
        params = {}
        if srcindexes is not None and isinstance(srcindexes, list):
            params['sources'] = ';'.join([str(idx) for idx in srcindexes])
        if dstindexes is not None and isinstance(dstindexes, list):
            params['destinations'] = ';'.join([str(idx) for idx in dstindexes])
        return params

    def matrix(self, coordinates, profile='mapbox/driving',
               sources=None, destinations=None, annotations=None):
        """Request a directions matrix for trips between coordinates

        In the default case, the matrix returns a symmetric matrix,
        using all input coordinates as sources and destinations. You may
        also generate an asymmetric matrix, with only some coordinates
        as sources or destinations:

        Parameters
        ----------
        coordinates : sequence
            A sequence of coordinates, which may be represented as
            GeoJSON features, GeoJSON geometries, or (longitude,
            latitude) pairs.
        profile : str
            The trip travel mode. Valid modes are listed in the class's
            valid_profiles attribute.
        annotations : list
            Used to specify the resulting matrices. Possible values are
            listed in the class's valid_annotations attribute.
        sources : list
            Indices of source coordinates to include in the matrix.
            Default is all coordinates.
        destinations : list
            Indices of destination coordinates to include in the
            matrix. Default is all coordinates.

        Returns
        -------
        requests.Response
            Note: the directions matrix itself is obtained by calling the
            response's json() method. The resulting mapping has a code,
            the destinations and the sources, and depending of the
            annotations specified, it can also contain a durations matrix,
            a distances matrix or both of them (by default, only the
            durations matrix is provided).

            code : str
                Status of the response
            sources : list
                Results of snapping selected coordinates to the nearest
                addresses.
            destinations : list
                Results of snapping selected coordinates to the nearest
                addresses.
            durations : list
                An array of arrays representing the matrix in row-major
                order. durations[i][j] gives the travel time from the i-th
                source to the j-th destination. All values are in seconds.
                The duration between the same coordinate is always 0. If
                a duration can not be found, the result is null.
            distances : list
                An array of arrays representing the matrix in row-major
                order. distances[i][j] gives the distance from the i-th
                source to the j-th destination. All values are in meters.
                The distance between the same coordinate is always 0. If
                a distance can not be found, the result is null.
        """
        annotations = self._validate_annotations(annotations)
        profile = self._validate_profile(profile)
        coords = encode_waypoints(coordinates)

        params = self._make_query(sources, destinations)
        if annotations is not None:
            params.update({'annotations': ','.join(annotations)})

        uri = '{0}/{1}/{2}'.format(self.baseuri, profile, coords)
        res = self.session.get(uri, params=params)
        self.handle_http_error(res)
        return res
| mit | 07a1d65b48b3272911bd81bbd8c2771b | 38.528986 | 90 | 0.622181 | 4.755885 | false | false | false | false |
olemb/dbfread | tests/test_field_parser.py | 1 | 4465 | import datetime
from decimal import Decimal
from pytest import raises
from dbfread.field_parser import FieldParser
class MockHeader(object):
    # Minimal header stub; tests override dbversion through
    # make_field_parser(dbversion=...).
    dbversion = 0x02
class MockDBF(object):
    """Stub standing in for a DBF table object in FieldParser tests."""
    def __init__(self):
        self.header = MockHeader()
        self.encoding = 'ascii'
        self.char_decode_errors = 'strict'
class MockField(object):
    """Minimal field record: a type code plus arbitrary attributes."""
    def __init__(self, type='', **kwargs):
        self.type = type
        for key, value in kwargs.items():
            setattr(self, key, value)
class MockMemoFile(dict):
    """Memo-file stub: index 0 is the conventional "no memo" reference
    and maps to None; any other index is a plain dict lookup."""
    def __getitem__(self, index):
        if index == 0:
            return None
        return dict.__getitem__(self, index)
def make_field_parser(field_type, dbversion=0x02, memofile=None):
    """Return a one-argument ``parse(data)`` callable bound to a
    FieldParser for a single mock field of type *field_type*."""
    dbf = MockDBF()
    dbf.header.dbversion = dbversion
    parser = FieldParser(dbf, memofile)
    field = MockField(field_type)
    return lambda data: parser.parse(field, data)
def test_0():
    """'0' fields are returned unchanged as raw bytes."""
    parse = make_field_parser('0')

    assert parse(b'\0') == b'\x00'
    assert parse(b'\xaa\xff') == b'\xaa\xff'
def test_C():
    """'C' fields parse to text (str), not bytes."""
    parse = make_field_parser('C')

    assert type(parse(b'test')) == type(u'')
def test_D():
    """'D' (date) fields: all-zero/blank -> None, else YYYYMMDD."""
    parse = make_field_parser('D')

    assert parse(b'00000000') is None
    assert parse(b' ') is None

    epoch = datetime.date(1970, 1, 1)
    assert parse(b'19700101') == epoch

    # Non-numeric input is rejected.
    with raises(ValueError):
        parse(b'NotIntgr')
def test_F():
    """'F' fields: blank -> None, numeric text -> number."""
    parse = make_field_parser('F')

    assert parse(b'') is None
    assert parse(b' ') is None
    assert parse(b'0') == 0
    assert parse(b'1') == 1
    assert parse(b'-1') == -1
    assert parse(b'3.14') == 3.14

    # In some files * is used for padding.
    assert parse(b'0.01**') == 0.01
    assert parse(b'******') is None

    with raises(ValueError):
        parse(b'jsdf')
# This also tests parse2B() (+)
def test_I():
    """'I' fields are 4-byte little-endian signed integers."""
    parse = make_field_parser('I')

    # Little endian signed integer.  (The original comment said
    # "unsigned", but 0xffffffff parsing to -1 shows it is signed.)
    assert parse(b'\x00\x00\x00\x00') == 0
    assert parse(b'\x01\x00\x00\x00') == 1
    assert parse(b'\xff\xff\xff\xff') == -1
def test_L():
    """'L' (logical): T/t/Y/y -> True, F/f/N/n -> False, '?'/' ' -> None."""
    parse = make_field_parser('L')

    # Iterating a bytes literal yields ints (byte values) on Python 3.
    for char in b'TtYy':
        assert parse(char) is True

    for char in b'FfNn':
        assert parse(char) is False

    for char in b'? ':
        assert parse(char) is None

    # Some invalid values.
    for char in b'!0':
        with raises(ValueError):
            parse(char)
# This also tests B, G and P.
def test_M():
    """'M' (memo) fields resolve an index (4-byte binary or numeric
    text) through the memo file to text; empty data -> None."""
    parse = make_field_parser('M', memofile=MockMemoFile({1: b'test'}))

    assert parse(b'\x01\x00\x00\x00') == u'test'
    assert parse(b'1') == u'test'
    assert parse(b'') is None
    with raises(ValueError):
        parse(b'NotInteger')
def test_B():
    """'B' fields: double in Visual FoxPro, memo index elsewhere."""
    # In VisualFox the B field is a double precision floating point number.
    parse = make_field_parser('B', dbversion=0x30)

    assert isinstance(parse(b'01abcdef'), float)
    assert parse(b'\0' * 8) == 0.0

    # Data must be exactly 8 bytes.
    with raises(Exception):
        parse(b'')

    # In other db versions it is a memo index.
    parse = make_field_parser('B', dbversion=0x02,
                              memofile=MockMemoFile({1: b'test'}))
    # Bug fix: these two checks were bare expressions whose results were
    # silently discarded, so they never asserted anything.
    assert parse(b'1') == b'test'
    assert parse(b'') is None
def test_N():
    """'N' (numeric) fields: blank -> None, numeric text -> number."""
    parse = make_field_parser('N')

    assert parse(b'') is None
    assert parse(b' ') is None
    assert parse(b'1') == 1
    assert parse(b'-99') == -99
    assert parse(b'3.14') == 3.14

    # In some files * is used for padding.
    assert parse(b'0.01**') == 0.01
    assert parse(b'******') is None

    with raises(ValueError):
        parse(b'okasd')
def test_O():
    """Test double field ('O'): 8-byte little-endian IEEE 754 double."""
    parse = make_field_parser('O')

    assert parse(b'\x00' * 8) == 0.0
    assert parse(b'\x00\x00\x00\x00\x00\x00\xf0?') == 1.0
    assert parse(b'\x00\x00\x00\x00\x00\x00Y\xc0') == -100
# This also tests parse40() (@)
def test_T():
    """'T' (datetime) fields: empty/blank data parses to None."""
    parse = make_field_parser('T')

    assert parse(b'') is None
    assert parse(b' ') is None

    # Todo: add more tests.
def test_Y():
    """'Y' (currency): little-endian int64 scaled by 1/10000 -> Decimal."""
    parse = make_field_parser('Y')

    assert parse(b'\1\0\0\0\0\0\0\0') == Decimal('0.0001')
    assert parse(b'\xff\xff\xff\xff\xff\xff\xff\xff') == Decimal('-0.0001')
def test_hex_field():
    """Subclasses can add parsers named after a type's hex code:
    parse3F handles the '?' (0x3F) field type."""
    class PlusFieldParser(FieldParser):
        encoding = 'latin1'

        def parse3F(self, field, data):
            """Parser for '?' field."""
            return None

    parser = PlusFieldParser(MockDBF())
    field = MockField('?')

    # Only checks that dispatch to the custom parser does not raise.
    parser.parse(field, b'test')
| mit | a9842f217a19e9b1a0911337ee913e40 | 23.805556 | 75 | 0.58701 | 3.164422 | false | true | false | false |
olemb/dbfread | dbfread/struct_parser.py | 1 | 1188 | """
Parser that converts (C style) binary structs named tuples.
The struct can be read from a file or a byte string.
"""
import struct
def _make_struct_class(name, names):
class Struct(object):
_names = names
def __init__(self, **kwargs):
vars(self).update(kwargs)
def __repr__(self):
fields = ', '.join('{}={!r}'.format(name, getattr(self, name))
for name in self._names)
return '{}({})'.format(self.__class__.__name__, fields)
Struct.__name__ = name
return Struct
class StructParser:
    """Parses C-style binary structs (from a file or byte string) into
    simple record objects built by ``_make_struct_class``."""

    def __init__(self, name, format, names):
        self.format = format
        self.names = names
        self.struct = struct.Struct(format)
        self.Class = _make_struct_class(name, names)
        self.size = self.struct.size

    def unpack(self, data):
        """Unpack struct from a binary string and return a record object."""
        values = self.struct.unpack(data)
        return self.Class(**dict(zip(self.names, values)))

    def read(self, file):
        """Read struct from a file-like object (implementing read())."""
        return self.unpack(file.read(self.struct.size))
| mit | 3cae82c1699f1c12f0221c735f4c3a44 | 27.97561 | 74 | 0.58165 | 3.973244 | false | false | false | false |
olemb/dbfread | docs/conf.py | 4 | 7855 | # -*- coding: utf-8 -*-
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
import dbfread
from dbfread import __version__
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.ifconfig',
'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'dbfread'
copyright = u'Ole Martin Bjørndalen'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = __version__
# The full version, including alpha/beta/rc tags.
release = __version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'dbfreaddoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'dbfread.tex', u'dbfread Documentation',
u'Ole Martin Bjørndalen', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'dbfread', u'dbfread Documentation',
[u'Ole Martin Bjørndalen'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'dbfread', u'dbfread Documentation',
u'Ole Martin Bjørndalen', 'dbfread', 'Read DBF files with Python',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
| mit | 83b7896c44d2c0b53da753dadaf1ae21 | 30.53012 | 80 | 0.702076 | 3.742135 | false | false | false | false |
mitmproxy/mitmproxy | examples/contrib/webscanner_helper/urlinjection.py | 2 | 7115 | import abc
import html
import json
import logging
from mitmproxy import flowfilter
from mitmproxy.http import HTTPFlow
logger = logging.getLogger(__name__)
class InjectionGenerator:
    """Abstract base class for generators of injection content used to
    inject the URL index into a response."""

    # Encoding used when (de)serializing response bodies.
    ENCODING = "UTF8"

    @abc.abstractmethod
    def inject(self, index, flow: HTTPFlow):
        """Injects the given URL index into the given flow."""
class HTMLInjection(InjectionGenerator):
    """Injects the URL index either by creating a new HTML page or by
    appending it to an existing page."""

    def __init__(self, insert: bool = False):
        """Initializes the HTMLInjection.

        Args:
            insert: whether to insert the URL index into an existing
                page (True) or to create a new page containing the URL
                index (False, the default).
        """
        self.insert = insert

    @classmethod
    def _form_html(cls, url):
        # POST targets are exposed as (empty) forms so scanners can
        # discover and submit them.
        return f"<form action=\"{url}\" method=\"POST\"></form>"

    @classmethod
    def _link_html(cls, url):
        # GET targets are exposed as plain anchors.
        return f"<a href=\"{url}\">link to {url}</a>"

    @classmethod
    def index_html(cls, index):
        """Render the URL index as an HTML fragment of forms and links."""
        link_htmls = []
        for scheme_netloc, paths in index.items():
            for path, methods in paths.items():
                url = scheme_netloc + path
                if "POST" in methods:
                    link_htmls.append(cls._form_html(url))
                if "GET" in methods:
                    link_htmls.append(cls._link_html(url))
        # Bug fix: the separator used to be "</ br>", which is not a
        # valid HTML tag; use a self-closing line break instead.
        return "<br/>".join(link_htmls)

    @classmethod
    def landing_page(cls, index):
        """Render a minimal complete HTML page containing the index."""
        return (
            "<head><meta charset=\"UTF-8\"></head><body>"
            + cls.index_html(index)
            + "</body>"
        )

    def inject(self, index, flow: HTTPFlow):
        """Inject the URL index into *flow*'s response.

        With ``insert=True`` the index fragment is appended to the
        existing page (before ``</body>`` when present).  Otherwise the
        body is replaced by a generated landing page, but only for 404
        responses so valid pages are never overwritten.
        """
        if flow.response is not None:
            if flow.response.status_code != 404 and not self.insert:
                logger.warning(
                    f"URL '{flow.request.url}' didn't return 404 status, "
                    f"index page would overwrite valid page.")
            elif self.insert:
                content = (flow.response
                           .content
                           .decode(self.ENCODING, "backslashreplace"))
                if "</body>" in content:
                    content = content.replace("</body>", self.index_html(index) + "</body>")
                else:
                    content += self.index_html(index)
                flow.response.content = content.encode(self.ENCODING)
            else:
                flow.response.content = (self.landing_page(index)
                                         .encode(self.ENCODING))
class RobotsInjection(InjectionGenerator):
    """Injects the URL index by serving a generated robots.txt that
    lists the indexed paths."""

    def __init__(self, directive="Allow"):
        self.directive = directive

    @classmethod
    def robots_txt(cls, index, directive="Allow"):
        """Render the URL index as robots.txt lines, one *directive*
        entry per indexed path."""
        lines = ["User-agent: *"]
        for scheme_netloc, paths in index.items():
            for path in paths:
                lines.append(directive + ": " + path)
        return "\n".join(lines)

    def inject(self, index, flow: HTTPFlow):
        """Replace a 404 response body with the generated robots.txt;
        refuse to overwrite non-404 (valid) pages."""
        if flow.response is None:
            return
        if flow.response.status_code != 404:
            logger.warning(
                f"URL '{flow.request.url}' didn't return 404 status, "
                f"index page would overwrite valid page.")
            return
        body = self.robots_txt(index, self.directive)
        flow.response.content = body.encode(self.ENCODING)
class SitemapInjection(InjectionGenerator):
    """Injects the URL index by serving a generated sitemap listing the
    URLs."""

    @classmethod
    def sitemap(cls, index):
        """Render the URL index as a sitemap XML document with one
        HTML-escaped <url> entry per indexed URL."""
        lines = [
            "<?xml version=\"1.0\" encoding=\"UTF-8\"?><urlset xmlns=\"http://www.sitemaps.org/schemas/sitemap/0.9\">"]
        for scheme_netloc, paths in index.items():
            for path in paths:
                full_url = scheme_netloc + path
                lines.append(f"<url><loc>{html.escape(full_url)}</loc></url>")
        lines.append("</urlset>")
        return "\n".join(lines)

    def inject(self, index, flow: HTTPFlow):
        """Replace a 404 response body with the generated sitemap;
        refuse to overwrite non-404 (valid) pages."""
        if flow.response is None:
            return
        if flow.response.status_code != 404:
            logger.warning(
                f"URL '{flow.request.url}' didn't return 404 status, "
                f"index page would overwrite valid page.")
            return
        flow.response.content = self.sitemap(index).encode(self.ENCODING)
class UrlInjectionAddon:
    """Injects a previously-recorded URL index into a web application to
    improve the crawling performance of web application scanners.

    Depending on the configured InjectionGenerator, the URLs are exposed
    as a generated landing page (HTMLInjection()), appended to an
    existing page (HTMLInjection(insert=True)), served as a robots.txt
    (RobotsInjection()) or as a sitemap.xml (SitemapInjection()).  The
    scanner must be able to find the page carrying the index, e.g. by
    using it as the scan's starting point.  The URL index itself can be
    generated by the UrlIndex add-on.
    """

    def __init__(self, flt: str, url_index_file: str,
                 injection_gen: InjectionGenerator):
        """Initializes the UrlIndex add-on.

        Args:
            flt: mitmproxy filter (str) selecting the responses into
                which the URLs are injected.
            url_index_file: path (str) to the JSON URL index file (e.g.
                produced by the UrlIndexAddon).
            injection_gen: InjectionGenerator used to inject the URLs
                into the web application.
        """
        self.name = f"{self.__class__.__name__}-{injection_gen.__class__.__name__}-{self.__hash__()}"
        self.flt = flowfilter.parse(flt)
        self.injection_gen = injection_gen
        with open(url_index_file) as f:
            self.url_store = json.load(f)

    def response(self, flow: HTTPFlow):
        """Inject the URL index into responses matching the filter and
        rewrite them into a successful HTML page."""
        if flow.response is None:
            return
        if self.flt is None or not self.flt(flow):
            return
        self.injection_gen.inject(self.url_store, flow)
        flow.response.status_code = 200
        flow.response.headers["content-type"] = "text/html"
        logger.debug(f"Set status code to 200 and set content to logged "
                     f"urls. Method: {self.injection_gen}")
| mit | 1a1d7cdacca98802767ec8907a17b053 | 40.366279 | 133 | 0.590443 | 4.33313 | false | false | false | false |
mitmproxy/mitmproxy | mitmproxy/tools/console/grideditor/col_text.py | 1 | 1290 | """
Welcome to the encoding dance!
In a nutshell, text columns are actually a proxy class for byte columns,
which just encode/decodes contents.
"""
from mitmproxy.tools.console import signals
from mitmproxy.tools.console.grideditor import col_bytes
class Column(col_bytes.Column):
    """Text (str) column implemented on top of the bytes column: the
    widgets it hands out encode/decode transparently."""

    def __init__(self, heading, encoding="utf8", errors="surrogateescape"):
        # "surrogateescape" round-trips bytes that are invalid in the
        # chosen encoding instead of raising.
        super().__init__(heading)
        self.encoding_args = encoding, errors

    def Display(self, data):
        return TDisplay(data, self.encoding_args)

    def Edit(self, data):
        return TEdit(data, self.encoding_args)

    def blank(self):
        return ""
# This is the same for both edit and display.
class EncodingMixin:
    """Encodes text on the way into the wrapped bytes widget and
    decodes it again in get_data()."""

    def __init__(self, data, encoding_args):
        self.encoding_args = encoding_args
        # Encode up front: the underlying bytes widget only handles bytes.
        super().__init__(str(data).encode(*self.encoding_args))  # type: ignore

    def get_data(self):
        data = super().get_data()  # type: ignore
        try:
            return data.decode(*self.encoding_args)
        except ValueError:
            # UnicodeDecodeError is a ValueError subclass.
            signals.status_message.send(message="Invalid encoding.")
            raise
# urwid forces a different name for a subclass.
class TDisplay(EncodingMixin, col_bytes.Display):
pass
class TEdit(EncodingMixin, col_bytes.Edit):
pass
| mit | 1c1a6f59ac3a7b1ee6f30ad010523acd | 25.875 | 79 | 0.663566 | 3.850746 | false | false | false | false |
mitmproxy/mitmproxy | docs/scripts/clirecording/screenplays.py | 2 | 10137 | #!/usr/bin/env python3
from clidirector import CliDirector
def record_user_interface(d: CliDirector):
tmux = d.start_session(width=120, height=36)
window = tmux.attached_window
d.start_recording("recordings/mitmproxy_user_interface.cast")
d.message(
"Welcome to the mitmproxy tutorial. In this lesson we cover the user interface."
)
d.pause(1)
d.exec("mitmproxy")
d.pause(3)
d.message("This is the default view of mitmproxy.")
d.message("mitmproxy adds rows to the view as new requests come in.")
d.message("Let’s generate some requests using `curl` in a separate terminal.")
pane_top = d.current_pane
pane_bottom = window.split_window(attach=True)
pane_bottom.resize_pane(height=12)
d.focus_pane(pane_bottom)
d.pause(2)
d.type("curl")
d.message("Use curl’s `--proxy` option to configure mitmproxy as a proxy.")
d.type(" --proxy http://127.0.0.1:8080")
d.message("We use the text-based weather service `wttr.in`.")
d.exec(' "http://wttr.in/Dunedin?0"')
d.pause(2)
d.press_key("Up")
d.press_key("Left", count=3)
d.press_key("BSpace", count=7)
d.exec("Innsbruck")
d.pause(2)
d.exec("exit", target=pane_bottom)
d.focus_pane(pane_top)
d.message("You see the requests to `wttr.in` in the list of flows.")
d.message("mitmproxy is controlled using keyboard shortcuts.")
d.message("Use your arrow keys `↑` and `↓` to change the focused flow (`>>`).")
d.press_key("Down", pause=0.5)
d.press_key("Up", pause=0.5)
d.press_key("Down", pause=0.5)
d.press_key("Up", pause=0.5)
d.message("The focused flow (`>>`) is used as a target for various commands.")
d.message("One such command shows the flow details, it is bound to `ENTER`.")
d.message("Press `ENTER` to view the details of the focused flow.")
d.press_key("Enter")
d.message("The flow details view has 3 panes: request, response, and detail.")
d.message("Use your arrow keys `←` and `→` to switch between panes.")
d.press_key("Right", count=2, pause=2.5)
d.press_key("Left", count=2, pause=1)
d.message(
"Press `q` to exit the current view.",
)
d.type("q")
d.message("Press `?` to get a list of all available keyboard shortcuts.")
d.type("?")
d.pause(2)
d.press_key("Down", count=20, pause=0.25)
d.message("Tip: Remember the `?` shortcut. It works in every view.")
d.message("Press `q` to exit the current view.")
d.type("q")
d.message("Each shortcut is internally bound to a command.")
d.message("You can also execute commands directly (without using shortcuts).")
d.message("Press `:` to open the command prompt at the bottom.")
d.type(":")
d.message("Enter `console.view.flow @focus`.")
d.type("console.view.flow @focus")
d.message("The command `console.view.flow` opens the details view for a flow.")
d.message("The argument `@focus` defines the target flow.")
d.message("Press `ENTER` to execute the command.")
d.press_key("Enter")
d.message(
"Commands unleash the full power of mitmproxy, i.e., to configure interceptions."
)
d.message("You now know basics of mitmproxy’s UI and how to control it.")
d.pause(1)
d.message("In the next lesson you will learn to intercept flows.")
d.save_instructions("recordings/mitmproxy_user_interface_instructions.json")
d.end()
def record_intercept_requests(d: CliDirector):
tmux = d.start_session(width=120, height=36)
window = tmux.attached_window
d.start_recording("recordings/mitmproxy_intercept_requests.cast")
d.message(
"Welcome to the mitmproxy tutorial. In this lesson we cover the interception of requests."
)
d.pause(1)
d.exec("mitmproxy")
d.pause(3)
d.message("We first need to configure mitmproxy to intercept requests.")
d.message(
"Press `i` to prepopulate mitmproxy’s command prompt with `set intercept ''`."
)
d.type("i")
d.pause(2)
d.message(
"We use the flow filter expression `~u <regex>` to only intercept specific URLs."
)
d.message(
"Additionally, we use the filter `~q` to only intercept requests, but not responses."
)
d.message("We combine both flow filters using `&`.")
d.message(
"Enter `~u /Dunedin & ~q` between the quotes of the `set intercept` command and press `ENTER`."
)
d.exec("~u /Dunedin & ~q")
d.message("The bottom bar shows that the interception has been configured.")
d.message("Let’s generate a request using `curl` in a separate terminal.")
pane_top = d.current_pane
pane_bottom = window.split_window(attach=True)
pane_bottom.resize_pane(height=12)
d.focus_pane(pane_bottom)
d.pause(2)
d.exec('curl --proxy http://127.0.0.1:8080 "http://wttr.in/Dunedin?0"')
d.pause(2)
d.focus_pane(pane_top)
d.message("You see a new line in in the list of flows.")
d.message(
"The new flow is displayed in red to indicate that it has been intercepted."
)
d.message(
"Put the focus (`>>`) on the intercepted flow. This is already the case in our example."
)
d.message("Press `a` to resume this flow without making any changes.")
d.type("a")
d.pause(2)
d.focus_pane(pane_bottom)
d.message("Submit another request and focus its flow.")
d.press_key("Up")
d.press_key("Enter")
d.pause(2)
d.focus_pane(pane_top)
d.press_key("Down")
d.pause(1)
d.message(
"Press `X` to kill this flow, i.e., discard it without forwarding it to its final destination `wttr.in`."
)
d.type("X")
d.pause(3)
d.message("In the next lesson you will learn to modify intercepted flows.")
d.save_instructions("recordings/mitmproxy_intercept_requests_instructions.json")
d.end()
def record_modify_requests(d: CliDirector):
tmux = d.start_session(width=120, height=36)
window = tmux.attached_window
d.start_recording("recordings/mitmproxy_modify_requests.cast")
d.message(
"Welcome to the mitmproxy tutorial. In this lesson we cover the modification of intercepted requests."
)
d.pause(1)
d.exec("mitmproxy")
d.pause(3)
d.message(
"We configure and use the same interception rule as in the last tutorial."
)
d.message(
"Press `i` to prepopulate mitmproxy’s command prompt, enter the flow filter `~u /Dunedin & ~q`, and press `ENTER`."
)
d.type("i")
d.pause(2)
d.exec("~u /Dunedin & ~q")
d.message("Let’s generate a request using `curl` in a separate terminal.")
pane_top = d.current_pane
pane_bottom = window.split_window(attach=True)
pane_bottom.resize_pane(height=12)
d.focus_pane(pane_bottom)
d.pause(2)
d.exec('curl --proxy http://127.0.0.1:8080 "http://wttr.in/Dunedin?0"')
d.pause(2)
d.focus_pane(pane_top)
d.message("We now want to modify the intercepted request.")
d.message(
"Put the focus (`>>`) on the intercepted flow. This is already the case in our example."
)
d.message("Press `ENTER` to open the details view for the intercepted flow.")
d.press_key("Enter")
d.message("Press `e` to edit the intercepted flow.")
d.type("e")
d.message("mitmproxy asks which part to modify.")
d.message("Select `path` by using your arrow keys and press `ENTER`.")
d.press_key("Down", count=3, pause=0.5)
d.pause(1)
d.press_key("Enter")
d.message(
"mitmproxy shows all path components line by line, in our example its just `Dunedin`."
)
d.message("Press `ENTER` to modify the selected path component.")
d.press_key("Down", pause=2)
d.press_key("Enter")
d.message("Replace `Dunedin` with `Innsbruck`.")
d.press_key("BSpace", count=7, pause=0.5)
d.type("Innsbruck", pause=0.5)
d.message("Press `ESC` to confirm your change.")
d.press_key("Escape")
d.message("Press `q` to go back to the flow details view.")
d.type("q")
d.message("Press `a` to resume the intercepted flow.")
d.type("a")
d.pause(2)
d.message(
"You see that the request URL was modified and `wttr.in` replied with the weather report for `Innsbruck`."
)
d.message("In the next lesson you will learn to replay flows.")
d.save_instructions("recordings/mitmproxy_modify_requests_instructions.json")
d.end()
def record_replay_requests(d: CliDirector):
tmux = d.start_session(width=120, height=36)
window = tmux.attached_window
d.start_recording("recordings/mitmproxy_replay_requests.cast")
d.message(
"Welcome to the mitmproxy tutorial. In this lesson we cover replaying requests."
)
d.pause(1)
d.exec("mitmproxy")
d.pause(3)
d.message(
"Let’s generate a request that we can replay. We use `curl` in a separate terminal."
)
pane_top = d.current_pane
pane_bottom = window.split_window(attach=True)
pane_bottom.resize_pane(height=12)
d.focus_pane(pane_bottom)
d.pause(2)
d.exec('curl --proxy http://127.0.0.1:8080 "http://wttr.in/Dunedin?0"')
d.pause(2)
d.focus_pane(pane_top)
d.message("We now want to replay the this request.")
d.message(
"Put the focus (`>>`) on the request that should be replayed. This is already the case in our example."
)
d.message("Press `r` to replay the request.")
d.type("r")
d.message(
"Note that no new rows are added for replayed flows, but the existing row is updated."
)
d.message(
"Every time you press `r`, mitmproxy sends this request to the server again and updates the flow."
)
d.press_key("r", count=4, pause=1)
d.message("You can also modify a flow before replaying it.")
d.message("It works as shown in the previous lesson, by pressing `e`.")
d.message(
"Congratulations! You have completed all lessons of the mitmproxy tutorial."
)
d.save_instructions("recordings/mitmproxy_replay_requests_instructions.json")
d.end()
| mit | 811922bee1eb68d694760738caa9e966 | 30.212963 | 123 | 0.646692 | 3.250723 | false | false | false | false |
mitmproxy/mitmproxy | mitmproxy/io/compat.py | 1 | 15621 | """
This module handles the import of mitmproxy flows generated by old versions.
The flow file version is decoupled from the mitmproxy release cycle (since
v3.0.0dev) and versioning. Every change or migration gets a new flow file
version number, this prevents issues with developer builds and snapshots.
"""
import copy
import uuid
from typing import Any, Union
from mitmproxy import version
from mitmproxy.utils import strutils
def convert_011_012(data):
data[b"version"] = (0, 12)
return data
def convert_012_013(data):
data[b"version"] = (0, 13)
return data
def convert_013_014(data):
data[b"request"][b"first_line_format"] = data[b"request"].pop(b"form_in")
data[b"request"][b"http_version"] = (
b"HTTP/"
+ ".".join(str(x) for x in data[b"request"].pop(b"httpversion")).encode()
)
data[b"response"][b"http_version"] = (
b"HTTP/"
+ ".".join(str(x) for x in data[b"response"].pop(b"httpversion")).encode()
)
data[b"response"][b"status_code"] = data[b"response"].pop(b"code")
data[b"response"][b"body"] = data[b"response"].pop(b"content")
data[b"server_conn"].pop(b"state")
data[b"server_conn"][b"via"] = None
data[b"version"] = (0, 14)
return data
def convert_014_015(data):
data[b"version"] = (0, 15)
return data
def convert_015_016(data):
for m in (b"request", b"response"):
if b"body" in data[m]:
data[m][b"content"] = data[m].pop(b"body")
if b"msg" in data[b"response"]:
data[b"response"][b"reason"] = data[b"response"].pop(b"msg")
data[b"request"].pop(b"form_out", None)
data[b"version"] = (0, 16)
return data
def convert_016_017(data):
data[b"server_conn"][b"peer_address"] = None
data[b"version"] = (0, 17)
return data
def convert_017_018(data):
# convert_unicode needs to be called for every dual release and the first py3-only release
data = convert_unicode(data)
data["server_conn"]["ip_address"] = data["server_conn"].pop("peer_address", None)
data["marked"] = False
data["version"] = (0, 18)
return data
def convert_018_019(data):
# convert_unicode needs to be called for every dual release and the first py3-only release
data = convert_unicode(data)
data["request"].pop("stickyauth", None)
data["request"].pop("stickycookie", None)
data["client_conn"]["sni"] = None
data["client_conn"]["alpn_proto_negotiated"] = None
data["client_conn"]["cipher_name"] = None
data["client_conn"]["tls_version"] = None
data["server_conn"]["alpn_proto_negotiated"] = None
if data["server_conn"]["via"]:
data["server_conn"]["via"]["alpn_proto_negotiated"] = None
data["mode"] = "regular"
data["metadata"] = dict()
data["version"] = (0, 19)
return data
def convert_019_100(data):
# convert_unicode needs to be called for every dual release and the first py3-only release
data = convert_unicode(data)
data["version"] = (1, 0, 0)
return data
def convert_100_200(data):
data["version"] = (2, 0, 0)
data["client_conn"]["address"] = data["client_conn"]["address"]["address"]
data["server_conn"]["address"] = data["server_conn"]["address"]["address"]
data["server_conn"]["source_address"] = data["server_conn"]["source_address"][
"address"
]
if data["server_conn"]["ip_address"]:
data["server_conn"]["ip_address"] = data["server_conn"]["ip_address"]["address"]
if data["server_conn"]["via"]:
data["server_conn"]["via"]["address"] = data["server_conn"]["via"]["address"][
"address"
]
data["server_conn"]["via"]["source_address"] = data["server_conn"]["via"][
"source_address"
]["address"]
if data["server_conn"]["via"]["ip_address"]:
data["server_conn"]["via"]["ip_address"] = data["server_conn"]["via"][
"ip_address"
]["address"]
return data
def convert_200_300(data):
data["version"] = (3, 0, 0)
data["client_conn"]["mitmcert"] = None
data["server_conn"]["tls_version"] = None
if data["server_conn"]["via"]:
data["server_conn"]["via"]["tls_version"] = None
return data
def convert_300_4(data):
data["version"] = 4
# This is an empty migration to transition to the new versioning scheme.
return data
client_connections: dict[tuple[str, ...], str] = {}
server_connections: dict[tuple[str, ...], str] = {}
def convert_4_5(data):
data["version"] = 5
client_conn_key = (
data["client_conn"]["timestamp_start"],
*data["client_conn"]["address"],
)
server_conn_key = (
data["server_conn"]["timestamp_start"],
*data["server_conn"]["source_address"],
)
data["client_conn"]["id"] = client_connections.setdefault(
client_conn_key, str(uuid.uuid4())
)
data["server_conn"]["id"] = server_connections.setdefault(
server_conn_key, str(uuid.uuid4())
)
if data["server_conn"]["via"]:
server_conn_key = (
data["server_conn"]["via"]["timestamp_start"],
*data["server_conn"]["via"]["source_address"],
)
data["server_conn"]["via"]["id"] = server_connections.setdefault(
server_conn_key, str(uuid.uuid4())
)
return data
def convert_5_6(data):
data["version"] = 6
data["client_conn"]["tls_established"] = data["client_conn"].pop("ssl_established")
data["client_conn"]["timestamp_tls_setup"] = data["client_conn"].pop(
"timestamp_ssl_setup"
)
data["server_conn"]["tls_established"] = data["server_conn"].pop("ssl_established")
data["server_conn"]["timestamp_tls_setup"] = data["server_conn"].pop(
"timestamp_ssl_setup"
)
if data["server_conn"]["via"]:
data["server_conn"]["via"]["tls_established"] = data["server_conn"]["via"].pop(
"ssl_established"
)
data["server_conn"]["via"]["timestamp_tls_setup"] = data["server_conn"][
"via"
].pop("timestamp_ssl_setup")
return data
def convert_6_7(data):
data["version"] = 7
data["client_conn"]["tls_extensions"] = None
return data
def convert_7_8(data):
data["version"] = 8
if "request" in data and data["request"] is not None:
data["request"]["trailers"] = None
if "response" in data and data["response"] is not None:
data["response"]["trailers"] = None
return data
def convert_8_9(data):
data["version"] = 9
is_request_replay = False
if "request" in data:
data["request"].pop("first_line_format")
data["request"]["authority"] = b""
is_request_replay = data["request"].pop("is_replay", False)
is_response_replay = False
if "response" in data and data["response"] is not None:
is_response_replay = data["response"].pop("is_replay", False)
if is_request_replay: # pragma: no cover
data["is_replay"] = "request"
elif is_response_replay: # pragma: no cover
data["is_replay"] = "response"
else:
data["is_replay"] = None
return data
def convert_9_10(data):
data["version"] = 10
def conv_conn(conn):
conn["state"] = 0
conn["error"] = None
conn["tls"] = conn["tls_established"]
alpn = conn["alpn_proto_negotiated"]
conn["alpn_offers"] = [alpn] if alpn else None
cipher = conn["cipher_name"]
conn["cipher_list"] = [cipher] if cipher else None
def conv_cconn(conn):
conn["sockname"] = ("", 0)
cc = conn.pop("clientcert", None)
conn["certificate_list"] = [cc] if cc else []
conv_conn(conn)
def conv_sconn(conn):
crt = conn.pop("cert", None)
conn["certificate_list"] = [crt] if crt else []
conn["cipher_name"] = None
conn["via2"] = None
conv_conn(conn)
conv_cconn(data["client_conn"])
conv_sconn(data["server_conn"])
if data["server_conn"]["via"]:
conv_sconn(data["server_conn"]["via"])
return data
def convert_10_11(data):
data["version"] = 11
def conv_conn(conn):
conn["sni"] = strutils.always_str(conn["sni"], "ascii", "backslashreplace")
conn["alpn"] = conn.pop("alpn_proto_negotiated")
conn["alpn_offers"] = conn["alpn_offers"] or []
conn["cipher_list"] = conn["cipher_list"] or []
conv_conn(data["client_conn"])
conv_conn(data["server_conn"])
if data["server_conn"]["via"]:
conv_conn(data["server_conn"]["via"])
return data
_websocket_handshakes = {}
def convert_11_12(data):
data["version"] = 12
if "websocket" in data["metadata"]:
_websocket_handshakes[data["id"]] = copy.deepcopy(data)
if "websocket_handshake" in data["metadata"]:
ws_flow = data
try:
data = _websocket_handshakes.pop(data["metadata"]["websocket_handshake"])
except KeyError:
# The handshake flow is missing, which should never really happen. We make up a dummy.
data = {
"client_conn": data["client_conn"],
"error": data["error"],
"id": data["id"],
"intercepted": data["intercepted"],
"is_replay": data["is_replay"],
"marked": data["marked"],
"metadata": {},
"mode": "transparent",
"request": {
"authority": b"",
"content": None,
"headers": [],
"host": b"unknown",
"http_version": b"HTTP/1.1",
"method": b"GET",
"path": b"/",
"port": 80,
"scheme": b"http",
"timestamp_end": 0,
"timestamp_start": 0,
"trailers": None,
},
"response": None,
"server_conn": data["server_conn"],
"type": "http",
"version": 12,
}
data["metadata"]["duplicated"] = (
"This WebSocket flow has been migrated from an old file format version "
"and may appear duplicated."
)
data["websocket"] = {
"messages": ws_flow["messages"],
"closed_by_client": ws_flow["close_sender"] == "client",
"close_code": ws_flow["close_code"],
"close_reason": ws_flow["close_reason"],
"timestamp_end": data.get("server_conn", {}).get("timestamp_end", None),
}
else:
data["websocket"] = None
return data
def convert_12_13(data):
data["version"] = 13
if data["marked"]:
data["marked"] = ":default:"
else:
data["marked"] = ""
return data
def convert_13_14(data):
data["version"] = 14
data["comment"] = ""
# bugfix for https://github.com/mitmproxy/mitmproxy/issues/4576
if data.get("response", None) and data["response"]["timestamp_start"] is None:
data["response"]["timestamp_start"] = data["request"]["timestamp_end"]
data["response"]["timestamp_end"] = data["request"]["timestamp_end"] + 1
return data
def convert_14_15(data):
data["version"] = 15
if data.get("websocket", None):
# Add "injected" attribute.
data["websocket"]["messages"] = [
msg + [False] for msg in data["websocket"]["messages"]
]
return data
def convert_15_16(data):
data["version"] = 16
data["timestamp_created"] = data.get("request", data["client_conn"])[
"timestamp_start"
]
return data
def convert_16_17(data):
data["version"] = 17
data.pop("mode", None)
return data
def convert_17_18(data):
data["version"] = 18
data["client_conn"]["proxy_mode"] = "regular"
return data
def convert_18_19(data):
data["version"] = 19
data["client_conn"]["peername"] = data["client_conn"].pop("address", None)
if data["client_conn"].get("timestamp_start") is None:
data["client_conn"]["timestamp_start"] = 0.0
data["client_conn"].pop("tls_extensions")
data["server_conn"]["peername"] = data["server_conn"].pop("ip_address", None)
data["server_conn"]["sockname"] = data["server_conn"].pop("source_address", None)
data["server_conn"]["via"] = data["server_conn"].pop("via2", None)
for conn in ["client_conn", "server_conn"]:
data[conn].pop("tls_established")
data[conn]["cipher"] = data[conn].pop("cipher_name", None)
data[conn].setdefault("transport_protocol", "tcp")
for name in ["peername", "sockname", "address"]:
if data[conn].get(name) and isinstance(data[conn][name][0], bytes):
data[conn][name][0] = data[conn][name][0].decode(errors="backslashreplace")
if data["server_conn"]["sni"] is True:
data["server_conn"]["sni"] = data["server_conn"]["address"][0]
return data
def _convert_dict_keys(o: Any) -> Any:
if isinstance(o, dict):
return {strutils.always_str(k): _convert_dict_keys(v) for k, v in o.items()}
else:
return o
def _convert_dict_vals(o: dict, values_to_convert: dict) -> dict:
for k, v in values_to_convert.items():
if not o or k not in o:
continue # pragma: no cover
if v is True:
o[k] = strutils.always_str(o[k])
else:
_convert_dict_vals(o[k], v)
return o
def convert_unicode(data: dict) -> dict:
"""
This method converts between Python 3 and Python 2 dumpfiles.
"""
data = _convert_dict_keys(data)
data = _convert_dict_vals(
data,
{
"type": True,
"id": True,
"request": {"first_line_format": True},
"error": {"msg": True},
},
)
return data
converters = {
(0, 11): convert_011_012,
(0, 12): convert_012_013,
(0, 13): convert_013_014,
(0, 14): convert_014_015,
(0, 15): convert_015_016,
(0, 16): convert_016_017,
(0, 17): convert_017_018,
(0, 18): convert_018_019,
(0, 19): convert_019_100,
(1, 0): convert_100_200,
(2, 0): convert_200_300,
(3, 0): convert_300_4,
4: convert_4_5,
5: convert_5_6,
6: convert_6_7,
7: convert_7_8,
8: convert_8_9,
9: convert_9_10,
10: convert_10_11,
11: convert_11_12,
12: convert_12_13,
13: convert_13_14,
14: convert_14_15,
15: convert_15_16,
16: convert_16_17,
17: convert_17_18,
18: convert_18_19,
}
def migrate_flow(
flow_data: dict[Union[bytes, str], Any]
) -> dict[Union[bytes, str], Any]:
while True:
flow_version = flow_data.get(b"version", flow_data.get("version"))
# Historically, we used the mitmproxy minor version tuple as the flow format version.
if not isinstance(flow_version, int):
flow_version = tuple(flow_version)[:2]
if flow_version == version.FLOW_FORMAT_VERSION:
break
elif flow_version in converters:
flow_data = converters[flow_version](flow_data)
else:
should_upgrade = (
isinstance(flow_version, int)
and flow_version > version.FLOW_FORMAT_VERSION
)
raise ValueError(
"{} cannot read files with flow format version {}{}.".format(
version.MITMPROXY,
flow_version,
", please update mitmproxy" if should_upgrade else "",
)
)
return flow_data
| mit | 3a64d513ffe7a817befee5163ca006a8 | 29.689587 | 98 | 0.562704 | 3.533363 | false | false | false | false |
mitmproxy/mitmproxy | test/mitmproxy/proxy/layers/test_dns.py | 2 | 5464 | import time
from mitmproxy.proxy.commands import CloseConnection, Log, OpenConnection, SendData
from mitmproxy.proxy.events import ConnectionClosed, DataReceived
from mitmproxy.proxy.layers import dns
from mitmproxy.dns import DNSFlow
from mitmproxy.test.tutils import tdnsreq, tdnsresp
from ..tutils import Placeholder, Playbook, reply
def test_invalid_and_dummy_end(tctx):
assert (
Playbook(dns.DNSLayer(tctx))
>> DataReceived(tctx.client, b"Not a DNS packet")
<< Log(
"Client(client:1234, state=open) sent an invalid message: question #0: unpack encountered a label of length 99"
)
<< CloseConnection(tctx.client)
>> ConnectionClosed(tctx.client)
)
def test_regular(tctx):
f = Placeholder(DNSFlow)
req = tdnsreq()
resp = tdnsresp()
def resolve(flow: DNSFlow):
nonlocal req, resp
assert flow.request
req.timestamp = flow.request.timestamp
assert flow.request == req
resp.timestamp = time.time()
flow.response = resp
assert (
Playbook(dns.DNSLayer(tctx))
>> DataReceived(tctx.client, req.packed)
<< dns.DnsRequestHook(f)
>> reply(side_effect=resolve)
<< dns.DnsResponseHook(f)
>> reply()
<< SendData(tctx.client, resp.packed)
>> ConnectionClosed(tctx.client)
<< None
)
assert f().request == req
assert f().response == resp
assert not f().live
def test_regular_mode_no_hook(tctx):
f = Placeholder(DNSFlow)
layer = dns.DNSLayer(tctx)
layer.context.server.address = None
req = tdnsreq()
def no_resolve(flow: DNSFlow):
nonlocal req
assert flow.request
req.timestamp = flow.request.timestamp
assert flow.request == req
assert (
Playbook(layer)
>> DataReceived(tctx.client, req.packed)
<< dns.DnsRequestHook(f)
>> reply(side_effect=no_resolve)
<< dns.DnsErrorHook(f)
>> reply()
>> ConnectionClosed(tctx.client)
<< None
)
assert f().request == req
assert not f().response
assert not f().live
def test_reverse_premature_close(tctx):
f = Placeholder(DNSFlow)
layer = dns.DNSLayer(tctx)
layer.context.server.address = ("8.8.8.8", 53)
req = tdnsreq()
assert (
Playbook(layer)
>> DataReceived(tctx.client, req.packed)
<< dns.DnsRequestHook(f)
>> reply()
<< OpenConnection(tctx.server)
>> reply(None)
<< SendData(tctx.server, req.packed)
>> ConnectionClosed(tctx.client)
<< CloseConnection(tctx.server)
<< None
)
assert f().request
assert not f().response
assert not f().live
req.timestamp = f().request.timestamp
assert f().request == req
def test_reverse(tctx):
f = Placeholder(DNSFlow)
layer = dns.DNSLayer(tctx)
layer.context.server.address = ("8.8.8.8", 53)
req = tdnsreq()
resp = tdnsresp()
assert (
Playbook(layer)
>> DataReceived(tctx.client, req.packed)
<< dns.DnsRequestHook(f)
>> reply()
<< OpenConnection(tctx.server)
>> reply(None)
<< SendData(tctx.server, req.packed)
>> DataReceived(tctx.server, resp.packed)
<< dns.DnsResponseHook(f)
>> reply()
<< SendData(tctx.client, resp.packed)
>> ConnectionClosed(tctx.client)
<< CloseConnection(tctx.server)
<< None
)
assert f().request
assert f().response
assert not f().live
req.timestamp = f().request.timestamp
resp.timestamp = f().response.timestamp
assert f().request == req and f().response == resp
def test_reverse_fail_connection(tctx):
f = Placeholder(DNSFlow)
layer = dns.DNSLayer(tctx)
layer.context.server.address = ("8.8.8.8", 53)
req = tdnsreq()
assert (
Playbook(layer)
>> DataReceived(tctx.client, req.packed)
<< dns.DnsRequestHook(f)
>> reply()
<< OpenConnection(tctx.server)
>> reply("UDP no likey today.")
<< dns.DnsErrorHook(f)
>> reply()
<< None
)
assert f().request
assert not f().response
assert f().error.msg == "UDP no likey today."
req.timestamp = f().request.timestamp
assert f().request == req
def test_reverse_with_query_resend(tctx):
f = Placeholder(DNSFlow)
layer = dns.DNSLayer(tctx)
layer.context.server.address = ("8.8.8.8", 53)
req = tdnsreq()
req2 = tdnsreq()
req2.reserved = 4
resp = tdnsresp()
assert (
Playbook(layer)
>> DataReceived(tctx.client, req.packed)
<< dns.DnsRequestHook(f)
>> reply()
<< OpenConnection(tctx.server)
>> reply(None)
<< SendData(tctx.server, req.packed)
>> DataReceived(tctx.client, req2.packed)
<< dns.DnsRequestHook(f)
>> reply()
<< SendData(tctx.server, req2.packed)
>> DataReceived(tctx.server, resp.packed)
<< dns.DnsResponseHook(f)
>> reply()
<< SendData(tctx.client, resp.packed)
>> ConnectionClosed(tctx.client)
<< CloseConnection(tctx.server)
<< None
)
assert f().request
assert f().response
assert not f().live
req2.timestamp = f().request.timestamp
resp.timestamp = f().response.timestamp
assert f().request == req2
assert f().response == resp
| mit | 65e462cf3c54e0eeb10828944778cf4d | 26.32 | 123 | 0.598829 | 3.613757 | false | false | false | false |
django-extensions/django-extensions | django_extensions/management/commands/pipchecker.py | 1 | 15513 | # -*- coding: utf-8 -*-
import json
import os
import re
from distutils.version import LooseVersion
from urllib.parse import urlparse
from urllib.error import HTTPError
from urllib.request import Request, urlopen
from xmlrpc.client import ServerProxy, Fault
import pip
from time import sleep
from django.core.management.base import BaseCommand, CommandError
from django_extensions.management.color import color_style
from django_extensions.management.utils import signalcommand
from pip._internal.req import InstallRequirement
if LooseVersion(pip.__version__) >= LooseVersion('19.0'):
from pip._internal.req.constructors import install_req_from_line # noqa
# Compatibility shim: pip's internal layout has changed repeatedly.
# Outer try  -> pip >= 10 (everything lives under pip._internal).
# Outer except -> pip 6..9 (flat pip.* modules).
try:
    try:
        # pip >= 19.3 moved PipSession into the network package.
        from pip._internal.network.session import PipSession
    except ImportError:
        from pip._internal.download import PipSession  # type:ignore
    from pip._internal.req.req_file import parse_requirements
    try:
        # pip < 21.3 still exposed this helper directly.
        from pip._internal.utils.misc import get_installed_distributions  # type:ignore
    except ImportError:
        # pip >= 21.3 removed the helper entirely; re-implement it on top of
        # the new pip._internal.metadata API with the same signature.
        from typing import cast

        def get_installed_distributions(
            local_only=True,
            include_editables=True,
            editables_only=False,
            user_only=False,
            paths=None,
        ):
            """Return a list of installed Distribution objects.

            Left for compatibility until direct pkg_resources uses are refactored out.
            """
            from pip._internal.metadata import get_default_environment, get_environment
            from pip._internal.metadata.pkg_resources import Distribution as _Dist
            # paths=None means "inspect the current environment"; otherwise
            # restrict the scan to the given sys.path-style entries.
            if paths is None:
                env = get_default_environment()
            else:
                env = get_environment(paths)
            dists = env.iter_installed_distributions(
                local_only=local_only,
                include_editables=include_editables,
                editables_only=editables_only,
                user_only=user_only,
            )
            # Unwrap pip's Distribution adapter back to the raw
            # pkg_resources distribution the callers below expect.
            return [cast(_Dist, dist)._dist for dist in dists]
except ImportError:
    # pip < 10
    try:
        from pip import get_installed_distributions  # type:ignore
        from pip.download import PipSession  # type:ignore
        from pip.req import parse_requirements  # type:ignore
    except ImportError:
        # Anything older than pip 6 has no usable requirements parser.
        raise CommandError("Pip version 6 or higher is required")
try:
import requests
HAS_REQUESTS = True
except ImportError:
HAS_REQUESTS = False
class Command(BaseCommand):
help = "Scan pip requirement files for out-of-date packages."
def add_arguments(self, parser):
super().add_arguments(parser)
parser.add_argument(
"-t", "--github-api-token", action="store",
dest="github_api_token", help="A github api authentication token."
)
parser.add_argument(
"-r", "--requirement", action="append", dest="requirements",
default=[], metavar="FILENAME",
help="Check all the packages listed in the given requirements "
"file. This option can be used multiple times."
),
parser.add_argument(
"-n", "--newer", action="store_true", dest="show_newer",
help="Also show when newer version then available is installed."
)
    @signalcommand
    def handle(self, *args, **options):
        """Entry point: locate requirements files, parse them, then report.

        Builds ``self.reqs`` (requirement name -> {"pip_req", "url"}) and
        delegates the actual freshness checks to ``check_pypi``,
        ``check_github`` and ``check_other``.
        """
        self.style = color_style()
        self.options = options
        # Resolve which requirements files to scan, in order of preference:
        # explicit -r options, requirements.txt, a requirements/ directory
        # (all *.txt files), then the -dev / -prod fallbacks.
        if options["requirements"]:
            req_files = options["requirements"]
        elif os.path.exists("requirements.txt"):
            req_files = ["requirements.txt"]
        elif os.path.exists("requirements"):
            req_files = [
                "requirements/{0}".format(f) for f in os.listdir("requirements")
                if os.path.isfile(os.path.join("requirements", f)) and f.lower().endswith(".txt")
            ]
        elif os.path.exists("requirements-dev.txt"):
            req_files = ["requirements-dev.txt"]
        elif os.path.exists("requirements-prod.txt"):
            req_files = ["requirements-prod.txt"]
        else:
            raise CommandError("Requirements file(s) not found")
        self.reqs = {}
        with PipSession() as session:
            for filename in req_files:
                for req in parse_requirements(filename, session=session):
                    # pip >= 20.1 yields ParsedRequirement objects; convert
                    # them to InstallRequirement so the attributes below exist.
                    if not isinstance(req, InstallRequirement):
                        req = install_req_from_line(req.requirement)
                    # URL-only requirements have no name; fall back to the
                    # link's filename as the dictionary key.
                    name = req.name if req.name else req.link.filename
                    # url attribute changed to link in pip version 6.1.0 and above
                    if LooseVersion(pip.__version__) > LooseVersion('6.0.8'):
                        self.reqs[name] = {
                            "pip_req": req,
                            "url": req.link,
                        }
                    else:
                        self.reqs[name] = {
                            "pip_req": req,
                            "url": req.url,
                        }
        # GitHub token: CLI flag wins over the GITHUB_API_TOKEN env var.
        if options["github_api_token"]:
            self.github_api_token = options["github_api_token"]
        elif os.environ.get("GITHUB_API_TOKEN"):
            self.github_api_token = os.environ.get("GITHUB_API_TOKEN")
        else:
            self.github_api_token = None  # only 50 requests per hour
        self.check_pypi()
        if HAS_REQUESTS:
            self.check_github()
        else:
            self.stdout.write(self.style.ERROR("Cannot check github urls. The requests library is not installed. ( pip install requests )"))
        self.check_other()
def _urlopen_as_json(self, url, headers=None):
"""Shorcut for return contents as json"""
req = Request(url, headers=headers)
return json.loads(urlopen(req).read())
def _is_stable(self, version):
return not re.search(r'([ab]|rc|dev)\d+$', str(version))
def _available_version(self, dist_version, available):
if self._is_stable(dist_version):
stable = [v for v in available if self._is_stable(LooseVersion(v))]
if stable:
return LooseVersion(stable[0])
return LooseVersion(available[0]) if available else None
    def check_pypi(self):
        """If the requirement is frozen to pypi, check for a new version.

        Matches installed distributions against ``self.reqs``, asks the pypi
        XML-RPC API for the available releases of each non-URL requirement,
        and writes one status line per package. Every requirement handled
        here is removed from ``self.reqs`` so the later checks skip it.
        """
        # Attach the locally installed distribution (if any) to each requirement.
        for dist in get_installed_distributions():
            name = dist.project_name
            if name in self.reqs.keys():
                self.reqs[name]["dist"] = dist
        pypi = ServerProxy("https://pypi.python.org/pypi")
        # Iterate over a copy because entries are deleted during the loop.
        for name, req in list(self.reqs.items()):
            if req["url"]:
                continue  # skipping github packages.
            elif "dist" in req:
                dist = req["dist"]
                dist_version = LooseVersion(dist.version)
                retry = True
                available = None
                # Retry indefinitely on XML-RPC faults (typically throttling).
                while retry:
                    try:
                        # Fall back to the underscore spelling, since this API
                        # treats '-' and '_' in project names as distinct.
                        available = pypi.package_releases(req["pip_req"].name, True) or pypi.package_releases(req["pip_req"].name.replace('-', '_'), True)
                        retry = False
                        sleep(1)  # crude way slow down to avoid HTTPTooManyRequests
                    except Fault as err:
                        self.stdout.write(err.faultString)
                        self.stdout.write("Retrying in 60 seconds!")
                        sleep(60)
                available_version = self._available_version(dist_version, available)
                if not available_version:
                    msg = self.style.WARN("release is not on pypi (check capitalization and/or --extra-index-url)")
                elif self.options['show_newer'] and dist_version > available_version:
                    msg = self.style.INFO("{0} available (newer installed)".format(available_version))
                elif available_version > dist_version:
                    msg = self.style.INFO("{0} available".format(available_version))
                else:
                    # Up-to-date packages are dropped without printing a line.
                    msg = "up to date"
                    del self.reqs[name]
                    continue
                pkg_info = self.style.BOLD("{dist.project_name} {dist.version}".format(dist=dist))
            else:
                msg = "not installed"
                pkg_info = name
            self.stdout.write("{pkg_info:40} {msg}".format(pkg_info=pkg_info, msg=msg))
            del self.reqs[name]
def check_github(self):
"""
If the requirement is frozen to a github url, check for new commits.
API Tokens
----------
For more than 50 github api calls per hour, pipchecker requires
authentication with the github api by settings the environemnt
variable ``GITHUB_API_TOKEN`` or setting the command flag
--github-api-token='mytoken'``.
To create a github api token for use at the command line::
curl -u 'rizumu' -d '{"scopes":["repo"], "note":"pipchecker"}' https://api.github.com/authorizations
For more info on github api tokens:
https://help.github.com/articles/creating-an-oauth-token-for-command-line-use
http://developer.github.com/v3/oauth/#oauth-authorizations-api
Requirement Format
------------------
Pipchecker gets the sha of frozen repo and checks if it is
found at the head of any branches. If it is not found then
the requirement is considered to be out of date.
Therefore, freezing at the commit hash will provide the expected
results, but if freezing at a branch or tag name, pipchecker will
not be able to determine with certainty if the repo is out of date.
Freeze at the commit hash (sha)::
git+git://github.com/django/django.git@393c268e725f5b229ecb554f3fac02cfc250d2df#egg=Django
https://github.com/django/django/archive/393c268e725f5b229ecb554f3fac02cfc250d2df.tar.gz#egg=Django
https://github.com/django/django/archive/393c268e725f5b229ecb554f3fac02cfc250d2df.zip#egg=Django
Freeze with a branch name::
git+git://github.com/django/django.git@master#egg=Django
https://github.com/django/django/archive/master.tar.gz#egg=Django
https://github.com/django/django/archive/master.zip#egg=Django
Freeze with a tag::
git+git://github.com/django/django.git@1.5b2#egg=Django
https://github.com/django/django/archive/1.5b2.tar.gz#egg=Django
https://github.com/django/django/archive/1.5b2.zip#egg=Django
Do not freeze::
git+git://github.com/django/django.git#egg=Django
"""
for name, req in list(self.reqs.items()):
req_url = req["url"]
if not req_url:
continue
req_url = str(req_url)
if req_url.startswith("git") and "github.com/" not in req_url:
continue
if req_url.endswith((".tar.gz", ".tar.bz2", ".zip")):
continue
headers = {
"content-type": "application/json",
}
if self.github_api_token:
headers["Authorization"] = "token {0}".format(self.github_api_token)
try:
path_parts = urlparse(req_url).path.split("#", 1)[0].strip("/").rstrip("/").split("/")
if len(path_parts) == 2:
user, repo = path_parts
elif 'archive' in path_parts:
# Supports URL of format:
# https://github.com/django/django/archive/master.tar.gz#egg=Django
# https://github.com/django/django/archive/master.zip#egg=Django
user, repo = path_parts[:2]
repo += '@' + path_parts[-1].replace('.tar.gz', '').replace('.zip', '')
else:
self.style.ERROR("\nFailed to parse %r\n" % (req_url, ))
continue
except (ValueError, IndexError) as e:
self.stdout.write(self.style.ERROR("\nFailed to parse %r: %s\n" % (req_url, e)))
continue
try:
test_auth = requests.get("https://api.github.com/django/", headers=headers).json()
except HTTPError as e:
self.stdout.write("\n%s\n" % str(e))
return
if "message" in test_auth and test_auth["message"] == "Bad credentials":
self.stdout.write(self.style.ERROR("\nGithub API: Bad credentials. Aborting!\n"))
return
elif "message" in test_auth and test_auth["message"].startswith("API Rate Limit Exceeded"):
self.stdout.write(self.style.ERROR("\nGithub API: Rate Limit Exceeded. Aborting!\n"))
return
frozen_commit_sha = None
if ".git" in repo:
repo_name, frozen_commit_full = repo.split(".git")
if frozen_commit_full.startswith("@"):
frozen_commit_sha = frozen_commit_full[1:]
elif "@" in repo:
repo_name, frozen_commit_sha = repo.split("@")
if frozen_commit_sha is None:
msg = self.style.ERROR("repo is not frozen")
if frozen_commit_sha:
branch_url = "https://api.github.com/repos/{0}/{1}/branches".format(user, repo_name)
branch_data = requests.get(branch_url, headers=headers).json()
frozen_commit_url = "https://api.github.com/repos/{0}/{1}/commits/{2}".format(
user, repo_name, frozen_commit_sha
)
frozen_commit_data = requests.get(frozen_commit_url, headers=headers).json()
if "message" in frozen_commit_data and frozen_commit_data["message"] == "Not Found":
msg = self.style.ERROR("{0} not found in {1}. Repo may be private.".format(frozen_commit_sha[:10], name))
elif frozen_commit_data["sha"] in [branch["commit"]["sha"] for branch in branch_data]:
msg = self.style.BOLD("up to date")
else:
msg = self.style.INFO("{0} is not the head of any branch".format(frozen_commit_data["sha"][:10]))
if "dist" in req:
pkg_info = "{dist.project_name} {dist.version}".format(dist=req["dist"])
elif frozen_commit_sha is None:
pkg_info = name
else:
pkg_info = "{0} {1}".format(name, frozen_commit_sha[:10])
self.stdout.write("{pkg_info:40} {msg}".format(pkg_info=pkg_info, msg=msg))
del self.reqs[name]
def check_other(self):
"""
If the requirement is frozen somewhere other than pypi or github, skip.
If you have a private pypi or use --extra-index-url, consider contributing
support here.
"""
if self.reqs:
self.stdout.write(self.style.ERROR("\nOnly pypi and github based requirements are supported:"))
for name, req in self.reqs.items():
if "dist" in req:
pkg_info = "{dist.project_name} {dist.version}".format(dist=req["dist"])
elif "url" in req:
pkg_info = "{url}".format(url=req["url"])
else:
pkg_info = "unknown package"
self.stdout.write(self.style.BOLD("{pkg_info:40} is not a pypi or github requirement".format(pkg_info=pkg_info)))
| mit | d125ec2965114713713a62192b8cd4a5 | 42.698592 | 154 | 0.566364 | 4.255967 | false | false | false | false |
django-extensions/django-extensions | django_extensions/logging/filters.py | 9 | 1126 | # -*- coding: utf-8 -*-
import time
import logging
from hashlib import md5
# also see: https://djangosnippets.org/snippets/2242/
class RateLimiterFilter(logging.Filter):
    """Logging filter that rate-limits duplicate messages via the Django cache.

    At most one record per distinct message is emitted every
    ``RATE_LIMITER_FILTER_RATE`` seconds (default 10); suppressed repeats are
    counted and the count is prepended (``[Nx] ...``) to the next record let
    through.
    """

    def filter(self, record):
        # Imported lazily so the filter can be configured before Django
        # settings are fully loaded.
        from django.conf import settings
        from django.core.cache import cache
        # Rate is specified as 1 messages logged per N seconds. (aka cache timeout)
        rate = getattr(settings, 'RATE_LIMITER_FILTER_RATE', 10)
        prefix = getattr(settings, 'RATE_LIMITER_FILTER_PREFIX', 'ratelimiterfilter')
        subject = record.getMessage()
        # BUGFIX: md5() requires bytes; hashing the str raised TypeError on
        # Python 3. Encode the message before hashing.
        cache_key = "%s:%s" % (prefix, md5(subject.encode('utf-8')).hexdigest())
        cache_count_key = "%s:count" % cache_key
        result = cache.get_many([cache_key, cache_count_key])
        value = result.get(cache_key)
        cntr = result.get(cache_count_key)
        if not cntr:
            # Counter lives slightly longer than the gate so the repeat count
            # survives until the next emitted record.
            cntr = 1
            cache.set(cache_count_key, cntr, rate + 60)
        if value:
            # Within the rate window: count the repeat and drop the record.
            cache.incr(cache_count_key)
            return False
        record.msg = "[%sx] %s" % (cntr, record.msg)
        cache.set(cache_key, time.time(), rate)
        return True
| mit | 2f33c6fd3b99a9503cbe4f9e6a4a306e | 30.277778 | 85 | 0.610124 | 3.632258 | false | false | false | false |
django-extensions/django-extensions | django_extensions/management/commands/compile_pyc.py | 1 | 1273 | # -*- coding: utf-8 -*-
import fnmatch
import os
import py_compile
from os.path import join as _j
from typing import List
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
from django_extensions.management.utils import signalcommand
class Command(BaseCommand):
    """Byte-compile every ``.py`` file under the project (or a given path)."""

    help = "Compile python bytecode files for the project."
    requires_system_checks: List[str] = []

    def add_arguments(self, parser):
        parser.add_argument(
            '--path', '-p', action='store', dest='path',
            help='Specify path to recurse into',
        )

    @signalcommand
    def handle(self, *args, **options):
        # Fall back to settings.BASE_DIR when no --path was given.
        project_root = options["path"] or getattr(settings, 'BASE_DIR', None)
        verbosity = options["verbosity"]
        if not project_root:
            raise CommandError("No --path specified and settings.py does not contain BASE_DIR")
        for dirpath, _subdirs, files in os.walk(project_root):
            for entry in fnmatch.filter(files, '*.py'):
                source = _j(dirpath, entry)
                if verbosity > 1:
                    self.stdout.write("Compiling %s...\n" % source)
                py_compile.compile(source)
| mit | 18d2c4d387737bf44cb0921aa3deb1ff | 33.405405 | 95 | 0.629222 | 4.271812 | false | false | false | false |
django-extensions/django-extensions | django_extensions/management/commands/clean_pyc.py | 1 | 1555 | # -*- coding: utf-8 -*-
import fnmatch
import os
from os.path import join as _j
from typing import List
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
from django_extensions.management.utils import signalcommand
class Command(BaseCommand):
    """Delete compiled python bytecode files (``.pyc``, optionally ``.pyo``)."""

    help = "Removes all python bytecode compiled files from the project."
    requires_system_checks: List[str] = []

    def add_arguments(self, parser):
        parser.add_argument(
            '--optimize', '-o', '-O', action='store_true',
            dest='optimize', default=False,
            help='Remove optimized python bytecode files'
        )
        parser.add_argument(
            '--path', '-p', action='store', dest='path',
            help='Specify path to recurse into'
        )

    @signalcommand
    def handle(self, *args, **options):
        # Previously this read options.get("path", BASE_DIR) and then fell
        # back to BASE_DIR a second time; a single explicit fallback is
        # equivalent and matches the compile_pyc command.
        project_root = options.get("path") or getattr(settings, 'BASE_DIR', None)
        verbosity = options["verbosity"]
        if not project_root:
            raise CommandError("No --path specified and settings.py does not contain BASE_DIR")
        # "*.py[co]" also matches optimized bytecode (.pyo) when --optimize is set.
        exts = "*.py[co]" if options["optimize"] else "*.pyc"
        for root, dirs, filenames in os.walk(project_root):
            for filename in fnmatch.filter(filenames, exts):
                full_path = _j(root, filename)
                if verbosity > 1:
                    self.stdout.write("%s\n" % full_path)
                os.remove(full_path)
| mit | 6c638f999354b8267bc5bed31d4ba2ba | 32.804348 | 95 | 0.609003 | 4.225543 | false | false | false | false |
django-extensions/django-extensions | django_extensions/templatetags/highlighting.py | 1 | 3224 | # -*- coding: utf-8 -*-
"""
Similar to syntax_color.py but this is intended more for being able to
copy+paste actual code into your Django templates without needing to
escape or anything crazy.
http://lobstertech.com/2008/aug/30/django_syntax_highlight_template_tag/
Example:
{% load highlighting %}
<style>
@import url("http://lobstertech.com/media/css/highlight.css");
.highlight { background: #f8f8f8; }
.highlight { font-size: 11px; margin: 1em; border: 1px solid #ccc;
border-left: 3px solid #F90; padding: 0; }
.highlight pre { padding: 1em; overflow: auto; line-height: 120%; margin: 0; }
.predesc { margin: 1.5em 1.5em -2.5em 1em; text-align: right;
font: bold 12px Tahoma, Arial, sans-serif;
letter-spacing: 1px; color: #333; }
</style>
<h2>check out this code</h2>
{% highlight 'python' 'Excerpt: blah.py' %}
def need_food(self):
print("Love is <colder> than &death&")
{% endhighlight %}
"""
from django import template
from django.template import (
Context, Node, Template, TemplateSyntaxError, Variable,
)
from django.template.defaultfilters import stringfilter
from django.utils.safestring import mark_safe
try:
from pygments import highlight as pyghighlight
from pygments.lexers import get_lexer_by_name
from pygments.formatters import HtmlFormatter
HAS_PYGMENTS = True
except ImportError: # pragma: no cover
HAS_PYGMENTS = False
register = template.Library()
@register.filter(is_safe=True)
@stringfilter
def parse_template(value):
    """Render *value* as a Django template with an empty context, marked safe."""
    rendered = Template(value).render(Context())
    return mark_safe(rendered)
class CodeNode(Node):
    """Template node that renders its children through pygments.

    ``language`` and the optional ``name`` (caption) are template variables
    resolved against the rendering context.
    """

    def __init__(self, language, nodelist, name=''):
        self.language = Variable(language)
        self.nodelist = nodelist
        self.name = Variable(name) if name else None

    def render(self, context):
        source = self.nodelist.render(context).strip()
        lexer = get_lexer_by_name(self.language.resolve(context))
        formatter = HtmlFormatter(linenos=False)
        if self.name:
            caption = self.name.resolve(context)
            prefix = '<div class="predesc"><span>%s</span></div>' % caption
        else:
            prefix = ""
        return prefix + pyghighlight(source, lexer, formatter)
@register.tag
def highlight(parser, token):
    """
    Tag that wraps its contents in a pygments-highlighted <pre> block.

    Takes two arguments: the language, and an optional short explanation
    shown before the code. The body is fed through pygments, so any
    language it supports may be used.

    Usage::

        {% load highlighting %}
        {% highlight 'python' 'Excerpt: blah.py' %}
        def need_food(self):
            print("Love is colder than death")
        {% endhighlight %}
    """
    if not HAS_PYGMENTS:  # pragma: no cover
        raise ImportError("Please install 'pygments' library to use highlighting.")
    nodelist = parser.parse(('endhighlight',))
    parser.delete_first_token()
    args = token.split_contents()[1:]
    if not args:
        raise TemplateSyntaxError("'highlight' statement requires an argument")
    return CodeNode(args[0], nodelist, *args[1:])
| mit | 6c850750704af830f82a78763ef0b40b | 30.607843 | 83 | 0.666253 | 3.727168 | false | false | false | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.