repo stringlengths 7 55 | path stringlengths 4 127 | func_name stringlengths 1 88 | original_string stringlengths 75 19.8k | language stringclasses 1
value | code stringlengths 75 19.8k | code_tokens listlengths 20 707 | docstring stringlengths 3 17.3k | docstring_tokens listlengths 3 222 | sha stringlengths 40 40 | url stringlengths 87 242 | partition stringclasses 1
value | idx int64 0 252k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
invinst/ResponseBot | responsebot/responsebot_client.py | ResponseBotClient.add_list_member | def add_list_member(self, list_id, user_id):
"""
Add a user to list
:param list_id: list ID number
:param user_id: user ID number
:return: :class:`~responsebot.models.List` object
"""
return List(tweepy_list_to_json(self._client.add_list_member(list_id=list_id, user_id=user_id))) | python | def add_list_member(self, list_id, user_id):
"""
Add a user to list
:param list_id: list ID number
:param user_id: user ID number
:return: :class:`~responsebot.models.List` object
"""
return List(tweepy_list_to_json(self._client.add_list_member(list_id=list_id, user_id=user_id))) | [
"def",
"add_list_member",
"(",
"self",
",",
"list_id",
",",
"user_id",
")",
":",
"return",
"List",
"(",
"tweepy_list_to_json",
"(",
"self",
".",
"_client",
".",
"add_list_member",
"(",
"list_id",
"=",
"list_id",
",",
"user_id",
"=",
"user_id",
")",
")",
")... | Add a user to list
:param list_id: list ID number
:param user_id: user ID number
:return: :class:`~responsebot.models.List` object | [
"Add",
"a",
"user",
"to",
"list"
] | a6b1a431a343007f7ae55a193e432a61af22253f | https://github.com/invinst/ResponseBot/blob/a6b1a431a343007f7ae55a193e432a61af22253f/responsebot/responsebot_client.py#L259-L267 | train | 55,300 |
invinst/ResponseBot | responsebot/responsebot_client.py | ResponseBotClient.remove_list_member | def remove_list_member(self, list_id, user_id):
"""
Remove a user from a list
:param list_id: list ID number
:param user_id: user ID number
:return: :class:`~responsebot.models.List` object
"""
return List(tweepy_list_to_json(self._client.remove_list_member(list_id=list_id, user_id=user_id))) | python | def remove_list_member(self, list_id, user_id):
"""
Remove a user from a list
:param list_id: list ID number
:param user_id: user ID number
:return: :class:`~responsebot.models.List` object
"""
return List(tweepy_list_to_json(self._client.remove_list_member(list_id=list_id, user_id=user_id))) | [
"def",
"remove_list_member",
"(",
"self",
",",
"list_id",
",",
"user_id",
")",
":",
"return",
"List",
"(",
"tweepy_list_to_json",
"(",
"self",
".",
"_client",
".",
"remove_list_member",
"(",
"list_id",
"=",
"list_id",
",",
"user_id",
"=",
"user_id",
")",
")"... | Remove a user from a list
:param list_id: list ID number
:param user_id: user ID number
:return: :class:`~responsebot.models.List` object | [
"Remove",
"a",
"user",
"from",
"a",
"list"
] | a6b1a431a343007f7ae55a193e432a61af22253f | https://github.com/invinst/ResponseBot/blob/a6b1a431a343007f7ae55a193e432a61af22253f/responsebot/responsebot_client.py#L270-L278 | train | 55,301 |
invinst/ResponseBot | responsebot/responsebot_client.py | ResponseBotClient.list_members | def list_members(self, list_id):
"""
List users in a list
:param list_id: list ID number
:return: list of :class:`~responsebot.models.User` objects
"""
return [User(user._json) for user in self._client.list_members(list_id=list_id)] | python | def list_members(self, list_id):
"""
List users in a list
:param list_id: list ID number
:return: list of :class:`~responsebot.models.User` objects
"""
return [User(user._json) for user in self._client.list_members(list_id=list_id)] | [
"def",
"list_members",
"(",
"self",
",",
"list_id",
")",
":",
"return",
"[",
"User",
"(",
"user",
".",
"_json",
")",
"for",
"user",
"in",
"self",
".",
"_client",
".",
"list_members",
"(",
"list_id",
"=",
"list_id",
")",
"]"
] | List users in a list
:param list_id: list ID number
:return: list of :class:`~responsebot.models.User` objects | [
"List",
"users",
"in",
"a",
"list"
] | a6b1a431a343007f7ae55a193e432a61af22253f | https://github.com/invinst/ResponseBot/blob/a6b1a431a343007f7ae55a193e432a61af22253f/responsebot/responsebot_client.py#L281-L288 | train | 55,302 |
invinst/ResponseBot | responsebot/responsebot_client.py | ResponseBotClient.is_list_member | def is_list_member(self, list_id, user_id):
"""
Check if a user is member of a list
:param list_id: list ID number
:param user_id: user ID number
:return: :code:`True` if user is member of list, :code:`False` otherwise
"""
try:
return bool(self._client.show_list_member(list_id=list_id, user_id=user_id))
except TweepError as e:
if e.api_code == TWITTER_USER_IS_NOT_LIST_MEMBER_SUBSCRIBER:
return False
raise | python | def is_list_member(self, list_id, user_id):
"""
Check if a user is member of a list
:param list_id: list ID number
:param user_id: user ID number
:return: :code:`True` if user is member of list, :code:`False` otherwise
"""
try:
return bool(self._client.show_list_member(list_id=list_id, user_id=user_id))
except TweepError as e:
if e.api_code == TWITTER_USER_IS_NOT_LIST_MEMBER_SUBSCRIBER:
return False
raise | [
"def",
"is_list_member",
"(",
"self",
",",
"list_id",
",",
"user_id",
")",
":",
"try",
":",
"return",
"bool",
"(",
"self",
".",
"_client",
".",
"show_list_member",
"(",
"list_id",
"=",
"list_id",
",",
"user_id",
"=",
"user_id",
")",
")",
"except",
"Tweep... | Check if a user is member of a list
:param list_id: list ID number
:param user_id: user ID number
:return: :code:`True` if user is member of list, :code:`False` otherwise | [
"Check",
"if",
"a",
"user",
"is",
"member",
"of",
"a",
"list"
] | a6b1a431a343007f7ae55a193e432a61af22253f | https://github.com/invinst/ResponseBot/blob/a6b1a431a343007f7ae55a193e432a61af22253f/responsebot/responsebot_client.py#L291-L304 | train | 55,303 |
invinst/ResponseBot | responsebot/responsebot_client.py | ResponseBotClient.subscribe_list | def subscribe_list(self, list_id):
"""
Subscribe to a list
:param list_id: list ID number
:return: :class:`~responsebot.models.List` object
"""
return List(tweepy_list_to_json(self._client.subscribe_list(list_id=list_id))) | python | def subscribe_list(self, list_id):
"""
Subscribe to a list
:param list_id: list ID number
:return: :class:`~responsebot.models.List` object
"""
return List(tweepy_list_to_json(self._client.subscribe_list(list_id=list_id))) | [
"def",
"subscribe_list",
"(",
"self",
",",
"list_id",
")",
":",
"return",
"List",
"(",
"tweepy_list_to_json",
"(",
"self",
".",
"_client",
".",
"subscribe_list",
"(",
"list_id",
"=",
"list_id",
")",
")",
")"
] | Subscribe to a list
:param list_id: list ID number
:return: :class:`~responsebot.models.List` object | [
"Subscribe",
"to",
"a",
"list"
] | a6b1a431a343007f7ae55a193e432a61af22253f | https://github.com/invinst/ResponseBot/blob/a6b1a431a343007f7ae55a193e432a61af22253f/responsebot/responsebot_client.py#L307-L314 | train | 55,304 |
invinst/ResponseBot | responsebot/responsebot_client.py | ResponseBotClient.unsubscribe_list | def unsubscribe_list(self, list_id):
"""
Unsubscribe to a list
:param list_id: list ID number
:return: :class:`~responsebot.models.List` object
"""
return List(tweepy_list_to_json(self._client.unsubscribe_list(list_id=list_id))) | python | def unsubscribe_list(self, list_id):
"""
Unsubscribe to a list
:param list_id: list ID number
:return: :class:`~responsebot.models.List` object
"""
return List(tweepy_list_to_json(self._client.unsubscribe_list(list_id=list_id))) | [
"def",
"unsubscribe_list",
"(",
"self",
",",
"list_id",
")",
":",
"return",
"List",
"(",
"tweepy_list_to_json",
"(",
"self",
".",
"_client",
".",
"unsubscribe_list",
"(",
"list_id",
"=",
"list_id",
")",
")",
")"
] | Unsubscribe to a list
:param list_id: list ID number
:return: :class:`~responsebot.models.List` object | [
"Unsubscribe",
"to",
"a",
"list"
] | a6b1a431a343007f7ae55a193e432a61af22253f | https://github.com/invinst/ResponseBot/blob/a6b1a431a343007f7ae55a193e432a61af22253f/responsebot/responsebot_client.py#L317-L324 | train | 55,305 |
invinst/ResponseBot | responsebot/responsebot_client.py | ResponseBotClient.list_subscribers | def list_subscribers(self, list_id):
"""
List subscribers of a list
:param list_id: list ID number
:return: :class:`~responsebot.models.User` object
"""
return [User(user._json) for user in self._client.list_subscribers(list_id=list_id)] | python | def list_subscribers(self, list_id):
"""
List subscribers of a list
:param list_id: list ID number
:return: :class:`~responsebot.models.User` object
"""
return [User(user._json) for user in self._client.list_subscribers(list_id=list_id)] | [
"def",
"list_subscribers",
"(",
"self",
",",
"list_id",
")",
":",
"return",
"[",
"User",
"(",
"user",
".",
"_json",
")",
"for",
"user",
"in",
"self",
".",
"_client",
".",
"list_subscribers",
"(",
"list_id",
"=",
"list_id",
")",
"]"
] | List subscribers of a list
:param list_id: list ID number
:return: :class:`~responsebot.models.User` object | [
"List",
"subscribers",
"of",
"a",
"list"
] | a6b1a431a343007f7ae55a193e432a61af22253f | https://github.com/invinst/ResponseBot/blob/a6b1a431a343007f7ae55a193e432a61af22253f/responsebot/responsebot_client.py#L327-L334 | train | 55,306 |
invinst/ResponseBot | responsebot/responsebot_client.py | ResponseBotClient.is_subscribed_list | def is_subscribed_list(self, list_id, user_id):
"""
Check if user is a subscribed of specified list
:param list_id: list ID number
:param user_id: user ID number
:return: :code:`True` if user is subscribed of list, :code:`False` otherwise
"""
try:
return bool(self._client.show_list_subscriber(list_id=list_id, user_id=user_id))
except TweepError as e:
if e.api_code == TWITTER_USER_IS_NOT_LIST_MEMBER_SUBSCRIBER:
return False
raise | python | def is_subscribed_list(self, list_id, user_id):
"""
Check if user is a subscribed of specified list
:param list_id: list ID number
:param user_id: user ID number
:return: :code:`True` if user is subscribed of list, :code:`False` otherwise
"""
try:
return bool(self._client.show_list_subscriber(list_id=list_id, user_id=user_id))
except TweepError as e:
if e.api_code == TWITTER_USER_IS_NOT_LIST_MEMBER_SUBSCRIBER:
return False
raise | [
"def",
"is_subscribed_list",
"(",
"self",
",",
"list_id",
",",
"user_id",
")",
":",
"try",
":",
"return",
"bool",
"(",
"self",
".",
"_client",
".",
"show_list_subscriber",
"(",
"list_id",
"=",
"list_id",
",",
"user_id",
"=",
"user_id",
")",
")",
"except",
... | Check if user is a subscribed of specified list
:param list_id: list ID number
:param user_id: user ID number
:return: :code:`True` if user is subscribed of list, :code:`False` otherwise | [
"Check",
"if",
"user",
"is",
"a",
"subscribed",
"of",
"specified",
"list"
] | a6b1a431a343007f7ae55a193e432a61af22253f | https://github.com/invinst/ResponseBot/blob/a6b1a431a343007f7ae55a193e432a61af22253f/responsebot/responsebot_client.py#L337-L350 | train | 55,307 |
invinst/ResponseBot | responsebot/utils/auth_utils.py | auth | def auth(config):
"""
Perform authentication with Twitter and return a client instance to communicate with Twitter
:param config: ResponseBot config
:type config: :class:`~responsebot.utils.config_utils.ResponseBotConfig`
:return: client instance to execute twitter action
:rtype: :class:`~responsebot.responsebot_client.ResponseBotClient`
:raises: :class:`~responsebot.common.exceptions.AuthenticationError`: If failed to authenticate
:raises: :class:`~responsebot.common.exceptions.APIQuotaError`: If API call rate reached limit
"""
auth = tweepy.OAuthHandler(config.get('consumer_key'), config.get('consumer_secret'))
auth.set_access_token(config.get('token_key'), config.get('token_secret'))
api = tweepy.API(auth)
try:
api.verify_credentials()
except RateLimitError as e:
raise APIQuotaError(e.args[0][0]['message'])
except TweepError as e:
raise AuthenticationError(e.args[0][0]['message'])
else:
logging.info('Successfully authenticated as %s' % api.me().screen_name)
return ResponseBotClient(config=config, client=api) | python | def auth(config):
"""
Perform authentication with Twitter and return a client instance to communicate with Twitter
:param config: ResponseBot config
:type config: :class:`~responsebot.utils.config_utils.ResponseBotConfig`
:return: client instance to execute twitter action
:rtype: :class:`~responsebot.responsebot_client.ResponseBotClient`
:raises: :class:`~responsebot.common.exceptions.AuthenticationError`: If failed to authenticate
:raises: :class:`~responsebot.common.exceptions.APIQuotaError`: If API call rate reached limit
"""
auth = tweepy.OAuthHandler(config.get('consumer_key'), config.get('consumer_secret'))
auth.set_access_token(config.get('token_key'), config.get('token_secret'))
api = tweepy.API(auth)
try:
api.verify_credentials()
except RateLimitError as e:
raise APIQuotaError(e.args[0][0]['message'])
except TweepError as e:
raise AuthenticationError(e.args[0][0]['message'])
else:
logging.info('Successfully authenticated as %s' % api.me().screen_name)
return ResponseBotClient(config=config, client=api) | [
"def",
"auth",
"(",
"config",
")",
":",
"auth",
"=",
"tweepy",
".",
"OAuthHandler",
"(",
"config",
".",
"get",
"(",
"'consumer_key'",
")",
",",
"config",
".",
"get",
"(",
"'consumer_secret'",
")",
")",
"auth",
".",
"set_access_token",
"(",
"config",
".",... | Perform authentication with Twitter and return a client instance to communicate with Twitter
:param config: ResponseBot config
:type config: :class:`~responsebot.utils.config_utils.ResponseBotConfig`
:return: client instance to execute twitter action
:rtype: :class:`~responsebot.responsebot_client.ResponseBotClient`
:raises: :class:`~responsebot.common.exceptions.AuthenticationError`: If failed to authenticate
:raises: :class:`~responsebot.common.exceptions.APIQuotaError`: If API call rate reached limit | [
"Perform",
"authentication",
"with",
"Twitter",
"and",
"return",
"a",
"client",
"instance",
"to",
"communicate",
"with",
"Twitter"
] | a6b1a431a343007f7ae55a193e432a61af22253f | https://github.com/invinst/ResponseBot/blob/a6b1a431a343007f7ae55a193e432a61af22253f/responsebot/utils/auth_utils.py#L15-L39 | train | 55,308 |
mickybart/python-atlasbroker | atlasbroker/service.py | AtlasBroker.bind | def bind(self, instance_id: str, binding_id: str, details: BindDetails) -> Binding:
"""Binding the instance
see openbrokerapi documentation
"""
# Find the instance
instance = self._backend.find(instance_id)
# Find or create the binding
binding = self._backend.find(binding_id, instance)
# Create the binding if needed
return self._backend.bind(binding, details.parameters) | python | def bind(self, instance_id: str, binding_id: str, details: BindDetails) -> Binding:
"""Binding the instance
see openbrokerapi documentation
"""
# Find the instance
instance = self._backend.find(instance_id)
# Find or create the binding
binding = self._backend.find(binding_id, instance)
# Create the binding if needed
return self._backend.bind(binding, details.parameters) | [
"def",
"bind",
"(",
"self",
",",
"instance_id",
":",
"str",
",",
"binding_id",
":",
"str",
",",
"details",
":",
"BindDetails",
")",
"->",
"Binding",
":",
"# Find the instance",
"instance",
"=",
"self",
".",
"_backend",
".",
"find",
"(",
"instance_id",
")",... | Binding the instance
see openbrokerapi documentation | [
"Binding",
"the",
"instance",
"see",
"openbrokerapi",
"documentation"
] | 5b741c1348a6d33b342e0852a8a8900fa9ebf00a | https://github.com/mickybart/python-atlasbroker/blob/5b741c1348a6d33b342e0852a8a8900fa9ebf00a/atlasbroker/service.py#L126-L139 | train | 55,309 |
FreekingDean/insteon-hub | insteon/api.py | InsteonAPI.post | def post(self, path, data={}):
'''Perform POST Request '''
response = requests.post(API_URL + path, data=json.dumps(data), headers=self._set_headers())
return self._check_response(response, self.post, path, data) | python | def post(self, path, data={}):
'''Perform POST Request '''
response = requests.post(API_URL + path, data=json.dumps(data), headers=self._set_headers())
return self._check_response(response, self.post, path, data) | [
"def",
"post",
"(",
"self",
",",
"path",
",",
"data",
"=",
"{",
"}",
")",
":",
"response",
"=",
"requests",
".",
"post",
"(",
"API_URL",
"+",
"path",
",",
"data",
"=",
"json",
".",
"dumps",
"(",
"data",
")",
",",
"headers",
"=",
"self",
".",
"_... | Perform POST Request | [
"Perform",
"POST",
"Request"
] | afd60d0a7fa74752f29d63c9bb6ccccd46d7aa3e | https://github.com/FreekingDean/insteon-hub/blob/afd60d0a7fa74752f29d63c9bb6ccccd46d7aa3e/insteon/api.py#L39-L42 | train | 55,310 |
FreekingDean/insteon-hub | insteon/api.py | InsteonAPI.delete | def delete(self, path, data={}):
'''Perform DELETE Request'''
if len(data) != 0:
parameter_string = ''
for k,v in data.items():
parameter_string += '{}={}'.format(k,v)
parameter_string += '&'
path += '?' + parameter_string
response = requests.delete(API_URL + path, headers=self._set_headers())
return self._check_response(response, self.delete, path, data) | python | def delete(self, path, data={}):
'''Perform DELETE Request'''
if len(data) != 0:
parameter_string = ''
for k,v in data.items():
parameter_string += '{}={}'.format(k,v)
parameter_string += '&'
path += '?' + parameter_string
response = requests.delete(API_URL + path, headers=self._set_headers())
return self._check_response(response, self.delete, path, data) | [
"def",
"delete",
"(",
"self",
",",
"path",
",",
"data",
"=",
"{",
"}",
")",
":",
"if",
"len",
"(",
"data",
")",
"!=",
"0",
":",
"parameter_string",
"=",
"''",
"for",
"k",
",",
"v",
"in",
"data",
".",
"items",
"(",
")",
":",
"parameter_string",
... | Perform DELETE Request | [
"Perform",
"DELETE",
"Request"
] | afd60d0a7fa74752f29d63c9bb6ccccd46d7aa3e | https://github.com/FreekingDean/insteon-hub/blob/afd60d0a7fa74752f29d63c9bb6ccccd46d7aa3e/insteon/api.py#L49-L59 | train | 55,311 |
kevinconway/confpy | confpy/loaders/ini.py | IniFile.parsed | def parsed(self):
"""Get the ConfigParser object which represents the content.
This property is cached and only parses the content once.
"""
if not self._parsed:
self._parsed = ConfigParser()
self._parsed.readfp(io.StringIO(self.content))
return self._parsed | python | def parsed(self):
"""Get the ConfigParser object which represents the content.
This property is cached and only parses the content once.
"""
if not self._parsed:
self._parsed = ConfigParser()
self._parsed.readfp(io.StringIO(self.content))
return self._parsed | [
"def",
"parsed",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"_parsed",
":",
"self",
".",
"_parsed",
"=",
"ConfigParser",
"(",
")",
"self",
".",
"_parsed",
".",
"readfp",
"(",
"io",
".",
"StringIO",
"(",
"self",
".",
"content",
")",
")",
"retu... | Get the ConfigParser object which represents the content.
This property is cached and only parses the content once. | [
"Get",
"the",
"ConfigParser",
"object",
"which",
"represents",
"the",
"content",
"."
] | 1ee8afcab46ac6915a5ff4184180434ac7b84a60 | https://github.com/kevinconway/confpy/blob/1ee8afcab46ac6915a5ff4184180434ac7b84a60/confpy/loaders/ini.py#L30-L40 | train | 55,312 |
MacHu-GWU/crawlib-project | crawlib/cache.py | create_cache | def create_cache(directory, compress_level=6, value_type_is_binary=False, **kwargs):
"""
Create a html cache. Html string will be automatically compressed.
:param directory: path for the cache directory.
:param compress_level: 0 ~ 9, 9 is slowest and smallest.
:param kwargs: other arguments.
:return: a `diskcache.Cache()`
"""
cache = diskcache.Cache(
directory,
disk=CompressedDisk,
disk_compress_level=compress_level,
disk_value_type_is_binary=value_type_is_binary,
**kwargs
)
return cache | python | def create_cache(directory, compress_level=6, value_type_is_binary=False, **kwargs):
"""
Create a html cache. Html string will be automatically compressed.
:param directory: path for the cache directory.
:param compress_level: 0 ~ 9, 9 is slowest and smallest.
:param kwargs: other arguments.
:return: a `diskcache.Cache()`
"""
cache = diskcache.Cache(
directory,
disk=CompressedDisk,
disk_compress_level=compress_level,
disk_value_type_is_binary=value_type_is_binary,
**kwargs
)
return cache | [
"def",
"create_cache",
"(",
"directory",
",",
"compress_level",
"=",
"6",
",",
"value_type_is_binary",
"=",
"False",
",",
"*",
"*",
"kwargs",
")",
":",
"cache",
"=",
"diskcache",
".",
"Cache",
"(",
"directory",
",",
"disk",
"=",
"CompressedDisk",
",",
"dis... | Create a html cache. Html string will be automatically compressed.
:param directory: path for the cache directory.
:param compress_level: 0 ~ 9, 9 is slowest and smallest.
:param kwargs: other arguments.
:return: a `diskcache.Cache()` | [
"Create",
"a",
"html",
"cache",
".",
"Html",
"string",
"will",
"be",
"automatically",
"compressed",
"."
] | 241516f2a7a0a32c692f7af35a1f44064e8ce1ab | https://github.com/MacHu-GWU/crawlib-project/blob/241516f2a7a0a32c692f7af35a1f44064e8ce1ab/crawlib/cache.py#L75-L91 | train | 55,313 |
scivision/sciencedates | sciencedates/ticks.py | timeticks | def timeticks(tdiff):
"""
NOTE do NOT use "interval" or ticks are misaligned! use "bysecond" only!
"""
if isinstance(tdiff, xarray.DataArray): # len==1
tdiff = timedelta(seconds=tdiff.values / np.timedelta64(1, 's'))
assert isinstance(tdiff, timedelta), 'expecting datetime.timedelta'
if tdiff > timedelta(hours=2):
return None, None
elif tdiff > timedelta(minutes=20):
return MinuteLocator(byminute=range(0, 60, 5)), MinuteLocator(byminute=range(0, 60, 2))
elif (timedelta(minutes=10) < tdiff) & (tdiff <= timedelta(minutes=20)):
return MinuteLocator(byminute=range(0, 60, 2)), MinuteLocator(byminute=range(0, 60, 1))
elif (timedelta(minutes=5) < tdiff) & (tdiff <= timedelta(minutes=10)):
return MinuteLocator(byminute=range(0, 60, 1)), SecondLocator(bysecond=range(0, 60, 30))
elif (timedelta(minutes=1) < tdiff) & (tdiff <= timedelta(minutes=5)):
return SecondLocator(bysecond=range(0, 60, 30)), SecondLocator(bysecond=range(0, 60, 10))
elif (timedelta(seconds=30) < tdiff) & (tdiff <= timedelta(minutes=1)):
return SecondLocator(bysecond=range(0, 60, 10)), SecondLocator(bysecond=range(0, 60, 2))
else:
return SecondLocator(bysecond=range(0, 60, 2)), SecondLocator(bysecond=range(0, 60, 1)) | python | def timeticks(tdiff):
"""
NOTE do NOT use "interval" or ticks are misaligned! use "bysecond" only!
"""
if isinstance(tdiff, xarray.DataArray): # len==1
tdiff = timedelta(seconds=tdiff.values / np.timedelta64(1, 's'))
assert isinstance(tdiff, timedelta), 'expecting datetime.timedelta'
if tdiff > timedelta(hours=2):
return None, None
elif tdiff > timedelta(minutes=20):
return MinuteLocator(byminute=range(0, 60, 5)), MinuteLocator(byminute=range(0, 60, 2))
elif (timedelta(minutes=10) < tdiff) & (tdiff <= timedelta(minutes=20)):
return MinuteLocator(byminute=range(0, 60, 2)), MinuteLocator(byminute=range(0, 60, 1))
elif (timedelta(minutes=5) < tdiff) & (tdiff <= timedelta(minutes=10)):
return MinuteLocator(byminute=range(0, 60, 1)), SecondLocator(bysecond=range(0, 60, 30))
elif (timedelta(minutes=1) < tdiff) & (tdiff <= timedelta(minutes=5)):
return SecondLocator(bysecond=range(0, 60, 30)), SecondLocator(bysecond=range(0, 60, 10))
elif (timedelta(seconds=30) < tdiff) & (tdiff <= timedelta(minutes=1)):
return SecondLocator(bysecond=range(0, 60, 10)), SecondLocator(bysecond=range(0, 60, 2))
else:
return SecondLocator(bysecond=range(0, 60, 2)), SecondLocator(bysecond=range(0, 60, 1)) | [
"def",
"timeticks",
"(",
"tdiff",
")",
":",
"if",
"isinstance",
"(",
"tdiff",
",",
"xarray",
".",
"DataArray",
")",
":",
"# len==1",
"tdiff",
"=",
"timedelta",
"(",
"seconds",
"=",
"tdiff",
".",
"values",
"/",
"np",
".",
"timedelta64",
"(",
"1",
",",
... | NOTE do NOT use "interval" or ticks are misaligned! use "bysecond" only! | [
"NOTE",
"do",
"NOT",
"use",
"interval",
"or",
"ticks",
"are",
"misaligned!",
"use",
"bysecond",
"only!"
] | a713389e027b42d26875cf227450a5d7c6696000 | https://github.com/scivision/sciencedates/blob/a713389e027b42d26875cf227450a5d7c6696000/sciencedates/ticks.py#L24-L52 | train | 55,314 |
fedora-infra/fedmsg-atomic-composer | fedmsg_atomic_composer/consumer.py | AtomicConsumer.consume | def consume(self, msg):
"""Called with each incoming fedmsg.
From here we trigger an rpm-ostree compose by touching a specific file
under the `touch_dir`. Then our `doRead` method is called with the
output of the rpm-ostree-toolbox treecompose, which we monitor to
determine when it has completed.
"""
self.log.info(msg)
body = msg['body']
topic = body['topic']
repo = None
if 'rawhide' in topic:
arch = body['msg']['arch']
self.log.info('New rawhide %s compose ready', arch)
repo = 'rawhide'
elif 'branched' in topic:
arch = body['msg']['arch']
branch = body['msg']['branch']
self.log.info('New %s %s branched compose ready', branch, arch)
log = body['msg']['log']
if log != 'done':
self.log.warn('Compose not done?')
return
repo = branch
elif 'updates.fedora' in topic:
self.log.info('New Fedora %(release)s %(repo)s compose ready',
body['msg'])
repo = 'f%(release)s-%(repo)s' % body['msg']
else:
self.log.warn('Unknown topic: %s', topic)
release = self.releases[repo]
reactor.callInThread(self.compose, release) | python | def consume(self, msg):
"""Called with each incoming fedmsg.
From here we trigger an rpm-ostree compose by touching a specific file
under the `touch_dir`. Then our `doRead` method is called with the
output of the rpm-ostree-toolbox treecompose, which we monitor to
determine when it has completed.
"""
self.log.info(msg)
body = msg['body']
topic = body['topic']
repo = None
if 'rawhide' in topic:
arch = body['msg']['arch']
self.log.info('New rawhide %s compose ready', arch)
repo = 'rawhide'
elif 'branched' in topic:
arch = body['msg']['arch']
branch = body['msg']['branch']
self.log.info('New %s %s branched compose ready', branch, arch)
log = body['msg']['log']
if log != 'done':
self.log.warn('Compose not done?')
return
repo = branch
elif 'updates.fedora' in topic:
self.log.info('New Fedora %(release)s %(repo)s compose ready',
body['msg'])
repo = 'f%(release)s-%(repo)s' % body['msg']
else:
self.log.warn('Unknown topic: %s', topic)
release = self.releases[repo]
reactor.callInThread(self.compose, release) | [
"def",
"consume",
"(",
"self",
",",
"msg",
")",
":",
"self",
".",
"log",
".",
"info",
"(",
"msg",
")",
"body",
"=",
"msg",
"[",
"'body'",
"]",
"topic",
"=",
"body",
"[",
"'topic'",
"]",
"repo",
"=",
"None",
"if",
"'rawhide'",
"in",
"topic",
":",
... | Called with each incoming fedmsg.
From here we trigger an rpm-ostree compose by touching a specific file
under the `touch_dir`. Then our `doRead` method is called with the
output of the rpm-ostree-toolbox treecompose, which we monitor to
determine when it has completed. | [
"Called",
"with",
"each",
"incoming",
"fedmsg",
"."
] | 9be9fd4955af0568f8743d7a1a243cd8f70020c3 | https://github.com/fedora-infra/fedmsg-atomic-composer/blob/9be9fd4955af0568f8743d7a1a243cd8f70020c3/fedmsg_atomic_composer/consumer.py#L41-L75 | train | 55,315 |
phensley/gstatsd | gstatsd/service.py | parse_addr | def parse_addr(text):
"Parse a 1- to 3-part address spec."
if text:
parts = text.split(':')
length = len(parts)
if length== 3:
return parts[0], parts[1], int(parts[2])
elif length == 2:
return None, parts[0], int(parts[1])
elif length == 1:
return None, '', int(parts[0])
return None, None, None | python | def parse_addr(text):
"Parse a 1- to 3-part address spec."
if text:
parts = text.split(':')
length = len(parts)
if length== 3:
return parts[0], parts[1], int(parts[2])
elif length == 2:
return None, parts[0], int(parts[1])
elif length == 1:
return None, '', int(parts[0])
return None, None, None | [
"def",
"parse_addr",
"(",
"text",
")",
":",
"if",
"text",
":",
"parts",
"=",
"text",
".",
"split",
"(",
"':'",
")",
"length",
"=",
"len",
"(",
"parts",
")",
"if",
"length",
"==",
"3",
":",
"return",
"parts",
"[",
"0",
"]",
",",
"parts",
"[",
"1... | Parse a 1- to 3-part address spec. | [
"Parse",
"a",
"1",
"-",
"to",
"3",
"-",
"part",
"address",
"spec",
"."
] | c6d3d22f162d236c1ef916064670c6dc5bce6142 | https://github.com/phensley/gstatsd/blob/c6d3d22f162d236c1ef916064670c6dc5bce6142/gstatsd/service.py#L79-L90 | train | 55,316 |
phensley/gstatsd | gstatsd/service.py | StatsDaemon.start | def start(self):
"Start the service"
# register signals
gevent.signal(signal.SIGINT, self._shutdown)
# spawn the flush trigger
def _flush_impl():
while 1:
gevent.sleep(self._stats.interval)
# rotate stats
stats = self._stats
self._reset_stats()
# send the stats to the sink which in turn broadcasts
# the stats packet to one or more hosts.
try:
self._sink.send(stats)
except Exception, ex:
trace = traceback.format_tb(sys.exc_info()[-1])
self.error(''.join(trace))
self._flush_task = gevent.spawn(_flush_impl)
# start accepting connections
self._sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM,
socket.IPPROTO_UDP)
self._sock.bind(self._bindaddr)
while 1:
try:
data, _ = self._sock.recvfrom(MAX_PACKET)
for p in data.split('\n'):
if p:
self._process(p)
except Exception, ex:
self.error(str(ex)) | python | def start(self):
"Start the service"
# register signals
gevent.signal(signal.SIGINT, self._shutdown)
# spawn the flush trigger
def _flush_impl():
while 1:
gevent.sleep(self._stats.interval)
# rotate stats
stats = self._stats
self._reset_stats()
# send the stats to the sink which in turn broadcasts
# the stats packet to one or more hosts.
try:
self._sink.send(stats)
except Exception, ex:
trace = traceback.format_tb(sys.exc_info()[-1])
self.error(''.join(trace))
self._flush_task = gevent.spawn(_flush_impl)
# start accepting connections
self._sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM,
socket.IPPROTO_UDP)
self._sock.bind(self._bindaddr)
while 1:
try:
data, _ = self._sock.recvfrom(MAX_PACKET)
for p in data.split('\n'):
if p:
self._process(p)
except Exception, ex:
self.error(str(ex)) | [
"def",
"start",
"(",
"self",
")",
":",
"# register signals",
"gevent",
".",
"signal",
"(",
"signal",
".",
"SIGINT",
",",
"self",
".",
"_shutdown",
")",
"# spawn the flush trigger",
"def",
"_flush_impl",
"(",
")",
":",
"while",
"1",
":",
"gevent",
".",
"sle... | Start the service | [
"Start",
"the",
"service"
] | c6d3d22f162d236c1ef916064670c6dc5bce6142 | https://github.com/phensley/gstatsd/blob/c6d3d22f162d236c1ef916064670c6dc5bce6142/gstatsd/service.py#L147-L182 | train | 55,317 |
phensley/gstatsd | gstatsd/service.py | StatsDaemon._process | def _process(self, data):
"Process a single packet and update the internal tables."
parts = data.split(':')
if self._debug:
self.error('packet: %r' % data)
if not parts:
return
# interpret the packet and update stats
stats = self._stats
key = parts[0].translate(KEY_TABLE, KEY_DELETIONS)
if self._key_prefix:
key = '.'.join([self._key_prefix, key])
for part in parts[1:]:
srate = 1.0
fields = part.split('|')
length = len(fields)
if length < 2:
continue
value = fields[0]
stype = fields[1].strip()
with stats_lock:
# timer (milliseconds)
if stype == 'ms':
stats.timers[key].append(float(value if value else 0))
# counter with optional sample rate
elif stype == 'c':
if length == 3 and fields[2].startswith('@'):
srate = float(fields[2][1:])
value = float(value if value else 1) * (1 / srate)
stats.counts[key] += value
elif stype == 'g':
value = float(value if value else 1)
stats.gauges[key] = value | python | def _process(self, data):
"Process a single packet and update the internal tables."
parts = data.split(':')
if self._debug:
self.error('packet: %r' % data)
if not parts:
return
# interpret the packet and update stats
stats = self._stats
key = parts[0].translate(KEY_TABLE, KEY_DELETIONS)
if self._key_prefix:
key = '.'.join([self._key_prefix, key])
for part in parts[1:]:
srate = 1.0
fields = part.split('|')
length = len(fields)
if length < 2:
continue
value = fields[0]
stype = fields[1].strip()
with stats_lock:
# timer (milliseconds)
if stype == 'ms':
stats.timers[key].append(float(value if value else 0))
# counter with optional sample rate
elif stype == 'c':
if length == 3 and fields[2].startswith('@'):
srate = float(fields[2][1:])
value = float(value if value else 1) * (1 / srate)
stats.counts[key] += value
elif stype == 'g':
value = float(value if value else 1)
stats.gauges[key] = value | [
"def",
"_process",
"(",
"self",
",",
"data",
")",
":",
"parts",
"=",
"data",
".",
"split",
"(",
"':'",
")",
"if",
"self",
".",
"_debug",
":",
"self",
".",
"error",
"(",
"'packet: %r'",
"%",
"data",
")",
"if",
"not",
"parts",
":",
"return",
"# inter... | Process a single packet and update the internal tables. | [
"Process",
"a",
"single",
"packet",
"and",
"update",
"the",
"internal",
"tables",
"."
] | c6d3d22f162d236c1ef916064670c6dc5bce6142 | https://github.com/phensley/gstatsd/blob/c6d3d22f162d236c1ef916064670c6dc5bce6142/gstatsd/service.py#L188-L223 | train | 55,318 |
pauleveritt/kaybee | kaybee/plugins/articles/base_article.py | BaseArticle.section | def section(self, resources):
""" Which section is this in, if any """
section = [p for p in self.parents(resources) if p.rtype == 'section']
if section:
return section[0]
return None | python | def section(self, resources):
""" Which section is this in, if any """
section = [p for p in self.parents(resources) if p.rtype == 'section']
if section:
return section[0]
return None | [
"def",
"section",
"(",
"self",
",",
"resources",
")",
":",
"section",
"=",
"[",
"p",
"for",
"p",
"in",
"self",
".",
"parents",
"(",
"resources",
")",
"if",
"p",
".",
"rtype",
"==",
"'section'",
"]",
"if",
"section",
":",
"return",
"section",
"[",
"... | Which section is this in, if any | [
"Which",
"section",
"is",
"this",
"in",
"if",
"any"
] | a00a718aaaa23b2d12db30dfacb6b2b6ec84459c | https://github.com/pauleveritt/kaybee/blob/a00a718aaaa23b2d12db30dfacb6b2b6ec84459c/kaybee/plugins/articles/base_article.py#L35-L41 | train | 55,319 |
pauleveritt/kaybee | kaybee/plugins/articles/base_article.py | BaseArticle.in_navitem | def in_navitem(self, resources, nav_href):
""" Given href of nav item, determine if resource is in it """
# The navhref might end with '/index' so remove it if so
if nav_href.endswith('/index'):
nav_href = nav_href[:-6]
return self.docname.startswith(nav_href) | python | def in_navitem(self, resources, nav_href):
""" Given href of nav item, determine if resource is in it """
# The navhref might end with '/index' so remove it if so
if nav_href.endswith('/index'):
nav_href = nav_href[:-6]
return self.docname.startswith(nav_href) | [
"def",
"in_navitem",
"(",
"self",
",",
"resources",
",",
"nav_href",
")",
":",
"# The navhref might end with '/index' so remove it if so",
"if",
"nav_href",
".",
"endswith",
"(",
"'/index'",
")",
":",
"nav_href",
"=",
"nav_href",
"[",
":",
"-",
"6",
"]",
"return... | Given href of nav item, determine if resource is in it | [
"Given",
"href",
"of",
"nav",
"item",
"determine",
"if",
"resource",
"is",
"in",
"it"
] | a00a718aaaa23b2d12db30dfacb6b2b6ec84459c | https://github.com/pauleveritt/kaybee/blob/a00a718aaaa23b2d12db30dfacb6b2b6ec84459c/kaybee/plugins/articles/base_article.py#L43-L50 | train | 55,320 |
pauleveritt/kaybee | kaybee/plugins/articles/base_article.py | BaseArticle.is_published | def is_published(self):
""" Return true if this resource has published date in the past """
now = datetime.now()
published = self.props.published
if published:
return published < now
return False | python | def is_published(self):
""" Return true if this resource has published date in the past """
now = datetime.now()
published = self.props.published
if published:
return published < now
return False | [
"def",
"is_published",
"(",
"self",
")",
":",
"now",
"=",
"datetime",
".",
"now",
"(",
")",
"published",
"=",
"self",
".",
"props",
".",
"published",
"if",
"published",
":",
"return",
"published",
"<",
"now",
"return",
"False"
] | Return true if this resource has published date in the past | [
"Return",
"true",
"if",
"this",
"resource",
"has",
"published",
"date",
"in",
"the",
"past"
] | a00a718aaaa23b2d12db30dfacb6b2b6ec84459c | https://github.com/pauleveritt/kaybee/blob/a00a718aaaa23b2d12db30dfacb6b2b6ec84459c/kaybee/plugins/articles/base_article.py#L53-L60 | train | 55,321 |
MacHu-GWU/crawlib-project | crawlib/downloader/selenium_downloader.py | BaseSeleliumDownloader._create_driver | def _create_driver(self, **kwargs):
"""
Create webdriver, assign it to ``self.driver``, and run webdriver
initiation process, which is usually used for manual login.
"""
if self.driver is None:
self.driver = self.create_driver(**kwargs)
self.init_driver_func(self.driver) | python | def _create_driver(self, **kwargs):
"""
Create webdriver, assign it to ``self.driver``, and run webdriver
initiation process, which is usually used for manual login.
"""
if self.driver is None:
self.driver = self.create_driver(**kwargs)
self.init_driver_func(self.driver) | [
"def",
"_create_driver",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"self",
".",
"driver",
"is",
"None",
":",
"self",
".",
"driver",
"=",
"self",
".",
"create_driver",
"(",
"*",
"*",
"kwargs",
")",
"self",
".",
"init_driver_func",
"(",
"se... | Create webdriver, assign it to ``self.driver``, and run webdriver
initiation process, which is usually used for manual login. | [
"Create",
"webdriver",
"assign",
"it",
"to",
"self",
".",
"driver",
"and",
"run",
"webdriver",
"initiation",
"process",
"which",
"is",
"usually",
"used",
"for",
"manual",
"login",
"."
] | 241516f2a7a0a32c692f7af35a1f44064e8ce1ab | https://github.com/MacHu-GWU/crawlib-project/blob/241516f2a7a0a32c692f7af35a1f44064e8ce1ab/crawlib/downloader/selenium_downloader.py#L86-L93 | train | 55,322 |
Yipit/ejson | ejson/serializers.py | deserialize_time | def deserialize_time(data):
"""Return a time instance based on the values of the data param"""
parsed = parser.parse(data)
return parsed.time().replace(tzinfo=parsed.tzinfo) | python | def deserialize_time(data):
"""Return a time instance based on the values of the data param"""
parsed = parser.parse(data)
return parsed.time().replace(tzinfo=parsed.tzinfo) | [
"def",
"deserialize_time",
"(",
"data",
")",
":",
"parsed",
"=",
"parser",
".",
"parse",
"(",
"data",
")",
"return",
"parsed",
".",
"time",
"(",
")",
".",
"replace",
"(",
"tzinfo",
"=",
"parsed",
".",
"tzinfo",
")"
] | Return a time instance based on the values of the data param | [
"Return",
"a",
"time",
"instance",
"based",
"on",
"the",
"values",
"of",
"the",
"data",
"param"
] | 6665703f1534923d1c30849e08339f0ff97d8230 | https://github.com/Yipit/ejson/blob/6665703f1534923d1c30849e08339f0ff97d8230/ejson/serializers.py#L74-L77 | train | 55,323 |
nwhitehead/pineapple-module | pineapple/require.py | require | def require(*args, **kwargs):
'''
Install a set of packages using pip
This is designed to be an interface for IPython notebooks that
replicates the requirements.txt pip format. This lets notebooks
specify which versions of packages they need inside the notebook
itself.
This function is the general-purpose interface that lets
the caller specify any version string for any package.
'''
# If called with no arguments, returns requirements list
if not args and not kwargs:
return freeze()
# Construct array of requirements
requirements = list(args)
extra = ['{}{}'.format(kw, kwargs[kw]) for kw in kwargs]
requirements.extend(extra)
args = ['install', '-q']
args.extend(requirements)
pip.main(args) | python | def require(*args, **kwargs):
'''
Install a set of packages using pip
This is designed to be an interface for IPython notebooks that
replicates the requirements.txt pip format. This lets notebooks
specify which versions of packages they need inside the notebook
itself.
This function is the general-purpose interface that lets
the caller specify any version string for any package.
'''
# If called with no arguments, returns requirements list
if not args and not kwargs:
return freeze()
# Construct array of requirements
requirements = list(args)
extra = ['{}{}'.format(kw, kwargs[kw]) for kw in kwargs]
requirements.extend(extra)
args = ['install', '-q']
args.extend(requirements)
pip.main(args) | [
"def",
"require",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"# If called with no arguments, returns requirements list",
"if",
"not",
"args",
"and",
"not",
"kwargs",
":",
"return",
"freeze",
"(",
")",
"# Construct array of requirements",
"requirements",
"=... | Install a set of packages using pip
This is designed to be an interface for IPython notebooks that
replicates the requirements.txt pip format. This lets notebooks
specify which versions of packages they need inside the notebook
itself.
This function is the general-purpose interface that lets
the caller specify any version string for any package. | [
"Install",
"a",
"set",
"of",
"packages",
"using",
"pip",
"This",
"is",
"designed",
"to",
"be",
"an",
"interface",
"for",
"IPython",
"notebooks",
"that",
"replicates",
"the",
"requirements",
".",
"txt",
"pip",
"format",
".",
"This",
"lets",
"notebooks",
"spec... | d35ac6d9c1d748dbc06ac568829325c36680b0cb | https://github.com/nwhitehead/pineapple-module/blob/d35ac6d9c1d748dbc06ac568829325c36680b0cb/pineapple/require.py#L17-L40 | train | 55,324 |
paltman-archive/nashvegas | nashvegas/management/commands/comparedb.py | Command.handle | def handle(self, *args, **options):
"""
Compares current database with a migrations.
Creates a temporary database, applies all the migrations to it, and
then dumps the schema from both current and temporary, diffs them,
then report the diffs to the user.
"""
self.db = options.get("database", DEFAULT_DB_ALIAS)
self.current_name = connections[self.db].settings_dict["NAME"]
self.compare_name = options.get("db_name")
self.lines = options.get("lines")
self.ignore = int(options.get('ignore'))
if not self.compare_name:
self.compare_name = "%s_compare" % self.current_name
command = NASHVEGAS.get("dumpdb", "pg_dump -s {dbname}")
print "Getting schema for current database..."
current_sql = Popen(
command.format(dbname=self.current_name),
shell=True,
stdout=PIPE
).stdout.readlines()
print "Getting schema for fresh database..."
self.setup_database()
connections[self.db].close()
connections[self.db].settings_dict["NAME"] = self.compare_name
try:
call_command("syncdb", interactive=False, verbosity=0, migrations=False)
new_sql = Popen(
command.format(dbname=self.compare_name).split(),
stdout=PIPE
).stdout.readlines()
finally:
connections[self.db].close()
connections[self.db].settings_dict["NAME"] = self.current_name
self.teardown_database()
print "Outputing diff between the two..."
print "".join(difflib.unified_diff(normalize_sql(current_sql, self.ignore),
normalize_sql(new_sql, self.ignore),
n=int(self.lines))) | python | def handle(self, *args, **options):
"""
Compares current database with a migrations.
Creates a temporary database, applies all the migrations to it, and
then dumps the schema from both current and temporary, diffs them,
then report the diffs to the user.
"""
self.db = options.get("database", DEFAULT_DB_ALIAS)
self.current_name = connections[self.db].settings_dict["NAME"]
self.compare_name = options.get("db_name")
self.lines = options.get("lines")
self.ignore = int(options.get('ignore'))
if not self.compare_name:
self.compare_name = "%s_compare" % self.current_name
command = NASHVEGAS.get("dumpdb", "pg_dump -s {dbname}")
print "Getting schema for current database..."
current_sql = Popen(
command.format(dbname=self.current_name),
shell=True,
stdout=PIPE
).stdout.readlines()
print "Getting schema for fresh database..."
self.setup_database()
connections[self.db].close()
connections[self.db].settings_dict["NAME"] = self.compare_name
try:
call_command("syncdb", interactive=False, verbosity=0, migrations=False)
new_sql = Popen(
command.format(dbname=self.compare_name).split(),
stdout=PIPE
).stdout.readlines()
finally:
connections[self.db].close()
connections[self.db].settings_dict["NAME"] = self.current_name
self.teardown_database()
print "Outputing diff between the two..."
print "".join(difflib.unified_diff(normalize_sql(current_sql, self.ignore),
normalize_sql(new_sql, self.ignore),
n=int(self.lines))) | [
"def",
"handle",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"options",
")",
":",
"self",
".",
"db",
"=",
"options",
".",
"get",
"(",
"\"database\"",
",",
"DEFAULT_DB_ALIAS",
")",
"self",
".",
"current_name",
"=",
"connections",
"[",
"self",
".",
"... | Compares current database with a migrations.
Creates a temporary database, applies all the migrations to it, and
then dumps the schema from both current and temporary, diffs them,
then report the diffs to the user. | [
"Compares",
"current",
"database",
"with",
"a",
"migrations",
".",
"Creates",
"a",
"temporary",
"database",
"applies",
"all",
"the",
"migrations",
"to",
"it",
"and",
"then",
"dumps",
"the",
"schema",
"from",
"both",
"current",
"and",
"temporary",
"diffs",
"the... | 14e904a3f5b87e878cd053b554e76e85943d1c11 | https://github.com/paltman-archive/nashvegas/blob/14e904a3f5b87e878cd053b554e76e85943d1c11/nashvegas/management/commands/comparedb.py#L71-L115 | train | 55,325 |
pauleveritt/kaybee | kaybee/plugins/widgets/handlers.py | render_widgets | def render_widgets(kb_app: kb,
sphinx_app: Sphinx,
doctree: doctree,
fromdocname: str,
):
""" Go through docs and replace widget directive with rendering """
builder: StandaloneHTMLBuilder = sphinx_app.builder
for node in doctree.traverse(widget):
# Render the output
w = sphinx_app.env.widgets.get(node.name)
context = builder.globalcontext.copy()
# Add in certain globals
context['resources'] = sphinx_app.env.resources
context['references'] = sphinx_app.env.references
output = w.render(sphinx_app, context)
# Put the output into the node contents
listing = [nodes.raw('', output, format='html')]
node.replace_self(listing) | python | def render_widgets(kb_app: kb,
sphinx_app: Sphinx,
doctree: doctree,
fromdocname: str,
):
""" Go through docs and replace widget directive with rendering """
builder: StandaloneHTMLBuilder = sphinx_app.builder
for node in doctree.traverse(widget):
# Render the output
w = sphinx_app.env.widgets.get(node.name)
context = builder.globalcontext.copy()
# Add in certain globals
context['resources'] = sphinx_app.env.resources
context['references'] = sphinx_app.env.references
output = w.render(sphinx_app, context)
# Put the output into the node contents
listing = [nodes.raw('', output, format='html')]
node.replace_self(listing) | [
"def",
"render_widgets",
"(",
"kb_app",
":",
"kb",
",",
"sphinx_app",
":",
"Sphinx",
",",
"doctree",
":",
"doctree",
",",
"fromdocname",
":",
"str",
",",
")",
":",
"builder",
":",
"StandaloneHTMLBuilder",
"=",
"sphinx_app",
".",
"builder",
"for",
"node",
"... | Go through docs and replace widget directive with rendering | [
"Go",
"through",
"docs",
"and",
"replace",
"widget",
"directive",
"with",
"rendering"
] | a00a718aaaa23b2d12db30dfacb6b2b6ec84459c | https://github.com/pauleveritt/kaybee/blob/a00a718aaaa23b2d12db30dfacb6b2b6ec84459c/kaybee/plugins/widgets/handlers.py#L63-L84 | train | 55,326 |
jingming/spotify | spotify/auth/client.py | Client.auth_string | def auth_string(self):
"""
Get the auth string. If the token is expired and auto refresh enabled,
a new token will be fetched
:return: the auth string
:rtype: str
"""
if not self._token:
self.execute()
if not self._token.expired:
return 'Bearer {}'.format(self._token.access_token)
if self.auto_refresh:
self.execute()
return 'Bearer {}'.format(self._token.access_token)
raise TokenExpired() | python | def auth_string(self):
"""
Get the auth string. If the token is expired and auto refresh enabled,
a new token will be fetched
:return: the auth string
:rtype: str
"""
if not self._token:
self.execute()
if not self._token.expired:
return 'Bearer {}'.format(self._token.access_token)
if self.auto_refresh:
self.execute()
return 'Bearer {}'.format(self._token.access_token)
raise TokenExpired() | [
"def",
"auth_string",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"_token",
":",
"self",
".",
"execute",
"(",
")",
"if",
"not",
"self",
".",
"_token",
".",
"expired",
":",
"return",
"'Bearer {}'",
".",
"format",
"(",
"self",
".",
"_token",
".",
... | Get the auth string. If the token is expired and auto refresh enabled,
a new token will be fetched
:return: the auth string
:rtype: str | [
"Get",
"the",
"auth",
"string",
".",
"If",
"the",
"token",
"is",
"expired",
"and",
"auto",
"refresh",
"enabled",
"a",
"new",
"token",
"will",
"be",
"fetched"
] | d92c71073b2515f3c850604114133a7d2022d1a4 | https://github.com/jingming/spotify/blob/d92c71073b2515f3c850604114133a7d2022d1a4/spotify/auth/client.py#L27-L45 | train | 55,327 |
codeforamerica/epa_python | epa/pcs/pcs.py | PCS.admin_penalty | def admin_penalty(self, column=None, value=None, **kwargs):
"""
An enforcement action that results in levying the permit holder with a
penalty or fine. It is used to track judicial hearing dates, penalty
amounts, and type of administrative penalty order.
>>> PCS().admin_penalty('enfor_action_date', '16-MAR-01')
"""
return self._resolve_call('PCS_ADMIN_PENALTY_ORDER', column,
value, **kwargs) | python | def admin_penalty(self, column=None, value=None, **kwargs):
"""
An enforcement action that results in levying the permit holder with a
penalty or fine. It is used to track judicial hearing dates, penalty
amounts, and type of administrative penalty order.
>>> PCS().admin_penalty('enfor_action_date', '16-MAR-01')
"""
return self._resolve_call('PCS_ADMIN_PENALTY_ORDER', column,
value, **kwargs) | [
"def",
"admin_penalty",
"(",
"self",
",",
"column",
"=",
"None",
",",
"value",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"self",
".",
"_resolve_call",
"(",
"'PCS_ADMIN_PENALTY_ORDER'",
",",
"column",
",",
"value",
",",
"*",
"*",
"kwargs",... | An enforcement action that results in levying the permit holder with a
penalty or fine. It is used to track judicial hearing dates, penalty
amounts, and type of administrative penalty order.
>>> PCS().admin_penalty('enfor_action_date', '16-MAR-01') | [
"An",
"enforcement",
"action",
"that",
"results",
"in",
"levying",
"the",
"permit",
"holder",
"with",
"a",
"penalty",
"or",
"fine",
".",
"It",
"is",
"used",
"to",
"track",
"judicial",
"hearing",
"dates",
"penalty",
"amounts",
"and",
"type",
"of",
"administra... | 62a53da62936bea8daa487a01a52b973e9062b2c | https://github.com/codeforamerica/epa_python/blob/62a53da62936bea8daa487a01a52b973e9062b2c/epa/pcs/pcs.py#L24-L33 | train | 55,328 |
codeforamerica/epa_python | epa/pcs/pcs.py | PCS.compliance_schedule | def compliance_schedule(self, column=None, value=None, **kwargs):
"""
A sequence of activities with associated milestones which pertains to a
given permit.
>>> PCS().compliance_schedule('cmpl_schd_evt', '62099')
"""
return self._resolve_call('PCS_CMPL_SCHD', column, value, **kwargs) | python | def compliance_schedule(self, column=None, value=None, **kwargs):
"""
A sequence of activities with associated milestones which pertains to a
given permit.
>>> PCS().compliance_schedule('cmpl_schd_evt', '62099')
"""
return self._resolve_call('PCS_CMPL_SCHD', column, value, **kwargs) | [
"def",
"compliance_schedule",
"(",
"self",
",",
"column",
"=",
"None",
",",
"value",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"self",
".",
"_resolve_call",
"(",
"'PCS_CMPL_SCHD'",
",",
"column",
",",
"value",
",",
"*",
"*",
"kwargs",
"... | A sequence of activities with associated milestones which pertains to a
given permit.
>>> PCS().compliance_schedule('cmpl_schd_evt', '62099') | [
"A",
"sequence",
"of",
"activities",
"with",
"associated",
"milestones",
"which",
"pertains",
"to",
"a",
"given",
"permit",
"."
] | 62a53da62936bea8daa487a01a52b973e9062b2c | https://github.com/codeforamerica/epa_python/blob/62a53da62936bea8daa487a01a52b973e9062b2c/epa/pcs/pcs.py#L58-L65 | train | 55,329 |
codeforamerica/epa_python | epa/pcs/pcs.py | PCS.compliance_violation | def compliance_violation(self, column=None, value=None, **kwargs):
"""
A compliance schedule violation reflects the non-achievement of a
given compliance schedule event including the type of violation and ty
pe of resolution.
>>> PCS().compliance_violation('cs_rnc_detect_date', '16-MAR-04')
"""
return self._resolve_call('PCS_CMPL_SCHD_VIOL', column, value, **kwargs) | python | def compliance_violation(self, column=None, value=None, **kwargs):
"""
A compliance schedule violation reflects the non-achievement of a
given compliance schedule event including the type of violation and ty
pe of resolution.
>>> PCS().compliance_violation('cs_rnc_detect_date', '16-MAR-04')
"""
return self._resolve_call('PCS_CMPL_SCHD_VIOL', column, value, **kwargs) | [
"def",
"compliance_violation",
"(",
"self",
",",
"column",
"=",
"None",
",",
"value",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"self",
".",
"_resolve_call",
"(",
"'PCS_CMPL_SCHD_VIOL'",
",",
"column",
",",
"value",
",",
"*",
"*",
"kwargs... | A compliance schedule violation reflects the non-achievement of a
given compliance schedule event including the type of violation and ty
pe of resolution.
>>> PCS().compliance_violation('cs_rnc_detect_date', '16-MAR-04') | [
"A",
"compliance",
"schedule",
"violation",
"reflects",
"the",
"non",
"-",
"achievement",
"of",
"a",
"given",
"compliance",
"schedule",
"event",
"including",
"the",
"type",
"of",
"violation",
"and",
"ty",
"pe",
"of",
"resolution",
"."
] | 62a53da62936bea8daa487a01a52b973e9062b2c | https://github.com/codeforamerica/epa_python/blob/62a53da62936bea8daa487a01a52b973e9062b2c/epa/pcs/pcs.py#L67-L75 | train | 55,330 |
codeforamerica/epa_python | epa/pcs/pcs.py | PCS.enforcement_action | def enforcement_action(self, column=None, value=None, **kwargs):
"""
A disciplinary action taken against a permit facility. The action may
be applicable to one or more violations.
>>> PCS().enforcement_action('ea_code', '09')
"""
return self._resolve_call('PCS_ENFOR_ACTION', column, value, **kwargs) | python | def enforcement_action(self, column=None, value=None, **kwargs):
"""
A disciplinary action taken against a permit facility. The action may
be applicable to one or more violations.
>>> PCS().enforcement_action('ea_code', '09')
"""
return self._resolve_call('PCS_ENFOR_ACTION', column, value, **kwargs) | [
"def",
"enforcement_action",
"(",
"self",
",",
"column",
"=",
"None",
",",
"value",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"self",
".",
"_resolve_call",
"(",
"'PCS_ENFOR_ACTION'",
",",
"column",
",",
"value",
",",
"*",
"*",
"kwargs",
... | A disciplinary action taken against a permit facility. The action may
be applicable to one or more violations.
>>> PCS().enforcement_action('ea_code', '09') | [
"A",
"disciplinary",
"action",
"taken",
"against",
"a",
"permit",
"facility",
".",
"The",
"action",
"may",
"be",
"applicable",
"to",
"one",
"or",
"more",
"violations",
"."
] | 62a53da62936bea8daa487a01a52b973e9062b2c | https://github.com/codeforamerica/epa_python/blob/62a53da62936bea8daa487a01a52b973e9062b2c/epa/pcs/pcs.py#L87-L94 | train | 55,331 |
codeforamerica/epa_python | epa/pcs/pcs.py | PCS.hearing | def hearing(self, column=None, value=None, **kwargs):
"""
An evidentiary hearing.
>>> PCS().hearing('event_date', '23-MAY-01')
"""
return self._resolve_call('PCS_EVIDENTIARY_HEARING_EVENT', column,
value, **kwargs) | python | def hearing(self, column=None, value=None, **kwargs):
"""
An evidentiary hearing.
>>> PCS().hearing('event_date', '23-MAY-01')
"""
return self._resolve_call('PCS_EVIDENTIARY_HEARING_EVENT', column,
value, **kwargs) | [
"def",
"hearing",
"(",
"self",
",",
"column",
"=",
"None",
",",
"value",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"self",
".",
"_resolve_call",
"(",
"'PCS_EVIDENTIARY_HEARING_EVENT'",
",",
"column",
",",
"value",
",",
"*",
"*",
"kwargs",... | An evidentiary hearing.
>>> PCS().hearing('event_date', '23-MAY-01') | [
"An",
"evidentiary",
"hearing",
"."
] | 62a53da62936bea8daa487a01a52b973e9062b2c | https://github.com/codeforamerica/epa_python/blob/62a53da62936bea8daa487a01a52b973e9062b2c/epa/pcs/pcs.py#L115-L122 | train | 55,332 |
codeforamerica/epa_python | epa/pcs/pcs.py | PCS.industrial_user | def industrial_user(self, column=None, value=None, **kwargs):
"""
Information from the PCI_AUDIT table pertaining to industrial users,
i.e. the number of significant industrial users.
>>> PCS().industrial_user('insp_date', '16-MAR-01')
"""
return self._resolve_call('PCS_INDUSTRIAL_USER_INFO', column,
value, **kwargs) | python | def industrial_user(self, column=None, value=None, **kwargs):
"""
Information from the PCI_AUDIT table pertaining to industrial users,
i.e. the number of significant industrial users.
>>> PCS().industrial_user('insp_date', '16-MAR-01')
"""
return self._resolve_call('PCS_INDUSTRIAL_USER_INFO', column,
value, **kwargs) | [
"def",
"industrial_user",
"(",
"self",
",",
"column",
"=",
"None",
",",
"value",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"self",
".",
"_resolve_call",
"(",
"'PCS_INDUSTRIAL_USER_INFO'",
",",
"column",
",",
"value",
",",
"*",
"*",
"kwarg... | Information from the PCI_AUDIT table pertaining to industrial users,
i.e. the number of significant industrial users.
>>> PCS().industrial_user('insp_date', '16-MAR-01') | [
"Information",
"from",
"the",
"PCI_AUDIT",
"table",
"pertaining",
"to",
"industrial",
"users",
"i",
".",
"e",
".",
"the",
"number",
"of",
"significant",
"industrial",
"users",
"."
] | 62a53da62936bea8daa487a01a52b973e9062b2c | https://github.com/codeforamerica/epa_python/blob/62a53da62936bea8daa487a01a52b973e9062b2c/epa/pcs/pcs.py#L124-L132 | train | 55,333 |
codeforamerica/epa_python | epa/pcs/pcs.py | PCS.permit_event | def permit_event(self, column=None, value=None, **kwargs):
"""
A permit event tracks the lifecycle of a permit from issuance to
expiration. Examples include 'Application Received' and 'Permit
Issued', etc.
>>> PCS().permit_event('event_actual_date', '16-MAR-04')
"""
return self._resolve_call('PCS_PERMIT_EVENT', column, value, **kwargs) | python | def permit_event(self, column=None, value=None, **kwargs):
"""
A permit event tracks the lifecycle of a permit from issuance to
expiration. Examples include 'Application Received' and 'Permit
Issued', etc.
>>> PCS().permit_event('event_actual_date', '16-MAR-04')
"""
return self._resolve_call('PCS_PERMIT_EVENT', column, value, **kwargs) | [
"def",
"permit_event",
"(",
"self",
",",
"column",
"=",
"None",
",",
"value",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"self",
".",
"_resolve_call",
"(",
"'PCS_PERMIT_EVENT'",
",",
"column",
",",
"value",
",",
"*",
"*",
"kwargs",
")"
] | A permit event tracks the lifecycle of a permit from issuance to
expiration. Examples include 'Application Received' and 'Permit
Issued', etc.
>>> PCS().permit_event('event_actual_date', '16-MAR-04') | [
"A",
"permit",
"event",
"tracks",
"the",
"lifecycle",
"of",
"a",
"permit",
"from",
"issuance",
"to",
"expiration",
".",
"Examples",
"include",
"Application",
"Received",
"and",
"Permit",
"Issued",
"etc",
"."
] | 62a53da62936bea8daa487a01a52b973e9062b2c | https://github.com/codeforamerica/epa_python/blob/62a53da62936bea8daa487a01a52b973e9062b2c/epa/pcs/pcs.py#L144-L152 | train | 55,334 |
codeforamerica/epa_python | epa/pcs/pcs.py | PCS.pipe_schedule | def pipe_schedule(self, column=None, value=None, **kwargs):
"""
Particular discharge points at a permit facility that are governed by
effluent limitations and monitoring and submission requirements.
>>> PCS().pipe_schedule('state_submission_units', 'M')
"""
return self._resolve_call('PCS_PIPE_SCHED', column, value, **kwargs) | python | def pipe_schedule(self, column=None, value=None, **kwargs):
"""
Particular discharge points at a permit facility that are governed by
effluent limitations and monitoring and submission requirements.
>>> PCS().pipe_schedule('state_submission_units', 'M')
"""
return self._resolve_call('PCS_PIPE_SCHED', column, value, **kwargs) | [
"def",
"pipe_schedule",
"(",
"self",
",",
"column",
"=",
"None",
",",
"value",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"self",
".",
"_resolve_call",
"(",
"'PCS_PIPE_SCHED'",
",",
"column",
",",
"value",
",",
"*",
"*",
"kwargs",
")"
] | Particular discharge points at a permit facility that are governed by
effluent limitations and monitoring and submission requirements.
>>> PCS().pipe_schedule('state_submission_units', 'M') | [
"Particular",
"discharge",
"points",
"at",
"a",
"permit",
"facility",
"that",
"are",
"governed",
"by",
"effluent",
"limitations",
"and",
"monitoring",
"and",
"submission",
"requirements",
"."
] | 62a53da62936bea8daa487a01a52b973e9062b2c | https://github.com/codeforamerica/epa_python/blob/62a53da62936bea8daa487a01a52b973e9062b2c/epa/pcs/pcs.py#L154-L161 | train | 55,335 |
codeforamerica/epa_python | epa/pcs/pcs.py | PCS.single_violation | def single_violation(self, column=None, value=None, **kwargs):
"""
A single event violation is a one-time event that occurred on a fixed
date, and is associated with one permitted facility.
>>> PCS().single_violation('single_event_viol_date', '16-MAR-01')
"""
return self._resolve_call('PCS_SINGLE_EVENT_VIOL', column,
value, **kwargs) | python | def single_violation(self, column=None, value=None, **kwargs):
"""
A single event violation is a one-time event that occurred on a fixed
date, and is associated with one permitted facility.
>>> PCS().single_violation('single_event_viol_date', '16-MAR-01')
"""
return self._resolve_call('PCS_SINGLE_EVENT_VIOL', column,
value, **kwargs) | [
"def",
"single_violation",
"(",
"self",
",",
"column",
"=",
"None",
",",
"value",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"self",
".",
"_resolve_call",
"(",
"'PCS_SINGLE_EVENT_VIOL'",
",",
"column",
",",
"value",
",",
"*",
"*",
"kwargs"... | A single event violation is a one-time event that occurred on a fixed
date, and is associated with one permitted facility.
>>> PCS().single_violation('single_event_viol_date', '16-MAR-01') | [
"A",
"single",
"event",
"violation",
"is",
"a",
"one",
"-",
"time",
"event",
"that",
"occurred",
"on",
"a",
"fixed",
"date",
"and",
"is",
"associated",
"with",
"one",
"permitted",
"facility",
"."
] | 62a53da62936bea8daa487a01a52b973e9062b2c | https://github.com/codeforamerica/epa_python/blob/62a53da62936bea8daa487a01a52b973e9062b2c/epa/pcs/pcs.py#L163-L171 | train | 55,336 |
SteemData/steemdata | steemdata/utils.py | typify | def typify(value: Union[dict, list, set, str]):
""" Enhance block operation with native types.
Typify takes a blockchain operation or dict/list/value,
and then it parses and converts string types into native data types where appropriate.
"""
if type(value) == dict:
return walk_values(typify, value)
if type(value) in [list, set]:
return list(map(typify, value))
if type(value) == str:
if re.match('^\d+\.\d+ (STEEM|SBD|VESTS)$', value):
return keep_in_dict(dict(Amount(value)), ['amount', 'asset'])
if re.match('^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}$', value):
return parse_time(value)
return value | python | def typify(value: Union[dict, list, set, str]):
""" Enhance block operation with native types.
Typify takes a blockchain operation or dict/list/value,
and then it parses and converts string types into native data types where appropriate.
"""
if type(value) == dict:
return walk_values(typify, value)
if type(value) in [list, set]:
return list(map(typify, value))
if type(value) == str:
if re.match('^\d+\.\d+ (STEEM|SBD|VESTS)$', value):
return keep_in_dict(dict(Amount(value)), ['amount', 'asset'])
if re.match('^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}$', value):
return parse_time(value)
return value | [
"def",
"typify",
"(",
"value",
":",
"Union",
"[",
"dict",
",",
"list",
",",
"set",
",",
"str",
"]",
")",
":",
"if",
"type",
"(",
"value",
")",
"==",
"dict",
":",
"return",
"walk_values",
"(",
"typify",
",",
"value",
")",
"if",
"type",
"(",
"value... | Enhance block operation with native types.
Typify takes a blockchain operation or dict/list/value,
and then it parses and converts string types into native data types where appropriate. | [
"Enhance",
"block",
"operation",
"with",
"native",
"types",
"."
] | 64dfc6d795deeb922e9041fa53e0946f07708ea1 | https://github.com/SteemData/steemdata/blob/64dfc6d795deeb922e9041fa53e0946f07708ea1/steemdata/utils.py#L12-L31 | train | 55,337 |
SteemData/steemdata | steemdata/utils.py | json_expand | def json_expand(json_op):
""" For custom_json ops. """
if type(json_op) == dict and 'json' in json_op:
return update_in(json_op, ['json'], safe_json_loads)
return json_op | python | def json_expand(json_op):
""" For custom_json ops. """
if type(json_op) == dict and 'json' in json_op:
return update_in(json_op, ['json'], safe_json_loads)
return json_op | [
"def",
"json_expand",
"(",
"json_op",
")",
":",
"if",
"type",
"(",
"json_op",
")",
"==",
"dict",
"and",
"'json'",
"in",
"json_op",
":",
"return",
"update_in",
"(",
"json_op",
",",
"[",
"'json'",
"]",
",",
"safe_json_loads",
")",
"return",
"json_op"
] | For custom_json ops. | [
"For",
"custom_json",
"ops",
"."
] | 64dfc6d795deeb922e9041fa53e0946f07708ea1 | https://github.com/SteemData/steemdata/blob/64dfc6d795deeb922e9041fa53e0946f07708ea1/steemdata/utils.py#L41-L46 | train | 55,338 |
HPCC-Cloud-Computing/CAL | calplus/v1/network/drivers/amazon.py | AmazonDriver.delete | def delete(self, subnet_id):
"""
This is bad delete function
because one vpc can have more than one subnet.
It is Ok if user only use CAL for manage cloud resource
We will update ASAP.
"""
# 1 : show subnet
subnet = self.client.describe_subnets(
SubnetIds=[subnet_id]).get('Subnets')[0]
vpc_id = subnet.get('VpcId')
# 2 : delete subnet
self.client.delete_subnet(SubnetId=subnet_id)
# 3 : delete vpc
return self.client.delete_vpc(VpcId=vpc_id) | python | def delete(self, subnet_id):
"""
This is bad delete function
because one vpc can have more than one subnet.
It is Ok if user only use CAL for manage cloud resource
We will update ASAP.
"""
# 1 : show subnet
subnet = self.client.describe_subnets(
SubnetIds=[subnet_id]).get('Subnets')[0]
vpc_id = subnet.get('VpcId')
# 2 : delete subnet
self.client.delete_subnet(SubnetId=subnet_id)
# 3 : delete vpc
return self.client.delete_vpc(VpcId=vpc_id) | [
"def",
"delete",
"(",
"self",
",",
"subnet_id",
")",
":",
"# 1 : show subnet",
"subnet",
"=",
"self",
".",
"client",
".",
"describe_subnets",
"(",
"SubnetIds",
"=",
"[",
"subnet_id",
"]",
")",
".",
"get",
"(",
"'Subnets'",
")",
"[",
"0",
"]",
"vpc_id",
... | This is bad delete function
because one vpc can have more than one subnet.
It is Ok if user only use CAL for manage cloud resource
We will update ASAP. | [
"This",
"is",
"bad",
"delete",
"function",
"because",
"one",
"vpc",
"can",
"have",
"more",
"than",
"one",
"subnet",
".",
"It",
"is",
"Ok",
"if",
"user",
"only",
"use",
"CAL",
"for",
"manage",
"cloud",
"resource",
"We",
"will",
"update",
"ASAP",
"."
] | 7134b3dfe9ee3a383506a592765c7a12fa4ca1e9 | https://github.com/HPCC-Cloud-Computing/CAL/blob/7134b3dfe9ee3a383506a592765c7a12fa4ca1e9/calplus/v1/network/drivers/amazon.py#L103-L117 | train | 55,339 |
GeorgeArgyros/symautomata | symautomata/cfggenerator.py | CFGGenerator._clean_terminals | def _clean_terminals(self):
"""
Because of the optimization, there are some non existing terminals
on the generated list. Remove them by checking for terms in form Ax,x
"""
new_terminals = []
for term in self.grammar.grammar_terminals:
x_term = term.rfind('@')
y_term = term.rfind('A')
if y_term > x_term:
x_term = y_term
ids = term[x_term + 1:].split(',')
if len(ids) < 2:
"""It'input_string a normal terminal, not a state"""
new_terminals.append(term)
self.grammar.grammar_terminals = new_terminals | python | def _clean_terminals(self):
"""
Because of the optimization, there are some non existing terminals
on the generated list. Remove them by checking for terms in form Ax,x
"""
new_terminals = []
for term in self.grammar.grammar_terminals:
x_term = term.rfind('@')
y_term = term.rfind('A')
if y_term > x_term:
x_term = y_term
ids = term[x_term + 1:].split(',')
if len(ids) < 2:
"""It'input_string a normal terminal, not a state"""
new_terminals.append(term)
self.grammar.grammar_terminals = new_terminals | [
"def",
"_clean_terminals",
"(",
"self",
")",
":",
"new_terminals",
"=",
"[",
"]",
"for",
"term",
"in",
"self",
".",
"grammar",
".",
"grammar_terminals",
":",
"x_term",
"=",
"term",
".",
"rfind",
"(",
"'@'",
")",
"y_term",
"=",
"term",
".",
"rfind",
"("... | Because of the optimization, there are some non existing terminals
on the generated list. Remove them by checking for terms in form Ax,x | [
"Because",
"of",
"the",
"optimization",
"there",
"are",
"some",
"non",
"existing",
"terminals",
"on",
"the",
"generated",
"list",
".",
"Remove",
"them",
"by",
"checking",
"for",
"terms",
"in",
"form",
"Ax",
"x"
] | f5d66533573b27e155bec3f36b8c00b8e3937cb3 | https://github.com/GeorgeArgyros/symautomata/blob/f5d66533573b27e155bec3f36b8c00b8e3937cb3/symautomata/cfggenerator.py#L189-L204 | train | 55,340 |
GeorgeArgyros/symautomata | symautomata/cfggenerator.py | CFGGenerator._check_self_replicate | def _check_self_replicate(self, myntr):
"""
For each Rule B -> c where c is a known terminal, this function
searches for B occurences in rules with the form A -> B and sets
A -> c.
"""
# print 'BFS Dictionary Update - Self Replicate'
find = 0
for nonterm in self.grammar.grammar_nonterminals_map:
for i in self.grammar.grammar_nonterminals_map[nonterm]:
if self.grammar.grammar_rules[i][0] not in self.resolved and not isinstance(
self.grammar.grammar_rules[i][1], (set, tuple)) \
and self.grammar.grammar_rules[i][1] == myntr:
self.resolved[self.grammar.grammar_rules[i][0]] = self.resolved[myntr]
if self._checkfinal(self.grammar.grammar_rules[i][0]):
return self.resolved[self.grammar.grammar_rules[i][0]]
if self.grammar.grammar_rules[i][0] not in self.bfs_queue:
self.bfs_queue.append(self.grammar.grammar_rules[i][0])
find = 1
if find == 1:
return 1
return 0 | python | def _check_self_replicate(self, myntr):
"""
For each Rule B -> c where c is a known terminal, this function
searches for B occurences in rules with the form A -> B and sets
A -> c.
"""
# print 'BFS Dictionary Update - Self Replicate'
find = 0
for nonterm in self.grammar.grammar_nonterminals_map:
for i in self.grammar.grammar_nonterminals_map[nonterm]:
if self.grammar.grammar_rules[i][0] not in self.resolved and not isinstance(
self.grammar.grammar_rules[i][1], (set, tuple)) \
and self.grammar.grammar_rules[i][1] == myntr:
self.resolved[self.grammar.grammar_rules[i][0]] = self.resolved[myntr]
if self._checkfinal(self.grammar.grammar_rules[i][0]):
return self.resolved[self.grammar.grammar_rules[i][0]]
if self.grammar.grammar_rules[i][0] not in self.bfs_queue:
self.bfs_queue.append(self.grammar.grammar_rules[i][0])
find = 1
if find == 1:
return 1
return 0 | [
"def",
"_check_self_replicate",
"(",
"self",
",",
"myntr",
")",
":",
"# print 'BFS Dictionary Update - Self Replicate'",
"find",
"=",
"0",
"for",
"nonterm",
"in",
"self",
".",
"grammar",
".",
"grammar_nonterminals_map",
":",
"for",
"i",
"in",
"self",
".",
"grammar... | For each Rule B -> c where c is a known terminal, this function
searches for B occurences in rules with the form A -> B and sets
A -> c. | [
"For",
"each",
"Rule",
"B",
"-",
">",
"c",
"where",
"c",
"is",
"a",
"known",
"terminal",
"this",
"function",
"searches",
"for",
"B",
"occurences",
"in",
"rules",
"with",
"the",
"form",
"A",
"-",
">",
"B",
"and",
"sets",
"A",
"-",
">",
"c",
"."
] | f5d66533573b27e155bec3f36b8c00b8e3937cb3 | https://github.com/GeorgeArgyros/symautomata/blob/f5d66533573b27e155bec3f36b8c00b8e3937cb3/symautomata/cfggenerator.py#L287-L308 | train | 55,341 |
Equitable/trump | trump/orm.py | Symbol.describe | def describe(self):
""" describes a Symbol, returns a string """
lines = []
lines.append("Symbol = {}".format(self.name))
if len(self.tags):
tgs = ", ".join(x.tag for x in self.tags)
lines.append(" tagged = {}".format(tgs))
if len(self.aliases):
als = ", ".join(x.alias for x in self.aliases)
lines.append(" aliased = {}".format(als))
if len(self.feeds):
lines.append(" feeds:")
for fed in self.feeds:
lines.append(" {}. {}".format(fed.fnum,
fed.ftype))
return "\n".join(lines) | python | def describe(self):
""" describes a Symbol, returns a string """
lines = []
lines.append("Symbol = {}".format(self.name))
if len(self.tags):
tgs = ", ".join(x.tag for x in self.tags)
lines.append(" tagged = {}".format(tgs))
if len(self.aliases):
als = ", ".join(x.alias for x in self.aliases)
lines.append(" aliased = {}".format(als))
if len(self.feeds):
lines.append(" feeds:")
for fed in self.feeds:
lines.append(" {}. {}".format(fed.fnum,
fed.ftype))
return "\n".join(lines) | [
"def",
"describe",
"(",
"self",
")",
":",
"lines",
"=",
"[",
"]",
"lines",
".",
"append",
"(",
"\"Symbol = {}\"",
".",
"format",
"(",
"self",
".",
"name",
")",
")",
"if",
"len",
"(",
"self",
".",
"tags",
")",
":",
"tgs",
"=",
"\", \"",
".",
"join... | describes a Symbol, returns a string | [
"describes",
"a",
"Symbol",
"returns",
"a",
"string"
] | a2802692bc642fa32096374159eea7ceca2947b4 | https://github.com/Equitable/trump/blob/a2802692bc642fa32096374159eea7ceca2947b4/trump/orm.py#L1270-L1286 | train | 55,342 |
Equitable/trump | trump/orm.py | Symbol.datatable_df | def datatable_df(self):
""" returns the dataframe representation of the symbol's final data """
data = self._all_datatable_data()
adf = pd.DataFrame(data)
adf.columns = self.dt_all_cols
return self._finish_df(adf, 'ALL') | python | def datatable_df(self):
""" returns the dataframe representation of the symbol's final data """
data = self._all_datatable_data()
adf = pd.DataFrame(data)
adf.columns = self.dt_all_cols
return self._finish_df(adf, 'ALL') | [
"def",
"datatable_df",
"(",
"self",
")",
":",
"data",
"=",
"self",
".",
"_all_datatable_data",
"(",
")",
"adf",
"=",
"pd",
".",
"DataFrame",
"(",
"data",
")",
"adf",
".",
"columns",
"=",
"self",
".",
"dt_all_cols",
"return",
"self",
".",
"_finish_df",
... | returns the dataframe representation of the symbol's final data | [
"returns",
"the",
"dataframe",
"representation",
"of",
"the",
"symbol",
"s",
"final",
"data"
] | a2802692bc642fa32096374159eea7ceca2947b4 | https://github.com/Equitable/trump/blob/a2802692bc642fa32096374159eea7ceca2947b4/trump/orm.py#L1478-L1483 | train | 55,343 |
Equitable/trump | trump/orm.py | Symbol._init_datatable | def _init_datatable(self):
"""
Instantiates the .datatable attribute, pointing to a table in the
database that stores all the cached data
"""
try:
self.datatable = Table(self.name, Base.metadata, autoload=True)
except NoSuchTableError:
print "Creating datatable, cause it doesn't exist"
self.datatable = self._datatable_factory()
self.datatable.create()
self.datatable_exists = True | python | def _init_datatable(self):
"""
Instantiates the .datatable attribute, pointing to a table in the
database that stores all the cached data
"""
try:
self.datatable = Table(self.name, Base.metadata, autoload=True)
except NoSuchTableError:
print "Creating datatable, cause it doesn't exist"
self.datatable = self._datatable_factory()
self.datatable.create()
self.datatable_exists = True | [
"def",
"_init_datatable",
"(",
"self",
")",
":",
"try",
":",
"self",
".",
"datatable",
"=",
"Table",
"(",
"self",
".",
"name",
",",
"Base",
".",
"metadata",
",",
"autoload",
"=",
"True",
")",
"except",
"NoSuchTableError",
":",
"print",
"\"Creating datatabl... | Instantiates the .datatable attribute, pointing to a table in the
database that stores all the cached data | [
"Instantiates",
"the",
".",
"datatable",
"attribute",
"pointing",
"to",
"a",
"table",
"in",
"the",
"database",
"that",
"stores",
"all",
"the",
"cached",
"data"
] | a2802692bc642fa32096374159eea7ceca2947b4 | https://github.com/Equitable/trump/blob/a2802692bc642fa32096374159eea7ceca2947b4/trump/orm.py#L1525-L1536 | train | 55,344 |
Equitable/trump | trump/orm.py | Symbol._datatable_factory | def _datatable_factory(self):
"""
creates a SQLAlchemy Table object with the appropriate number of
columns given the number of feeds
"""
feed_cols = ['feed{0:03d}'.format(i + 1) for i in range(self.n_feeds)]
feed_cols = ['override_feed000'] + feed_cols + ['failsafe_feed999']
ind_sqlatyp = indexingtypes[self.index.indimp].sqlatyp
dat_sqlatyp = datadefs[self.dtype.datadef].sqlatyp
atbl = Table(self.name, Base.metadata,
Column('indx', ind_sqlatyp, primary_key=True),
Column('final', dat_sqlatyp),
*(Column(fed_col, dat_sqlatyp) for fed_col in feed_cols),
extend_existing=True)
self.dt_feed_cols = feed_cols[:]
self.dt_all_cols = ['indx', 'final'] + feed_cols[:]
return atbl | python | def _datatable_factory(self):
"""
creates a SQLAlchemy Table object with the appropriate number of
columns given the number of feeds
"""
feed_cols = ['feed{0:03d}'.format(i + 1) for i in range(self.n_feeds)]
feed_cols = ['override_feed000'] + feed_cols + ['failsafe_feed999']
ind_sqlatyp = indexingtypes[self.index.indimp].sqlatyp
dat_sqlatyp = datadefs[self.dtype.datadef].sqlatyp
atbl = Table(self.name, Base.metadata,
Column('indx', ind_sqlatyp, primary_key=True),
Column('final', dat_sqlatyp),
*(Column(fed_col, dat_sqlatyp) for fed_col in feed_cols),
extend_existing=True)
self.dt_feed_cols = feed_cols[:]
self.dt_all_cols = ['indx', 'final'] + feed_cols[:]
return atbl | [
"def",
"_datatable_factory",
"(",
"self",
")",
":",
"feed_cols",
"=",
"[",
"'feed{0:03d}'",
".",
"format",
"(",
"i",
"+",
"1",
")",
"for",
"i",
"in",
"range",
"(",
"self",
".",
"n_feeds",
")",
"]",
"feed_cols",
"=",
"[",
"'override_feed000'",
"]",
"+",... | creates a SQLAlchemy Table object with the appropriate number of
columns given the number of feeds | [
"creates",
"a",
"SQLAlchemy",
"Table",
"object",
"with",
"the",
"appropriate",
"number",
"of",
"columns",
"given",
"the",
"number",
"of",
"feeds"
] | a2802692bc642fa32096374159eea7ceca2947b4 | https://github.com/Equitable/trump/blob/a2802692bc642fa32096374159eea7ceca2947b4/trump/orm.py#L1552-L1571 | train | 55,345 |
Equitable/trump | trump/orm.py | Feed.add_tags | def add_tags(self, tags):
""" add a tag or tags to a Feed """
if isinstance(tags, (str, unicode)):
tags = [tags]
objs = object_session(self)
tmps = [FeedTag(tag=t, feed=self) for t in tags]
objs.add_all(tmps)
objs.commit() | python | def add_tags(self, tags):
""" add a tag or tags to a Feed """
if isinstance(tags, (str, unicode)):
tags = [tags]
objs = object_session(self)
tmps = [FeedTag(tag=t, feed=self) for t in tags]
objs.add_all(tmps)
objs.commit() | [
"def",
"add_tags",
"(",
"self",
",",
"tags",
")",
":",
"if",
"isinstance",
"(",
"tags",
",",
"(",
"str",
",",
"unicode",
")",
")",
":",
"tags",
"=",
"[",
"tags",
"]",
"objs",
"=",
"object_session",
"(",
"self",
")",
"tmps",
"=",
"[",
"FeedTag",
"... | add a tag or tags to a Feed | [
"add",
"a",
"tag",
"or",
"tags",
"to",
"a",
"Feed"
] | a2802692bc642fa32096374159eea7ceca2947b4 | https://github.com/Equitable/trump/blob/a2802692bc642fa32096374159eea7ceca2947b4/trump/orm.py#L1946-L1955 | train | 55,346 |
jplusplus/statscraper | statscraper/scrapers/work_injury_scraper.py | WorkInjuries.initiate_browser | def initiate_browser(self):
# Create a unique tempdir for downloaded files
tempdir = os.getenv(TEMPDIR_ENVVAR, DEFAULT_TEMPDIR)
tempsubdir = uuid4().hex
# TODO: Remove this directory when finished!
self.tempdir = os.path.join(tempdir, tempsubdir)
try:
# Try and create directory before checking if it exists,
# to avoid race condition
os.makedirs(self.tempdir)
except OSError:
if not os.path.isdir(self.tempdir):
raise
profile = webdriver.FirefoxProfile()
# Set download location, avoid download dialogues if possible
# Different settings needed for different Firefox versions
# This will be a long list...
profile.set_preference('browser.download.folderList', 2)
profile.set_preference('browser.download.manager.showWhenStarting', False)
profile.set_preference('browser.download.manager.closeWhenDone', True)
profile.set_preference('browser.download.dir', self.tempdir)
profile.set_preference("browser.helperApps.neverAsk.saveToDisk", "application/octet-stream;application/vnd.ms-excel")
profile.set_preference("browser.helperApps.alwaysAsk.force", False)
profile.set_preference("browser.download.manager.useWindow", False)
self.browser = webdriver.Firefox(profile)
self.browser.get('http://webbstat.av.se')
detailed_cls = "Document_TX_GOTOTAB_Avancerad"
""" The button for expanded detailed options. This
also happens to be a good indicator as to wheter
all content is loaded.
"""
# Wait for a content element, and 3 extra seconds just in case
WebDriverWait(self.browser, PAGELOAD_TIMEOUT)\
.until(EC.presence_of_element_located((By.CLASS_NAME,
detailed_cls)))
self.browser.implicitly_wait(3)
self.browser\
.find_element_by_class_name(detailed_cls)\
.find_element_by_tag_name("td")\
.click()
# Wait for a content element, and 3 extra seconds just in case
WebDriverWait(self.browser, PAGELOAD_TIMEOUT)\
.until(EC.presence_of_element_located((By.CLASS_NAME,
detailed_cls)))
self.browser.implicitly_wait(3) | python | def initiate_browser(self):
# Create a unique tempdir for downloaded files
tempdir = os.getenv(TEMPDIR_ENVVAR, DEFAULT_TEMPDIR)
tempsubdir = uuid4().hex
# TODO: Remove this directory when finished!
self.tempdir = os.path.join(tempdir, tempsubdir)
try:
# Try and create directory before checking if it exists,
# to avoid race condition
os.makedirs(self.tempdir)
except OSError:
if not os.path.isdir(self.tempdir):
raise
profile = webdriver.FirefoxProfile()
# Set download location, avoid download dialogues if possible
# Different settings needed for different Firefox versions
# This will be a long list...
profile.set_preference('browser.download.folderList', 2)
profile.set_preference('browser.download.manager.showWhenStarting', False)
profile.set_preference('browser.download.manager.closeWhenDone', True)
profile.set_preference('browser.download.dir', self.tempdir)
profile.set_preference("browser.helperApps.neverAsk.saveToDisk", "application/octet-stream;application/vnd.ms-excel")
profile.set_preference("browser.helperApps.alwaysAsk.force", False)
profile.set_preference("browser.download.manager.useWindow", False)
self.browser = webdriver.Firefox(profile)
self.browser.get('http://webbstat.av.se')
detailed_cls = "Document_TX_GOTOTAB_Avancerad"
""" The button for expanded detailed options. This
also happens to be a good indicator as to wheter
all content is loaded.
"""
# Wait for a content element, and 3 extra seconds just in case
WebDriverWait(self.browser, PAGELOAD_TIMEOUT)\
.until(EC.presence_of_element_located((By.CLASS_NAME,
detailed_cls)))
self.browser.implicitly_wait(3)
self.browser\
.find_element_by_class_name(detailed_cls)\
.find_element_by_tag_name("td")\
.click()
# Wait for a content element, and 3 extra seconds just in case
WebDriverWait(self.browser, PAGELOAD_TIMEOUT)\
.until(EC.presence_of_element_located((By.CLASS_NAME,
detailed_cls)))
self.browser.implicitly_wait(3) | [
"def",
"initiate_browser",
"(",
"self",
")",
":",
"# Create a unique tempdir for downloaded files",
"tempdir",
"=",
"os",
".",
"getenv",
"(",
"TEMPDIR_ENVVAR",
",",
"DEFAULT_TEMPDIR",
")",
"tempsubdir",
"=",
"uuid4",
"(",
")",
".",
"hex",
"# TODO: Remove this director... | The button for expanded detailed options. This
also happens to be a good indicator as to wheter
all content is loaded. | [
"The",
"button",
"for",
"expanded",
"detailed",
"options",
".",
"This",
"also",
"happens",
"to",
"be",
"a",
"good",
"indicator",
"as",
"to",
"wheter",
"all",
"content",
"is",
"loaded",
"."
] | 932ec048b23d15b3dbdaf829facc55fd78ec0109 | https://github.com/jplusplus/statscraper/blob/932ec048b23d15b3dbdaf829facc55fd78ec0109/statscraper/scrapers/work_injury_scraper.py#L35-L85 | train | 55,347 |
e7dal/bubble3 | behave4cmd0/log/steps.py | step_I_create_logrecords_with_table | def step_I_create_logrecords_with_table(context):
"""
Step definition that creates one more log records by using a table.
.. code-block: gherkin
When I create log records with:
| category | level | message |
| foo | ERROR | Hello Foo |
| foo.bar | WARN | Hello Foo.Bar |
Table description
------------------
| Column | Type | Required | Description |
| category | string | yes | Category (or logger) to use. |
| level | LogLevel | yes | Log level to use. |
| message | string | yes | Log message to use. |
.. code-block: python
import logging
from behave.configuration import LogLevel
for row in table.rows:
logger = logging.getLogger(row.category)
level = LogLevel.parse_type(row.level)
logger.log(level, row.message)
"""
assert context.table, "REQUIRE: context.table"
context.table.require_columns(["category", "level", "message"])
for row in context.table.rows:
category = row["category"]
if category == "__ROOT__":
category = None
level = LogLevel.parse_type(row["level"])
message = row["message"]
make_log_record(category, level, message) | python | def step_I_create_logrecords_with_table(context):
"""
Step definition that creates one more log records by using a table.
.. code-block: gherkin
When I create log records with:
| category | level | message |
| foo | ERROR | Hello Foo |
| foo.bar | WARN | Hello Foo.Bar |
Table description
------------------
| Column | Type | Required | Description |
| category | string | yes | Category (or logger) to use. |
| level | LogLevel | yes | Log level to use. |
| message | string | yes | Log message to use. |
.. code-block: python
import logging
from behave.configuration import LogLevel
for row in table.rows:
logger = logging.getLogger(row.category)
level = LogLevel.parse_type(row.level)
logger.log(level, row.message)
"""
assert context.table, "REQUIRE: context.table"
context.table.require_columns(["category", "level", "message"])
for row in context.table.rows:
category = row["category"]
if category == "__ROOT__":
category = None
level = LogLevel.parse_type(row["level"])
message = row["message"]
make_log_record(category, level, message) | [
"def",
"step_I_create_logrecords_with_table",
"(",
"context",
")",
":",
"assert",
"context",
".",
"table",
",",
"\"REQUIRE: context.table\"",
"context",
".",
"table",
".",
"require_columns",
"(",
"[",
"\"category\"",
",",
"\"level\"",
",",
"\"message\"",
"]",
")",
... | Step definition that creates one more log records by using a table.
.. code-block: gherkin
When I create log records with:
| category | level | message |
| foo | ERROR | Hello Foo |
| foo.bar | WARN | Hello Foo.Bar |
Table description
------------------
| Column | Type | Required | Description |
| category | string | yes | Category (or logger) to use. |
| level | LogLevel | yes | Log level to use. |
| message | string | yes | Log message to use. |
.. code-block: python
import logging
from behave.configuration import LogLevel
for row in table.rows:
logger = logging.getLogger(row.category)
level = LogLevel.parse_type(row.level)
logger.log(level, row.message) | [
"Step",
"definition",
"that",
"creates",
"one",
"more",
"log",
"records",
"by",
"using",
"a",
"table",
"."
] | 59c735281a95b44f6263a25f4d6ce24fca520082 | https://github.com/e7dal/bubble3/blob/59c735281a95b44f6263a25f4d6ce24fca520082/behave4cmd0/log/steps.py#L134-L170 | train | 55,348 |
e7dal/bubble3 | behave4cmd0/log/steps.py | step_I_create_logrecord_with_table | def step_I_create_logrecord_with_table(context):
"""
Create an log record by using a table to provide the parts.
.. seealso: :func:`step_I_create_logrecords_with_table()`
"""
assert context.table, "REQUIRE: context.table"
assert len(context.table.rows) == 1, "REQUIRE: table.row.size == 1"
step_I_create_logrecords_with_table(context) | python | def step_I_create_logrecord_with_table(context):
"""
Create an log record by using a table to provide the parts.
.. seealso: :func:`step_I_create_logrecords_with_table()`
"""
assert context.table, "REQUIRE: context.table"
assert len(context.table.rows) == 1, "REQUIRE: table.row.size == 1"
step_I_create_logrecords_with_table(context) | [
"def",
"step_I_create_logrecord_with_table",
"(",
"context",
")",
":",
"assert",
"context",
".",
"table",
",",
"\"REQUIRE: context.table\"",
"assert",
"len",
"(",
"context",
".",
"table",
".",
"rows",
")",
"==",
"1",
",",
"\"REQUIRE: table.row.size == 1\"",
"step_I_... | Create an log record by using a table to provide the parts.
.. seealso: :func:`step_I_create_logrecords_with_table()` | [
"Create",
"an",
"log",
"record",
"by",
"using",
"a",
"table",
"to",
"provide",
"the",
"parts",
"."
] | 59c735281a95b44f6263a25f4d6ce24fca520082 | https://github.com/e7dal/bubble3/blob/59c735281a95b44f6263a25f4d6ce24fca520082/behave4cmd0/log/steps.py#L174-L182 | train | 55,349 |
e7dal/bubble3 | behave4cmd0/log/steps.py | step_use_log_record_configuration | def step_use_log_record_configuration(context):
"""
Define log record configuration parameters.
.. code-block: gherkin
Given I use the log record configuration:
| property | value |
| format | |
| datefmt | |
"""
assert context.table, "REQUIRE: context.table"
context.table.require_columns(["property", "value"])
for row in context.table.rows:
property_name = row["property"]
value = row["value"]
if property_name == "format":
context.log_record_format = value
elif property_name == "datefmt":
context.log_record_datefmt = value
else:
raise KeyError("Unknown property=%s" % property_name) | python | def step_use_log_record_configuration(context):
"""
Define log record configuration parameters.
.. code-block: gherkin
Given I use the log record configuration:
| property | value |
| format | |
| datefmt | |
"""
assert context.table, "REQUIRE: context.table"
context.table.require_columns(["property", "value"])
for row in context.table.rows:
property_name = row["property"]
value = row["value"]
if property_name == "format":
context.log_record_format = value
elif property_name == "datefmt":
context.log_record_datefmt = value
else:
raise KeyError("Unknown property=%s" % property_name) | [
"def",
"step_use_log_record_configuration",
"(",
"context",
")",
":",
"assert",
"context",
".",
"table",
",",
"\"REQUIRE: context.table\"",
"context",
".",
"table",
".",
"require_columns",
"(",
"[",
"\"property\"",
",",
"\"value\"",
"]",
")",
"for",
"row",
"in",
... | Define log record configuration parameters.
.. code-block: gherkin
Given I use the log record configuration:
| property | value |
| format | |
| datefmt | | | [
"Define",
"log",
"record",
"configuration",
"parameters",
"."
] | 59c735281a95b44f6263a25f4d6ce24fca520082 | https://github.com/e7dal/bubble3/blob/59c735281a95b44f6263a25f4d6ce24fca520082/behave4cmd0/log/steps.py#L350-L371 | train | 55,350 |
MacHu-GWU/crawlib-project | crawlib/decode.py | smart_decode | def smart_decode(binary, errors="strict"):
"""
Automatically find the right codec to decode binary data to string.
:param binary: binary data
:param errors: one of 'strict', 'ignore' and 'replace'
:return: string
"""
d = chardet.detect(binary)
encoding = d["encoding"]
confidence = d["confidence"]
text = binary.decode(encoding, errors=errors)
return text, encoding, confidence | python | def smart_decode(binary, errors="strict"):
"""
Automatically find the right codec to decode binary data to string.
:param binary: binary data
:param errors: one of 'strict', 'ignore' and 'replace'
:return: string
"""
d = chardet.detect(binary)
encoding = d["encoding"]
confidence = d["confidence"]
text = binary.decode(encoding, errors=errors)
return text, encoding, confidence | [
"def",
"smart_decode",
"(",
"binary",
",",
"errors",
"=",
"\"strict\"",
")",
":",
"d",
"=",
"chardet",
".",
"detect",
"(",
"binary",
")",
"encoding",
"=",
"d",
"[",
"\"encoding\"",
"]",
"confidence",
"=",
"d",
"[",
"\"confidence\"",
"]",
"text",
"=",
"... | Automatically find the right codec to decode binary data to string.
:param binary: binary data
:param errors: one of 'strict', 'ignore' and 'replace'
:return: string | [
"Automatically",
"find",
"the",
"right",
"codec",
"to",
"decode",
"binary",
"data",
"to",
"string",
"."
] | 241516f2a7a0a32c692f7af35a1f44064e8ce1ab | https://github.com/MacHu-GWU/crawlib-project/blob/241516f2a7a0a32c692f7af35a1f44064e8ce1ab/crawlib/decode.py#L12-L25 | train | 55,351 |
MacHu-GWU/crawlib-project | crawlib/decode.py | UrlSpecifiedDecoder.decode | def decode(self, binary, url, encoding=None, errors="strict"):
"""
Decode binary to string.
:param binary: binary content of a http request.
:param url: endpoint of the request.
:param encoding: manually specify the encoding.
:param errors: errors handle method.
:return: str
"""
if encoding is None:
domain = util.get_domain(url)
if domain in self.domain_encoding_table:
encoding = self.domain_encoding_table[domain]
html = binary.decode(encoding, errors=errors)
else:
html, encoding, confidence = smart_decode(
binary, errors=errors)
# cache domain name and encoding
self.domain_encoding_table[domain] = encoding
else:
html = binary.decode(encoding, errors=errors)
return html | python | def decode(self, binary, url, encoding=None, errors="strict"):
"""
Decode binary to string.
:param binary: binary content of a http request.
:param url: endpoint of the request.
:param encoding: manually specify the encoding.
:param errors: errors handle method.
:return: str
"""
if encoding is None:
domain = util.get_domain(url)
if domain in self.domain_encoding_table:
encoding = self.domain_encoding_table[domain]
html = binary.decode(encoding, errors=errors)
else:
html, encoding, confidence = smart_decode(
binary, errors=errors)
# cache domain name and encoding
self.domain_encoding_table[domain] = encoding
else:
html = binary.decode(encoding, errors=errors)
return html | [
"def",
"decode",
"(",
"self",
",",
"binary",
",",
"url",
",",
"encoding",
"=",
"None",
",",
"errors",
"=",
"\"strict\"",
")",
":",
"if",
"encoding",
"is",
"None",
":",
"domain",
"=",
"util",
".",
"get_domain",
"(",
"url",
")",
"if",
"domain",
"in",
... | Decode binary to string.
:param binary: binary content of a http request.
:param url: endpoint of the request.
:param encoding: manually specify the encoding.
:param errors: errors handle method.
:return: str | [
"Decode",
"binary",
"to",
"string",
"."
] | 241516f2a7a0a32c692f7af35a1f44064e8ce1ab | https://github.com/MacHu-GWU/crawlib-project/blob/241516f2a7a0a32c692f7af35a1f44064e8ce1ab/crawlib/decode.py#L49-L73 | train | 55,352 |
what-studio/smartformat | smartformat/dotnet.py | modify_number_pattern | def modify_number_pattern(number_pattern, **kwargs):
"""Modifies a number pattern by specified keyword arguments."""
params = ['pattern', 'prefix', 'suffix', 'grouping',
'int_prec', 'frac_prec', 'exp_prec', 'exp_plus']
for param in params:
if param in kwargs:
continue
kwargs[param] = getattr(number_pattern, param)
return NumberPattern(**kwargs) | python | def modify_number_pattern(number_pattern, **kwargs):
"""Modifies a number pattern by specified keyword arguments."""
params = ['pattern', 'prefix', 'suffix', 'grouping',
'int_prec', 'frac_prec', 'exp_prec', 'exp_plus']
for param in params:
if param in kwargs:
continue
kwargs[param] = getattr(number_pattern, param)
return NumberPattern(**kwargs) | [
"def",
"modify_number_pattern",
"(",
"number_pattern",
",",
"*",
"*",
"kwargs",
")",
":",
"params",
"=",
"[",
"'pattern'",
",",
"'prefix'",
",",
"'suffix'",
",",
"'grouping'",
",",
"'int_prec'",
",",
"'frac_prec'",
",",
"'exp_prec'",
",",
"'exp_plus'",
"]",
... | Modifies a number pattern by specified keyword arguments. | [
"Modifies",
"a",
"number",
"pattern",
"by",
"specified",
"keyword",
"arguments",
"."
] | 5731203cbf29617ab8d42542f9dac03d5e34b217 | https://github.com/what-studio/smartformat/blob/5731203cbf29617ab8d42542f9dac03d5e34b217/smartformat/dotnet.py#L31-L39 | train | 55,353 |
what-studio/smartformat | smartformat/dotnet.py | format_currency_field | def format_currency_field(__, prec, number, locale):
"""Formats a currency field."""
locale = Locale.parse(locale)
currency = get_territory_currencies(locale.territory)[0]
if prec is None:
pattern, currency_digits = None, True
else:
prec = int(prec)
pattern = locale.currency_formats['standard']
pattern = modify_number_pattern(pattern, frac_prec=(prec, prec))
currency_digits = False
return format_currency(number, currency, pattern, locale=locale,
currency_digits=currency_digits) | python | def format_currency_field(__, prec, number, locale):
"""Formats a currency field."""
locale = Locale.parse(locale)
currency = get_territory_currencies(locale.territory)[0]
if prec is None:
pattern, currency_digits = None, True
else:
prec = int(prec)
pattern = locale.currency_formats['standard']
pattern = modify_number_pattern(pattern, frac_prec=(prec, prec))
currency_digits = False
return format_currency(number, currency, pattern, locale=locale,
currency_digits=currency_digits) | [
"def",
"format_currency_field",
"(",
"__",
",",
"prec",
",",
"number",
",",
"locale",
")",
":",
"locale",
"=",
"Locale",
".",
"parse",
"(",
"locale",
")",
"currency",
"=",
"get_territory_currencies",
"(",
"locale",
".",
"territory",
")",
"[",
"0",
"]",
"... | Formats a currency field. | [
"Formats",
"a",
"currency",
"field",
"."
] | 5731203cbf29617ab8d42542f9dac03d5e34b217 | https://github.com/what-studio/smartformat/blob/5731203cbf29617ab8d42542f9dac03d5e34b217/smartformat/dotnet.py#L58-L70 | train | 55,354 |
what-studio/smartformat | smartformat/dotnet.py | format_float_field | def format_float_field(__, prec, number, locale):
"""Formats a fixed-point field."""
format_ = u'0.'
if prec is None:
format_ += u'#' * NUMBER_DECIMAL_DIGITS
else:
format_ += u'0' * int(prec)
pattern = parse_pattern(format_)
return pattern.apply(number, locale) | python | def format_float_field(__, prec, number, locale):
"""Formats a fixed-point field."""
format_ = u'0.'
if prec is None:
format_ += u'#' * NUMBER_DECIMAL_DIGITS
else:
format_ += u'0' * int(prec)
pattern = parse_pattern(format_)
return pattern.apply(number, locale) | [
"def",
"format_float_field",
"(",
"__",
",",
"prec",
",",
"number",
",",
"locale",
")",
":",
"format_",
"=",
"u'0.'",
"if",
"prec",
"is",
"None",
":",
"format_",
"+=",
"u'#'",
"*",
"NUMBER_DECIMAL_DIGITS",
"else",
":",
"format_",
"+=",
"u'0'",
"*",
"int"... | Formats a fixed-point field. | [
"Formats",
"a",
"fixed",
"-",
"point",
"field",
"."
] | 5731203cbf29617ab8d42542f9dac03d5e34b217 | https://github.com/what-studio/smartformat/blob/5731203cbf29617ab8d42542f9dac03d5e34b217/smartformat/dotnet.py#L103-L111 | train | 55,355 |
what-studio/smartformat | smartformat/dotnet.py | format_number_field | def format_number_field(__, prec, number, locale):
"""Formats a number field."""
prec = NUMBER_DECIMAL_DIGITS if prec is None else int(prec)
locale = Locale.parse(locale)
pattern = locale.decimal_formats.get(None)
return pattern.apply(number, locale, force_frac=(prec, prec)) | python | def format_number_field(__, prec, number, locale):
"""Formats a number field."""
prec = NUMBER_DECIMAL_DIGITS if prec is None else int(prec)
locale = Locale.parse(locale)
pattern = locale.decimal_formats.get(None)
return pattern.apply(number, locale, force_frac=(prec, prec)) | [
"def",
"format_number_field",
"(",
"__",
",",
"prec",
",",
"number",
",",
"locale",
")",
":",
"prec",
"=",
"NUMBER_DECIMAL_DIGITS",
"if",
"prec",
"is",
"None",
"else",
"int",
"(",
"prec",
")",
"locale",
"=",
"Locale",
".",
"parse",
"(",
"locale",
")",
... | Formats a number field. | [
"Formats",
"a",
"number",
"field",
"."
] | 5731203cbf29617ab8d42542f9dac03d5e34b217 | https://github.com/what-studio/smartformat/blob/5731203cbf29617ab8d42542f9dac03d5e34b217/smartformat/dotnet.py#L116-L121 | train | 55,356 |
what-studio/smartformat | smartformat/dotnet.py | format_percent_field | def format_percent_field(__, prec, number, locale):
"""Formats a percent field."""
prec = PERCENT_DECIMAL_DIGITS if prec is None else int(prec)
locale = Locale.parse(locale)
pattern = locale.percent_formats.get(None)
return pattern.apply(number, locale, force_frac=(prec, prec)) | python | def format_percent_field(__, prec, number, locale):
"""Formats a percent field."""
prec = PERCENT_DECIMAL_DIGITS if prec is None else int(prec)
locale = Locale.parse(locale)
pattern = locale.percent_formats.get(None)
return pattern.apply(number, locale, force_frac=(prec, prec)) | [
"def",
"format_percent_field",
"(",
"__",
",",
"prec",
",",
"number",
",",
"locale",
")",
":",
"prec",
"=",
"PERCENT_DECIMAL_DIGITS",
"if",
"prec",
"is",
"None",
"else",
"int",
"(",
"prec",
")",
"locale",
"=",
"Locale",
".",
"parse",
"(",
"locale",
")",
... | Formats a percent field. | [
"Formats",
"a",
"percent",
"field",
"."
] | 5731203cbf29617ab8d42542f9dac03d5e34b217 | https://github.com/what-studio/smartformat/blob/5731203cbf29617ab8d42542f9dac03d5e34b217/smartformat/dotnet.py#L126-L131 | train | 55,357 |
what-studio/smartformat | smartformat/dotnet.py | format_hexadecimal_field | def format_hexadecimal_field(spec, prec, number, locale):
"""Formats a hexadeciaml field."""
if number < 0:
# Take two's complement.
number &= (1 << (8 * int(math.log(-number, 1 << 8) + 1))) - 1
format_ = u'0%d%s' % (int(prec or 0), spec)
return format(number, format_) | python | def format_hexadecimal_field(spec, prec, number, locale):
"""Formats a hexadeciaml field."""
if number < 0:
# Take two's complement.
number &= (1 << (8 * int(math.log(-number, 1 << 8) + 1))) - 1
format_ = u'0%d%s' % (int(prec or 0), spec)
return format(number, format_) | [
"def",
"format_hexadecimal_field",
"(",
"spec",
",",
"prec",
",",
"number",
",",
"locale",
")",
":",
"if",
"number",
"<",
"0",
":",
"# Take two's complement.",
"number",
"&=",
"(",
"1",
"<<",
"(",
"8",
"*",
"int",
"(",
"math",
".",
"log",
"(",
"-",
"... | Formats a hexadeciaml field. | [
"Formats",
"a",
"hexadeciaml",
"field",
"."
] | 5731203cbf29617ab8d42542f9dac03d5e34b217 | https://github.com/what-studio/smartformat/blob/5731203cbf29617ab8d42542f9dac03d5e34b217/smartformat/dotnet.py#L136-L142 | train | 55,358 |
hsharrison/smartcompose | smartcompose.py | delegate | def delegate(attribute_name, method_names):
"""
Decorator factory to delegate methods to an attribute.
Decorate a class to map every method in `method_names` to the attribute `attribute_name`.
"""
call_attribute_method = partial(_call_delegated_method, attribute_name)
def decorate(class_):
for method in method_names:
setattr(class_, method, partialmethod(call_attribute_method, method))
return class_
return decorate | python | def delegate(attribute_name, method_names):
"""
Decorator factory to delegate methods to an attribute.
Decorate a class to map every method in `method_names` to the attribute `attribute_name`.
"""
call_attribute_method = partial(_call_delegated_method, attribute_name)
def decorate(class_):
for method in method_names:
setattr(class_, method, partialmethod(call_attribute_method, method))
return class_
return decorate | [
"def",
"delegate",
"(",
"attribute_name",
",",
"method_names",
")",
":",
"call_attribute_method",
"=",
"partial",
"(",
"_call_delegated_method",
",",
"attribute_name",
")",
"def",
"decorate",
"(",
"class_",
")",
":",
"for",
"method",
"in",
"method_names",
":",
"... | Decorator factory to delegate methods to an attribute.
Decorate a class to map every method in `method_names` to the attribute `attribute_name`. | [
"Decorator",
"factory",
"to",
"delegate",
"methods",
"to",
"an",
"attribute",
"."
] | 3f7cdeaf0812b35b2c49a6917815abca6e2c48ca | https://github.com/hsharrison/smartcompose/blob/3f7cdeaf0812b35b2c49a6917815abca6e2c48ca/smartcompose.py#L24-L38 | train | 55,359 |
MostAwesomeDude/gentleman | gentleman/helpers.py | prepare_query | def prepare_query(query):
"""
Prepare a query object for the RAPI.
RAPI has lots of curious rules for coercing values.
This function operates on dicts in-place and has no return value.
@type query: dict
@param query: Query arguments
"""
for name in query:
value = query[name]
# None is sent as an empty string.
if value is None:
query[name] = ""
# Booleans are sent as 0 or 1.
elif isinstance(value, bool):
query[name] = int(value)
# XXX shouldn't this just check for basestring instead?
elif isinstance(value, dict):
raise ValueError("Invalid query data type %r" %
type(value).__name__) | python | def prepare_query(query):
"""
Prepare a query object for the RAPI.
RAPI has lots of curious rules for coercing values.
This function operates on dicts in-place and has no return value.
@type query: dict
@param query: Query arguments
"""
for name in query:
value = query[name]
# None is sent as an empty string.
if value is None:
query[name] = ""
# Booleans are sent as 0 or 1.
elif isinstance(value, bool):
query[name] = int(value)
# XXX shouldn't this just check for basestring instead?
elif isinstance(value, dict):
raise ValueError("Invalid query data type %r" %
type(value).__name__) | [
"def",
"prepare_query",
"(",
"query",
")",
":",
"for",
"name",
"in",
"query",
":",
"value",
"=",
"query",
"[",
"name",
"]",
"# None is sent as an empty string.",
"if",
"value",
"is",
"None",
":",
"query",
"[",
"name",
"]",
"=",
"\"\"",
"# Booleans are sent a... | Prepare a query object for the RAPI.
RAPI has lots of curious rules for coercing values.
This function operates on dicts in-place and has no return value.
@type query: dict
@param query: Query arguments | [
"Prepare",
"a",
"query",
"object",
"for",
"the",
"RAPI",
"."
] | 17fb8ffb922aa4af9d8bcab85e452c9311d41805 | https://github.com/MostAwesomeDude/gentleman/blob/17fb8ffb922aa4af9d8bcab85e452c9311d41805/gentleman/helpers.py#L7-L33 | train | 55,360 |
MostAwesomeDude/gentleman | gentleman/helpers.py | itemgetters | def itemgetters(*args):
"""
Get a handful of items from an iterable.
This is just map(itemgetter(...), iterable) with a list comprehension.
"""
f = itemgetter(*args)
def inner(l):
return [f(x) for x in l]
return inner | python | def itemgetters(*args):
"""
Get a handful of items from an iterable.
This is just map(itemgetter(...), iterable) with a list comprehension.
"""
f = itemgetter(*args)
def inner(l):
return [f(x) for x in l]
return inner | [
"def",
"itemgetters",
"(",
"*",
"args",
")",
":",
"f",
"=",
"itemgetter",
"(",
"*",
"args",
")",
"def",
"inner",
"(",
"l",
")",
":",
"return",
"[",
"f",
"(",
"x",
")",
"for",
"x",
"in",
"l",
"]",
"return",
"inner"
] | Get a handful of items from an iterable.
This is just map(itemgetter(...), iterable) with a list comprehension. | [
"Get",
"a",
"handful",
"of",
"items",
"from",
"an",
"iterable",
"."
] | 17fb8ffb922aa4af9d8bcab85e452c9311d41805 | https://github.com/MostAwesomeDude/gentleman/blob/17fb8ffb922aa4af9d8bcab85e452c9311d41805/gentleman/helpers.py#L35-L47 | train | 55,361 |
HPCC-Cloud-Computing/CAL | calplus/v1/object_storage/client.py | Client.stat_container | def stat_container(self, container):
"""Stat container metadata
:param container: container name (Container is equivalent to
Bucket term in Amazon).
"""
LOG.debug('stat_container() with %s is success.', self.driver)
return self.driver.stat_container(container) | python | def stat_container(self, container):
"""Stat container metadata
:param container: container name (Container is equivalent to
Bucket term in Amazon).
"""
LOG.debug('stat_container() with %s is success.', self.driver)
return self.driver.stat_container(container) | [
"def",
"stat_container",
"(",
"self",
",",
"container",
")",
":",
"LOG",
".",
"debug",
"(",
"'stat_container() with %s is success.'",
",",
"self",
".",
"driver",
")",
"return",
"self",
".",
"driver",
".",
"stat_container",
"(",
"container",
")"
] | Stat container metadata
:param container: container name (Container is equivalent to
Bucket term in Amazon). | [
"Stat",
"container",
"metadata"
] | 7134b3dfe9ee3a383506a592765c7a12fa4ca1e9 | https://github.com/HPCC-Cloud-Computing/CAL/blob/7134b3dfe9ee3a383506a592765c7a12fa4ca1e9/calplus/v1/object_storage/client.py#L53-L60 | train | 55,362 |
HPCC-Cloud-Computing/CAL | calplus/v1/object_storage/client.py | Client.update_container | def update_container(self, container, metadata, **kwargs):
"""Update container metadata
:param container: container name (Container is equivalent to
Bucket term in Amazon).
:param metadata(dict): additional metadata to include in the request.
:param **kwargs(dict): extend args for specific driver.
"""
LOG.debug('update_object() with %s is success.', self.driver)
return self.driver.update_container(container, metadata, **kwargs) | python | def update_container(self, container, metadata, **kwargs):
"""Update container metadata
:param container: container name (Container is equivalent to
Bucket term in Amazon).
:param metadata(dict): additional metadata to include in the request.
:param **kwargs(dict): extend args for specific driver.
"""
LOG.debug('update_object() with %s is success.', self.driver)
return self.driver.update_container(container, metadata, **kwargs) | [
"def",
"update_container",
"(",
"self",
",",
"container",
",",
"metadata",
",",
"*",
"*",
"kwargs",
")",
":",
"LOG",
".",
"debug",
"(",
"'update_object() with %s is success.'",
",",
"self",
".",
"driver",
")",
"return",
"self",
".",
"driver",
".",
"update_co... | Update container metadata
:param container: container name (Container is equivalent to
Bucket term in Amazon).
:param metadata(dict): additional metadata to include in the request.
:param **kwargs(dict): extend args for specific driver. | [
"Update",
"container",
"metadata"
] | 7134b3dfe9ee3a383506a592765c7a12fa4ca1e9 | https://github.com/HPCC-Cloud-Computing/CAL/blob/7134b3dfe9ee3a383506a592765c7a12fa4ca1e9/calplus/v1/object_storage/client.py#L62-L71 | train | 55,363 |
HPCC-Cloud-Computing/CAL | calplus/v1/object_storage/client.py | Client.stat_object | def stat_object(self, container, obj):
"""Stat object metadata
:param container: container name (Container is equivalent to
Bucket term in Amazon).
:param obj: object name (Object is equivalent to
Key term in Amazon).
"""
LOG.debug('stat_object() with %s is success.', self.driver)
return self.driver.stat_object(container, obj) | python | def stat_object(self, container, obj):
"""Stat object metadata
:param container: container name (Container is equivalent to
Bucket term in Amazon).
:param obj: object name (Object is equivalent to
Key term in Amazon).
"""
LOG.debug('stat_object() with %s is success.', self.driver)
return self.driver.stat_object(container, obj) | [
"def",
"stat_object",
"(",
"self",
",",
"container",
",",
"obj",
")",
":",
"LOG",
".",
"debug",
"(",
"'stat_object() with %s is success.'",
",",
"self",
".",
"driver",
")",
"return",
"self",
".",
"driver",
".",
"stat_object",
"(",
"container",
",",
"obj",
... | Stat object metadata
:param container: container name (Container is equivalent to
Bucket term in Amazon).
:param obj: object name (Object is equivalent to
Key term in Amazon). | [
"Stat",
"object",
"metadata"
] | 7134b3dfe9ee3a383506a592765c7a12fa4ca1e9 | https://github.com/HPCC-Cloud-Computing/CAL/blob/7134b3dfe9ee3a383506a592765c7a12fa4ca1e9/calplus/v1/object_storage/client.py#L112-L121 | train | 55,364 |
HPCC-Cloud-Computing/CAL | calplus/v1/object_storage/client.py | Client.delete_object | def delete_object(self, container, obj, **kwargs):
"""Delete object in container
:param container: container name (Container is equivalent to
Bucket term in Amazon).
:param obj: object name (Object is equivalent to
Key term in Amazon).
"""
try:
LOG.debug('delete_object() with %s is success.', self.driver)
return self.driver.delete_object(container, obj, **kwargs)
except DriverException as e:
LOG.exception('download_object() with %s raised\
an exception %s.', self.driver, e) | python | def delete_object(self, container, obj, **kwargs):
"""Delete object in container
:param container: container name (Container is equivalent to
Bucket term in Amazon).
:param obj: object name (Object is equivalent to
Key term in Amazon).
"""
try:
LOG.debug('delete_object() with %s is success.', self.driver)
return self.driver.delete_object(container, obj, **kwargs)
except DriverException as e:
LOG.exception('download_object() with %s raised\
an exception %s.', self.driver, e) | [
"def",
"delete_object",
"(",
"self",
",",
"container",
",",
"obj",
",",
"*",
"*",
"kwargs",
")",
":",
"try",
":",
"LOG",
".",
"debug",
"(",
"'delete_object() with %s is success.'",
",",
"self",
".",
"driver",
")",
"return",
"self",
".",
"driver",
".",
"d... | Delete object in container
:param container: container name (Container is equivalent to
Bucket term in Amazon).
:param obj: object name (Object is equivalent to
Key term in Amazon). | [
"Delete",
"object",
"in",
"container"
] | 7134b3dfe9ee3a383506a592765c7a12fa4ca1e9 | https://github.com/HPCC-Cloud-Computing/CAL/blob/7134b3dfe9ee3a383506a592765c7a12fa4ca1e9/calplus/v1/object_storage/client.py#L123-L136 | train | 55,365 |
HPCC-Cloud-Computing/CAL | calplus/v1/object_storage/client.py | Client.list_container_objects | def list_container_objects(self, container, prefix=None, delimiter=None):
"""List container objects
:param container: container name (Container is equivalent to
Bucket term in Amazon).
:param prefix: prefix query
:param delimiter: string to delimit the queries on
"""
LOG.debug('list_container_objects() with %s is success.', self.driver)
return self.driver.list_container_objects(container, prefix, delimiter) | python | def list_container_objects(self, container, prefix=None, delimiter=None):
"""List container objects
:param container: container name (Container is equivalent to
Bucket term in Amazon).
:param prefix: prefix query
:param delimiter: string to delimit the queries on
"""
LOG.debug('list_container_objects() with %s is success.', self.driver)
return self.driver.list_container_objects(container, prefix, delimiter) | [
"def",
"list_container_objects",
"(",
"self",
",",
"container",
",",
"prefix",
"=",
"None",
",",
"delimiter",
"=",
"None",
")",
":",
"LOG",
".",
"debug",
"(",
"'list_container_objects() with %s is success.'",
",",
"self",
".",
"driver",
")",
"return",
"self",
... | List container objects
:param container: container name (Container is equivalent to
Bucket term in Amazon).
:param prefix: prefix query
:param delimiter: string to delimit the queries on | [
"List",
"container",
"objects"
] | 7134b3dfe9ee3a383506a592765c7a12fa4ca1e9 | https://github.com/HPCC-Cloud-Computing/CAL/blob/7134b3dfe9ee3a383506a592765c7a12fa4ca1e9/calplus/v1/object_storage/client.py#L138-L147 | train | 55,366 |
HPCC-Cloud-Computing/CAL | calplus/v1/object_storage/client.py | Client.update_object | def update_object(self, container, obj, metadata, **kwargs):
"""Update object metadata
:param container: container name (Container is equivalent to
Bucket term in Amazon).
:param obj: object name (Object is equivalent to
Key term in Amazon).
:param metadata(dict): additional metadata to include in the request.
"""
try:
LOG.debug('update_object() with %s is success.', self.driver)
return self.driver.update_object(container, obj,
metadata, **kwargs)
except DriverException as e:
LOG.exception('copy_object() with %s raised\
an exception %s.', self.driver, e) | python | def update_object(self, container, obj, metadata, **kwargs):
"""Update object metadata
:param container: container name (Container is equivalent to
Bucket term in Amazon).
:param obj: object name (Object is equivalent to
Key term in Amazon).
:param metadata(dict): additional metadata to include in the request.
"""
try:
LOG.debug('update_object() with %s is success.', self.driver)
return self.driver.update_object(container, obj,
metadata, **kwargs)
except DriverException as e:
LOG.exception('copy_object() with %s raised\
an exception %s.', self.driver, e) | [
"def",
"update_object",
"(",
"self",
",",
"container",
",",
"obj",
",",
"metadata",
",",
"*",
"*",
"kwargs",
")",
":",
"try",
":",
"LOG",
".",
"debug",
"(",
"'update_object() with %s is success.'",
",",
"self",
".",
"driver",
")",
"return",
"self",
".",
... | Update object metadata
:param container: container name (Container is equivalent to
Bucket term in Amazon).
:param obj: object name (Object is equivalent to
Key term in Amazon).
:param metadata(dict): additional metadata to include in the request. | [
"Update",
"object",
"metadata"
] | 7134b3dfe9ee3a383506a592765c7a12fa4ca1e9 | https://github.com/HPCC-Cloud-Computing/CAL/blob/7134b3dfe9ee3a383506a592765c7a12fa4ca1e9/calplus/v1/object_storage/client.py#L149-L164 | train | 55,367 |
Cadasta/django-tutelary | tutelary/decorators.py | get_path_fields | def get_path_fields(cls, base=[]):
"""Get object fields used for calculation of django-tutelary object
paths.
"""
pfs = []
for pf in cls.TutelaryMeta.path_fields:
if pf == 'pk':
pfs.append(base + ['pk'])
else:
f = cls._meta.get_field(pf)
if isinstance(f, models.ForeignKey):
pfs += get_path_fields(f.target_field.model, base=base + [pf])
else:
pfs.append(base + [f.name])
return pfs | python | def get_path_fields(cls, base=[]):
"""Get object fields used for calculation of django-tutelary object
paths.
"""
pfs = []
for pf in cls.TutelaryMeta.path_fields:
if pf == 'pk':
pfs.append(base + ['pk'])
else:
f = cls._meta.get_field(pf)
if isinstance(f, models.ForeignKey):
pfs += get_path_fields(f.target_field.model, base=base + [pf])
else:
pfs.append(base + [f.name])
return pfs | [
"def",
"get_path_fields",
"(",
"cls",
",",
"base",
"=",
"[",
"]",
")",
":",
"pfs",
"=",
"[",
"]",
"for",
"pf",
"in",
"cls",
".",
"TutelaryMeta",
".",
"path_fields",
":",
"if",
"pf",
"==",
"'pk'",
":",
"pfs",
".",
"append",
"(",
"base",
"+",
"[",
... | Get object fields used for calculation of django-tutelary object
paths. | [
"Get",
"object",
"fields",
"used",
"for",
"calculation",
"of",
"django",
"-",
"tutelary",
"object",
"paths",
"."
] | 66bb05de7098777c0a383410c287bf48433cde87 | https://github.com/Cadasta/django-tutelary/blob/66bb05de7098777c0a383410c287bf48433cde87/tutelary/decorators.py#L41-L56 | train | 55,368 |
Cadasta/django-tutelary | tutelary/decorators.py | get_perms_object | def get_perms_object(obj, action):
"""Get the django-tutelary path for an object, based on the fields
listed in ``TutelaryMeta.pfs``.
"""
def get_one(pf):
if isinstance(pf, str):
return pf
else:
return str(reduce(lambda o, f: getattr(o, f), pf, obj))
return Object([get_one(pf) for pf in obj.__class__.TutelaryMeta.pfs]) | python | def get_perms_object(obj, action):
"""Get the django-tutelary path for an object, based on the fields
listed in ``TutelaryMeta.pfs``.
"""
def get_one(pf):
if isinstance(pf, str):
return pf
else:
return str(reduce(lambda o, f: getattr(o, f), pf, obj))
return Object([get_one(pf) for pf in obj.__class__.TutelaryMeta.pfs]) | [
"def",
"get_perms_object",
"(",
"obj",
",",
"action",
")",
":",
"def",
"get_one",
"(",
"pf",
")",
":",
"if",
"isinstance",
"(",
"pf",
",",
"str",
")",
":",
"return",
"pf",
"else",
":",
"return",
"str",
"(",
"reduce",
"(",
"lambda",
"o",
",",
"f",
... | Get the django-tutelary path for an object, based on the fields
listed in ``TutelaryMeta.pfs``. | [
"Get",
"the",
"django",
"-",
"tutelary",
"path",
"for",
"an",
"object",
"based",
"on",
"the",
"fields",
"listed",
"in",
"TutelaryMeta",
".",
"pfs",
"."
] | 66bb05de7098777c0a383410c287bf48433cde87 | https://github.com/Cadasta/django-tutelary/blob/66bb05de7098777c0a383410c287bf48433cde87/tutelary/decorators.py#L59-L69 | train | 55,369 |
def permissioned_model(cls, perm_type=None, path_fields=None, actions=None):
    """Function to set up a model for permissioning. Can either be called
    directly, passing a class and suitable values for ``perm_type``,
    ``path_fields`` and ``actions``, or can be used as a class
    decorator, taking values for ``perm_type``, ``path_fields`` and
    ``actions`` from the ``TutelaryMeta`` subclass of the decorated
    class.

    :param cls: the Django model class to augment
    :param perm_type: permission-type string; required unless the class
        already defines ``TutelaryMeta``
    :param path_fields: field names used to build permission paths
    :param actions: action names, optionally ``(name, options)`` tuples
    :returns: ``cls`` with ``TutelaryMeta.pfs`` and a
        ``get_permissions_object`` method attached
    :raises DecoratorException: if ``cls`` is not a Django model or
        required arguments are missing
    :raises PermissionObjectException: if a ``permissions_object`` option
        does not name a FK/O2O field of ``cls``
    """
    if not issubclass(cls, models.Model):
        raise DecoratorException(
            'permissioned_model',
            "class '" + cls.__name__ + "' is not a Django model"
        )
    # Track whether WE created TutelaryMeta, so the except-block below can
    # undo only our own modification and leave pre-existing metadata alone.
    added = False
    try:
        if not hasattr(cls, 'TutelaryMeta'):
            if perm_type is None or path_fields is None or actions is None:
                raise DecoratorException(
                    'permissioned_model',
                    ("missing argument: all of perm_type, path_fields and " +
                     "actions must be supplied")
                )
            added = True
            # Synthesize the TutelaryMeta holder class from the arguments.
            cls.TutelaryMeta = type('TutelaryMeta', (object,),
                                    dict(perm_type=perm_type,
                                         path_fields=path_fields,
                                         actions=actions))

        # Precompute the full path-field lists (perm_type prefix + fields).
        cls.TutelaryMeta.pfs = ([cls.TutelaryMeta.perm_type] +
                                get_path_fields(cls))

        perms_objs = {}
        for a in cls.TutelaryMeta.actions:
            # Each action is either a bare name or a (name, options) tuple.
            an = a
            ap = {}
            if isinstance(a, tuple):
                an = a[0]
                ap = a[1]
            Action.register(an)
            if isinstance(ap, dict) and 'permissions_object' in ap:
                po = ap['permissions_object']
                if po is not None:
                    try:
                        # The permissions object must be a relational field
                        # of this model; anything else is rejected.
                        t = cls._meta.get_field(po).__class__
                        if t not in [models.ForeignKey,
                                     models.OneToOneField]:
                            raise PermissionObjectException(po)
                    except:
                        # Also converts field-lookup failures (unknown field
                        # name) into a PermissionObjectException.
                        raise PermissionObjectException(po)
                perms_objs[an] = po
        if len(perms_objs) == 0:
            cls.get_permissions_object = get_perms_object
        else:
            cls.get_permissions_object = make_get_perms_object(perms_objs)
        return cls
    except:
        # Roll back the TutelaryMeta attribute if this call added it, then
        # re-raise the original error unchanged.
        if added:
            del cls.TutelaryMeta
        raise
"""Function to set up a model for permissioning. Can either be called
directly, passing a class and suitable values for ``perm_type``,
``path_fields`` and ``actions``, or can be used as a class
decorator, taking values for ``perm_type``, ``path_fields`` and
``actions`` from the ``TutelaryMeta`` subclass of the decorated
class.
"""
if not issubclass(cls, models.Model):
raise DecoratorException(
'permissioned_model',
"class '" + cls.__name__ + "' is not a Django model"
)
added = False
try:
if not hasattr(cls, 'TutelaryMeta'):
if perm_type is None or path_fields is None or actions is None:
raise DecoratorException(
'permissioned_model',
("missing argument: all of perm_type, path_fields and " +
"actions must be supplied")
)
added = True
cls.TutelaryMeta = type('TutelaryMeta', (object,),
dict(perm_type=perm_type,
path_fields=path_fields,
actions=actions))
cls.TutelaryMeta.pfs = ([cls.TutelaryMeta.perm_type] +
get_path_fields(cls))
perms_objs = {}
for a in cls.TutelaryMeta.actions:
an = a
ap = {}
if isinstance(a, tuple):
an = a[0]
ap = a[1]
Action.register(an)
if isinstance(ap, dict) and 'permissions_object' in ap:
po = ap['permissions_object']
if po is not None:
try:
t = cls._meta.get_field(po).__class__
if t not in [models.ForeignKey,
models.OneToOneField]:
raise PermissionObjectException(po)
except:
raise PermissionObjectException(po)
perms_objs[an] = po
if len(perms_objs) == 0:
cls.get_permissions_object = get_perms_object
else:
cls.get_permissions_object = make_get_perms_object(perms_objs)
return cls
except:
if added:
del cls.TutelaryMeta
raise | [
"def",
"permissioned_model",
"(",
"cls",
",",
"perm_type",
"=",
"None",
",",
"path_fields",
"=",
"None",
",",
"actions",
"=",
"None",
")",
":",
"if",
"not",
"issubclass",
"(",
"cls",
",",
"models",
".",
"Model",
")",
":",
"raise",
"DecoratorException",
"... | Function to set up a model for permissioning. Can either be called
directly, passing a class and suitable values for ``perm_type``,
``path_fields`` and ``actions``, or can be used as a class
decorator, taking values for ``perm_type``, ``path_fields`` and
``actions`` from the ``TutelaryMeta`` subclass of the decorated
class. | [
"Function",
"to",
"set",
"up",
"a",
"model",
"for",
"permissioning",
".",
"Can",
"either",
"be",
"called",
"directly",
"passing",
"a",
"class",
"and",
"suitable",
"values",
"for",
"perm_type",
"path_fields",
"and",
"actions",
"or",
"can",
"be",
"used",
"as",... | 66bb05de7098777c0a383410c287bf48433cde87 | https://github.com/Cadasta/django-tutelary/blob/66bb05de7098777c0a383410c287bf48433cde87/tutelary/decorators.py#L89-L146 | train | 55,370 |
hollenstein/maspy | maspy/core.py | _getArrays | def _getArrays(items, attr, defaultValue):
"""Return arrays with equal size of item attributes from a list of sorted
"items" for fast and convenient data processing.
:param attr: list of item attributes that should be added to the returned
array.
:param defaultValue: if an item is missing an attribute, the "defaultValue"
is added to the array instead.
:returns: {'attribute1': numpy.array([attributeValue1, ...]), ...}
"""
arrays = dict([(key, []) for key in attr])
for item in items:
for key in attr:
arrays[key].append(getattr(item, key, defaultValue))
for key in [_ for _ in viewkeys(arrays)]:
arrays[key] = numpy.array(arrays[key])
return arrays | python | def _getArrays(items, attr, defaultValue):
"""Return arrays with equal size of item attributes from a list of sorted
"items" for fast and convenient data processing.
:param attr: list of item attributes that should be added to the returned
array.
:param defaultValue: if an item is missing an attribute, the "defaultValue"
is added to the array instead.
:returns: {'attribute1': numpy.array([attributeValue1, ...]), ...}
"""
arrays = dict([(key, []) for key in attr])
for item in items:
for key in attr:
arrays[key].append(getattr(item, key, defaultValue))
for key in [_ for _ in viewkeys(arrays)]:
arrays[key] = numpy.array(arrays[key])
return arrays | [
"def",
"_getArrays",
"(",
"items",
",",
"attr",
",",
"defaultValue",
")",
":",
"arrays",
"=",
"dict",
"(",
"[",
"(",
"key",
",",
"[",
"]",
")",
"for",
"key",
"in",
"attr",
"]",
")",
"for",
"item",
"in",
"items",
":",
"for",
"key",
"in",
"attr",
... | Return arrays with equal size of item attributes from a list of sorted
"items" for fast and convenient data processing.
:param attr: list of item attributes that should be added to the returned
array.
:param defaultValue: if an item is missing an attribute, the "defaultValue"
is added to the array instead.
:returns: {'attribute1': numpy.array([attributeValue1, ...]), ...} | [
"Return",
"arrays",
"with",
"equal",
"size",
"of",
"item",
"attributes",
"from",
"a",
"list",
"of",
"sorted",
"items",
"for",
"fast",
"and",
"convenient",
"data",
"processing",
"."
] | f15fcfd24df306d8420540460d902aa3073ec133 | https://github.com/hollenstein/maspy/blob/f15fcfd24df306d8420540460d902aa3073ec133/maspy/core.py#L53-L71 | train | 55,371 |
def addMsrunContainers(mainContainer, subContainer):
    """Merge all specfile entries of ``subContainer`` into ``mainContainer``.

    Specfiles whose name is already present in ``mainContainer.info`` are
    skipped entirely; their data is not touched.

    :param mainContainer: :class:`MsrunContainer`
    :param subContainer: :class:`MsrunContainer`

    .. warning:: no new items are generated; both containers end up sharing
        the same item objects, so changes made to an element through one
        container also affect the other (same memory location).
    """
    # Maps a datatype key to the name of the container attribute holding it.
    containerAttrs = {'rm': 'rmc', 'ci': 'cic', 'smi': 'smic',
                      'sai': 'saic', 'si': 'sic'
                      }
    for specfile in subContainer.info:
        if specfile in mainContainer.info:
            continue
        mainContainer.addSpecfile(specfile, subContainer.info[specfile]['path'])
        statusInfo = subContainer.info[specfile]['status']
        for datatype in statusInfo:
            if not statusInfo[datatype]:
                continue
            attrName = containerAttrs[datatype]
            targetContainer = getattr(mainContainer, attrName)
            targetContainer[specfile] = getattr(subContainer,
                                                attrName
                                                )[specfile]
            mainContainer.info[specfile]['status'][datatype] = True
"""Adds the complete content of all specfile entries from the subContainer
to the mainContainer. However if a specfile of ``subContainer.info`` is
already present in ``mainContainer.info`` its contents are not added to the
mainContainer.
:param mainContainer: :class:`MsrunContainer`
:param subContainer: :class:`MsrunContainer`
.. warning:: does not generate new items, all items added to the
``mainContainer`` are still present in the ``subContainer`` and changes
made to elements of one container also affects the elements of the other
one (ie elements share same memory location).
"""
typeToContainer = {'rm': 'rmc', 'ci': 'cic', 'smi': 'smic',
'sai': 'saic', 'si': 'sic'
}
for specfile in subContainer.info:
if specfile in mainContainer.info:
continue
mainContainer.addSpecfile(specfile, subContainer.info[specfile]['path'])
for datatype, status in listitems(subContainer.info[specfile]['status']):
if not status:
continue
datatypeContainer = typeToContainer[datatype]
dataTypeContainer = getattr(mainContainer, datatypeContainer)
subContainerData = getattr(subContainer,
datatypeContainer
)[specfile]
dataTypeContainer[specfile] = subContainerData
mainContainer.info[specfile]['status'][datatype] = True | [
"def",
"addMsrunContainers",
"(",
"mainContainer",
",",
"subContainer",
")",
":",
"typeToContainer",
"=",
"{",
"'rm'",
":",
"'rmc'",
",",
"'ci'",
":",
"'cic'",
",",
"'smi'",
":",
"'smic'",
",",
"'sai'",
":",
"'saic'",
",",
"'si'",
":",
"'sic'",
"}",
"for... | Adds the complete content of all specfile entries from the subContainer
to the mainContainer. However if a specfile of ``subContainer.info`` is
already present in ``mainContainer.info`` its contents are not added to the
mainContainer.
:param mainContainer: :class:`MsrunContainer`
:param subContainer: :class:`MsrunContainer`
.. warning:: does not generate new items, all items added to the
``mainContainer`` are still present in the ``subContainer`` and changes
made to elements of one container also affects the elements of the other
one (ie elements share same memory location). | [
"Adds",
"the",
"complete",
"content",
"of",
"all",
"specfile",
"entries",
"from",
"the",
"subContainer",
"to",
"the",
"mainContainer",
".",
"However",
"if",
"a",
"specfile",
"of",
"subContainer",
".",
"info",
"is",
"already",
"present",
"in",
"mainContainer",
... | f15fcfd24df306d8420540460d902aa3073ec133 | https://github.com/hollenstein/maspy/blob/f15fcfd24df306d8420540460d902aa3073ec133/maspy/core.py#L1225-L1256 | train | 55,372 |
def setPath(self, folderpath, specfiles=None):
    """Change the folder path used for saving and loading the ``mrc``
    files of the given specfiles.

    :param folderpath: a file directory
    :param specfiles: the name of an ms-run file or a list of names. If
        None all specfiles are selected.
    :type specfiles: None, str, [str, str]
    """
    if specfiles is None:
        selected = list(viewkeys(self.info))
    else:
        selected = aux.toList(specfiles)
    _containerSetPath(self, folderpath, selected)
"""Changes the folderpath of the specified specfiles. The folderpath is
used for saving and loading of ``mrc`` files.
:param specfiles: the name of an ms-run file or a list of names. If None
all specfiles are selected.
:type specfiles: None, str, [str, str]
:param folderpath: a filedirectory
"""
if specfiles is None:
specfiles = [_ for _ in viewkeys(self.info)]
else:
specfiles = aux.toList(specfiles)
_containerSetPath(self, folderpath, specfiles) | [
"def",
"setPath",
"(",
"self",
",",
"folderpath",
",",
"specfiles",
"=",
"None",
")",
":",
"if",
"specfiles",
"is",
"None",
":",
"specfiles",
"=",
"[",
"_",
"for",
"_",
"in",
"viewkeys",
"(",
"self",
".",
"info",
")",
"]",
"else",
":",
"specfiles",
... | Changes the folderpath of the specified specfiles. The folderpath is
used for saving and loading of ``mrc`` files.
:param specfiles: the name of an ms-run file or a list of names. If None
all specfiles are selected.
:type specfiles: None, str, [str, str]
:param folderpath: a filedirectory | [
"Changes",
"the",
"folderpath",
"of",
"the",
"specified",
"specfiles",
".",
"The",
"folderpath",
"is",
"used",
"for",
"saving",
"and",
"loading",
"of",
"mrc",
"files",
"."
] | f15fcfd24df306d8420540460d902aa3073ec133 | https://github.com/hollenstein/maspy/blob/f15fcfd24df306d8420540460d902aa3073ec133/maspy/core.py#L356-L370 | train | 55,373 |
def removeSpecfile(self, specfiles):
    """Completely remove the specified specfiles from the
    ``msrunContainer``.

    :param specfiles: the name of an ms-run file or a list of names.
    :type specfiles: str, [str, str]
    """
    for specfile in aux.toList(specfiles):
        # Drop the specfile from every datatype container; pop() tolerates
        # datatypes that were never loaded for this specfile.
        for attrName in ('rmc', 'cic', 'smic', 'saic', 'sic'):
            getattr(self, attrName).pop(specfile, None)
        del self.info[specfile]
"""Completely removes the specified specfiles from the
``msrunContainer``.
:param specfiles: the name of an ms-run file or a list of names. If None
all specfiles are selected.
:type specfiles: str, [str, str]
"""
for specfile in aux.toList(specfiles):
for datatypeContainer in ['rmc', 'cic', 'smic', 'saic', 'sic']:
dataContainer = getattr(self, datatypeContainer)
try:
del dataContainer[specfile]
except KeyError:
pass
del self.info[specfile] | [
"def",
"removeSpecfile",
"(",
"self",
",",
"specfiles",
")",
":",
"for",
"specfile",
"in",
"aux",
".",
"toList",
"(",
"specfiles",
")",
":",
"for",
"datatypeContainer",
"in",
"[",
"'rmc'",
",",
"'cic'",
",",
"'smic'",
",",
"'saic'",
",",
"'sic'",
"]",
... | Completely removes the specified specfiles from the
``msrunContainer``.
:param specfiles: the name of an ms-run file or a list of names. If None
all specfiles are selected.
:type specfiles: str, [str, str] | [
"Completely",
"removes",
"the",
"specified",
"specfiles",
"from",
"the",
"msrunContainer",
"."
] | f15fcfd24df306d8420540460d902aa3073ec133 | https://github.com/hollenstein/maspy/blob/f15fcfd24df306d8420540460d902aa3073ec133/maspy/core.py#L408-L423 | train | 55,374 |
hollenstein/maspy | maspy/core.py | MsrunContainer._processDatatypes | def _processDatatypes(self, rm, ci, smi, sai, si):
"""Helper function that returns a list of datatype strings, depending
on the parameters boolean value.
:param rm: bool, True to add ``rm``
:param ci: bool, True to add ``ci``
:param smi: bool, True to add ``smi``
:param sai: bool, True to add ``sai``
:param si: bool, True to add ``si``
:returns: [datatype1, ...]
"""
datatypes = list()
for datatype, value in [('rm', rm), ('ci', ci), ('smi', smi),
('sai', sai), ('si', si)]:
if value:
datatypes.append(datatype)
return datatypes | python | def _processDatatypes(self, rm, ci, smi, sai, si):
"""Helper function that returns a list of datatype strings, depending
on the parameters boolean value.
:param rm: bool, True to add ``rm``
:param ci: bool, True to add ``ci``
:param smi: bool, True to add ``smi``
:param sai: bool, True to add ``sai``
:param si: bool, True to add ``si``
:returns: [datatype1, ...]
"""
datatypes = list()
for datatype, value in [('rm', rm), ('ci', ci), ('smi', smi),
('sai', sai), ('si', si)]:
if value:
datatypes.append(datatype)
return datatypes | [
"def",
"_processDatatypes",
"(",
"self",
",",
"rm",
",",
"ci",
",",
"smi",
",",
"sai",
",",
"si",
")",
":",
"datatypes",
"=",
"list",
"(",
")",
"for",
"datatype",
",",
"value",
"in",
"[",
"(",
"'rm'",
",",
"rm",
")",
",",
"(",
"'ci'",
",",
"ci"... | Helper function that returns a list of datatype strings, depending
on the parameters boolean value.
:param rm: bool, True to add ``rm``
:param ci: bool, True to add ``ci``
:param smi: bool, True to add ``smi``
:param sai: bool, True to add ``sai``
:param si: bool, True to add ``si``
:returns: [datatype1, ...] | [
"Helper",
"function",
"that",
"returns",
"a",
"list",
"of",
"datatype",
"strings",
"depending",
"on",
"the",
"parameters",
"boolean",
"value",
"."
] | f15fcfd24df306d8420540460d902aa3073ec133 | https://github.com/hollenstein/maspy/blob/f15fcfd24df306d8420540460d902aa3073ec133/maspy/core.py#L425-L442 | train | 55,375 |
def save(self, specfiles=None, rm=False, ci=False, smi=False, sai=False,
         si=False, compress=True, path=None):
    """Writes the specified datatypes to ``mrc`` files on the hard disk.

    .. note::
        If ``.save()`` is called and no ``mrc`` files are present in the
        specified path new files are generated, otherwise old files are
        replaced.

    :param specfiles: the name of an ms-run file or a list of names. If None
        all specfiles are selected.
    :type specfiles: None, str, [str, str]
    :param rm: bool, True to select ``self.rmc`` (run metadata)
    :param ci: bool, True to select ``self.cic`` (chromatogram items)
    :param smi: bool, True to select ``self.smic`` (spectrum metadata items)
    :param sai: bool, True to select ``self.saic`` (spectrum array items)
    :param si: bool, True to select ``self.sic`` (spectrum items)
    :param compress: bool, True to use zip file compression
    :param path: filedirectory to which the ``mrc`` files are written. By
        default the parameter is set to ``None`` and the filedirectory is
        read from ``self.info[specfile]['path']``
    """
    if specfiles is None:
        specfiles = [_ for _ in viewkeys(self.info)]
    else:
        specfiles = aux.toList(specfiles)
    #If no datatype flag was set, save all five datatypes.
    datatypes = self._processDatatypes(rm, ci, smi, sai, si)
    if len(datatypes) == 0:
        datatypes = ['rm', 'ci', 'smi', 'sai', 'si']
    for specfile in specfiles:
        if specfile not in self.info:
            #Unknown specfiles are reported with a warning but do not
            #abort saving the remaining ones.
            warntext = 'Error while calling "MsrunContainer.save()": "%s" '\
                       'is not present in "MsrunContainer.info"!'\
                       % (specfile, )
            warnings.warn(warntext)
            continue
        else:
            msrunInfo = self.info[specfile]
            specfilePath = msrunInfo['path'] if path is None else path
        #NOTE(review): PartiallySafeReplace presumably stages writes and
        #only replaces the existing files on successful completion --
        #confirm in maspy.auxiliary.
        with aux.PartiallySafeReplace() as msr:
            for datatype in datatypes:
                #One file per datatype, e.g. "<specfile>.mrc_si".
                filename = specfile + '.mrc_' + datatype
                filepath = aux.joinpath(specfilePath, filename)
                with msr.open(filepath, 'w+b') as openfile:
                    if datatype == 'rm':
                        self._writeRmc(openfile, specfile)
                    elif datatype == 'ci':
                        self._writeCic(openfile, specfile, compress)
                    elif datatype == 'si':
                        self._writeSic(openfile, specfile, compress)
                    elif datatype == 'smi':
                        self._writeSmic(openfile, specfile, compress)
                    elif datatype == 'sai':
                        self._writeSaic(openfile, specfile, compress)
si=False, compress=True, path=None):
"""Writes the specified datatypes to ``mrc`` files on the hard disk.
.. note::
If ``.save()`` is called and no ``mrc`` files are present in the
specified path new files are generated, otherwise old files are
replaced.
:param specfiles: the name of an ms-run file or a list of names. If None
all specfiles are selected.
:type specfiles: None, str, [str, str]
:param rm: bool, True to select ``self.rmc`` (run metadata)
:param ci: bool, True to select ``self.cic`` (chromatogram items)
:param smi: bool, True to select ``self.smic`` (spectrum metadata items)
:param sai: bool, True to select ``self.saic`` (spectrum array items)
:param si: bool, True to select ``self.sic`` (spectrum items)
:param compress: bool, True to use zip file compression
:param path: filedirectory to which the ``mrc`` files are written. By
default the parameter is set to ``None`` and the filedirectory is
read from ``self.info[specfile]['path']``
"""
if specfiles is None:
specfiles = [_ for _ in viewkeys(self.info)]
else:
specfiles = aux.toList(specfiles)
datatypes = self._processDatatypes(rm, ci, smi, sai, si)
if len(datatypes) == 0:
datatypes = ['rm', 'ci', 'smi', 'sai', 'si']
for specfile in specfiles:
if specfile not in self.info:
warntext = 'Error while calling "MsrunContainer.save()": "%s" '\
'is not present in "MsrunContainer.info"!'\
% (specfile, )
warnings.warn(warntext)
continue
else:
msrunInfo = self.info[specfile]
specfilePath = msrunInfo['path'] if path is None else path
with aux.PartiallySafeReplace() as msr:
for datatype in datatypes:
filename = specfile + '.mrc_' + datatype
filepath = aux.joinpath(specfilePath, filename)
with msr.open(filepath, 'w+b') as openfile:
if datatype == 'rm':
self._writeRmc(openfile, specfile)
elif datatype == 'ci':
self._writeCic(openfile, specfile, compress)
elif datatype == 'si':
self._writeSic(openfile, specfile, compress)
elif datatype == 'smi':
self._writeSmic(openfile, specfile, compress)
elif datatype == 'sai':
self._writeSaic(openfile, specfile, compress) | [
"def",
"save",
"(",
"self",
",",
"specfiles",
"=",
"None",
",",
"rm",
"=",
"False",
",",
"ci",
"=",
"False",
",",
"smi",
"=",
"False",
",",
"sai",
"=",
"False",
",",
"si",
"=",
"False",
",",
"compress",
"=",
"True",
",",
"path",
"=",
"None",
")... | Writes the specified datatypes to ``mrc`` files on the hard disk.
.. note::
If ``.save()`` is called and no ``mrc`` files are present in the
specified path new files are generated, otherwise old files are
replaced.
:param specfiles: the name of an ms-run file or a list of names. If None
all specfiles are selected.
:type specfiles: None, str, [str, str]
:param rm: bool, True to select ``self.rmc`` (run metadata)
:param ci: bool, True to select ``self.cic`` (chromatogram items)
:param smi: bool, True to select ``self.smic`` (spectrum metadata items)
:param sai: bool, True to select ``self.saic`` (spectrum array items)
:param si: bool, True to select ``self.sic`` (spectrum items)
:param compress: bool, True to use zip file compression
:param path: filedirectory to which the ``mrc`` files are written. By
default the parameter is set to ``None`` and the filedirectory is
read from ``self.info[specfile]['path']`` | [
"Writes",
"the",
"specified",
"datatypes",
"to",
"mrc",
"files",
"on",
"the",
"hard",
"disk",
"."
] | f15fcfd24df306d8420540460d902aa3073ec133 | https://github.com/hollenstein/maspy/blob/f15fcfd24df306d8420540460d902aa3073ec133/maspy/core.py#L444-L499 | train | 55,376 |
def _writeRmc(self, filelike, specfile):
    """Serialize the ``rmc`` entry of ``specfile`` as a human readable,
    pretty printed xml string and write it to ``filelike``.

    :param filelike: path to a file (str) or a file-like object
    :param specfile: name of an ms-run file present in ``self.info``
    """
    filelike.write(ETREE.tostring(self.rmc[specfile], pretty_print=True))
"""Writes the ``.rmc`` container entry of the specified specfile as an
human readable and pretty formatted xml string.
:param filelike: path to a file (str) or a file-like object
:param specfile: name of an ms-run file present in ``self.info``
"""
xmlString = ETREE.tostring(self.rmc[specfile], pretty_print=True)
filelike.write(xmlString) | [
"def",
"_writeRmc",
"(",
"self",
",",
"filelike",
",",
"specfile",
")",
":",
"xmlString",
"=",
"ETREE",
".",
"tostring",
"(",
"self",
".",
"rmc",
"[",
"specfile",
"]",
",",
"pretty_print",
"=",
"True",
")",
"filelike",
".",
"write",
"(",
"xmlString",
"... | Writes the ``.rmc`` container entry of the specified specfile as an
human readable and pretty formatted xml string.
:param filelike: path to a file (str) or a file-like object
:param specfile: name of an ms-run file present in ``self.info`` | [
"Writes",
"the",
".",
"rmc",
"container",
"entry",
"of",
"the",
"specified",
"specfile",
"as",
"an",
"human",
"readable",
"and",
"pretty",
"formatted",
"xml",
"string",
"."
] | f15fcfd24df306d8420540460d902aa3073ec133 | https://github.com/hollenstein/maspy/blob/f15fcfd24df306d8420540460d902aa3073ec133/maspy/core.py#L545-L553 | train | 55,377 |
def load(self, specfiles=None, rm=False, ci=False, smi=False, sai=False,
         si=False):
    """Import the specified datatypes from ``mrc`` files on the hard disk.

    :param specfiles: the name of an ms-run file or a list of names. If None
        all specfiles are selected.
    :type specfiles: None, str, [str, str]
    :param rm: bool, True to import ``mrc_rm`` (run metadata)
    :param ci: bool, True to import ``mrc_ci`` (chromatogram items)
    :param smi: bool, True to import ``mrc_smi`` (spectrum metadata items)
    :param sai: bool, True to import ``mrc_sai`` (spectrum array items)
    :param si: bool, True to import ``mrc_si`` (spectrum items)
    """
    if specfiles is None:
        specfiles = [_ for _ in viewkeys(self.info)]
    else:
        specfiles = aux.toList(specfiles)

    #Select only specfiles which are present in the ``self.info``;
    #unknown names are reported with a warning and skipped.
    selectedSpecfiles = list()
    for specfile in specfiles:
        if specfile not in self.info:
            warntext = 'Error while calling "MsrunContainer.load()": "%s" '\
                       'not present in MsrunContainer.info' % specfile
            warnings.warn(warntext)
        else:
            selectedSpecfiles.append(specfile)
    #If no datatype flag was set, load all five datatypes.
    datatypes = self._processDatatypes(rm, ci, smi, sai, si)
    if len(datatypes) == 0:
        datatypes = ['rm', 'ci', 'smi', 'sai', 'si']

    for specfile in selectedSpecfiles:
        msrunInfo = self.info[specfile]
        specfilePath = msrunInfo['path']
        #Each datatype lives in its own file "<specfile>.mrc_<datatype>";
        #after a successful import the status flag is set to True.
        if 'rm' in datatypes:
            #Run metadata is stored as plain xml.
            rmPath = aux.joinpath(specfilePath, specfile+'.mrc_rm')
            with open(rmPath, 'rb') as openfile:
                xmlString = openfile.read()
            self.rmc[specfile] = ETREE.fromstring(xmlString)
            msrunInfo['status']['rm'] = True
        if 'ci' in datatypes:
            ciPath = aux.joinpath(specfilePath, specfile+'.mrc_ci')
            self.cic[specfile] = aux.loadBinaryItemContainer(ciPath,
                                                             Ci.jsonHook)
            msrunInfo['status']['ci'] = True
        if 'smi' in datatypes:
            smiPath = aux.joinpath(specfilePath, specfile+'.mrc_smi')
            with zipfile.ZipFile(smiPath, 'r') as containerZip:
                #Convert the zipfile data into a str object, necessary since
                #containerZip.read() returns a bytes object.
                jsonString = io.TextIOWrapper(containerZip.open('data'),
                                              encoding='utf-8'
                                              ).read()
            self.smic[specfile] = json.loads(jsonString,
                                             object_hook=Smi.jsonHook
                                             )
            msrunInfo['status']['smi'] = True
        if 'sai' in datatypes:
            saiPath = aux.joinpath(specfilePath, specfile+'.mrc_sai')
            self.saic[specfile] = aux.loadBinaryItemContainer(saiPath,
                                                              Sai.jsonHook
                                                              )
            msrunInfo['status']['sai'] = True
        if 'si' in datatypes:
            siPath = aux.joinpath(specfilePath, specfile+'.mrc_si')
            with zipfile.ZipFile(siPath, 'r') as containerZip:
                #Convert the zipfile data into a str object, necessary since
                #containerZip.read() returns a bytes object.
                jsonString = io.TextIOWrapper(containerZip.open('data'),
                                              encoding='utf-8'
                                              ).read()
            self.sic[specfile] = json.loads(jsonString,
                                            object_hook=Si.jsonHook
                                            )
            msrunInfo['status']['si'] = True
si=False):
"""Import the specified datatypes from ``mrc`` files on the hard disk.
:param specfiles: the name of an ms-run file or a list of names. If None
all specfiles are selected.
:type specfiles: None, str, [str, str]
:param rm: bool, True to import ``mrc_rm`` (run metadata)
:param ci: bool, True to import ``mrc_ci`` (chromatogram items)
:param smi: bool, True to import ``mrc_smi`` (spectrum metadata items)
:param sai: bool, True to import ``mrc_sai`` (spectrum array items)
:param si: bool, True to import ``mrc_si`` (spectrum items)
"""
if specfiles is None:
specfiles = [_ for _ in viewkeys(self.info)]
else:
specfiles = aux.toList(specfiles)
#Select only specfiles which are present in the ``self.info``.
selectedSpecfiles = list()
for specfile in specfiles:
if specfile not in self.info:
warntext = 'Error while calling "MsrunContainer.load()": "%s" '\
'not present in MsrunContainer.info' % specfile
warnings.warn(warntext)
else:
selectedSpecfiles.append(specfile)
datatypes = self._processDatatypes(rm, ci, smi, sai, si)
if len(datatypes) == 0:
datatypes = ['rm', 'ci', 'smi', 'sai', 'si']
for specfile in selectedSpecfiles:
msrunInfo = self.info[specfile]
specfilePath = msrunInfo['path']
if 'rm' in datatypes:
rmPath = aux.joinpath(specfilePath, specfile+'.mrc_rm')
with open(rmPath, 'rb') as openfile:
xmlString = openfile.read()
self.rmc[specfile] = ETREE.fromstring(xmlString)
msrunInfo['status']['rm'] = True
if 'ci' in datatypes:
ciPath = aux.joinpath(specfilePath, specfile+'.mrc_ci')
self.cic[specfile] = aux.loadBinaryItemContainer(ciPath,
Ci.jsonHook)
msrunInfo['status']['ci'] = True
if 'smi' in datatypes:
smiPath = aux.joinpath(specfilePath, specfile+'.mrc_smi')
with zipfile.ZipFile(smiPath, 'r') as containerZip:
#Convert the zipfile data into a str object,necessary since
#containerZip.read() returns a bytes object.
jsonString = io.TextIOWrapper(containerZip.open('data'),
encoding='utf-8'
).read()
self.smic[specfile] = json.loads(jsonString,
object_hook=Smi.jsonHook
)
msrunInfo['status']['smi'] = True
if 'sai' in datatypes:
saiPath = aux.joinpath(specfilePath, specfile+'.mrc_sai')
self.saic[specfile] = aux.loadBinaryItemContainer(saiPath,
Sai.jsonHook
)
msrunInfo['status']['sai'] = True
if 'si' in datatypes:
siPath = aux.joinpath(specfilePath, specfile+'.mrc_si')
with zipfile.ZipFile(siPath, 'r') as containerZip:
#Convert the zipfile data into a str object, necessary since
#containerZip.read() returns a bytes object.
jsonString = io.TextIOWrapper(containerZip.open('data'),
encoding='utf-8'
).read()
self.sic[specfile] = json.loads(jsonString,
object_hook=Si.jsonHook
)
msrunInfo['status']['si'] = True | [
"def",
"load",
"(",
"self",
",",
"specfiles",
"=",
"None",
",",
"rm",
"=",
"False",
",",
"ci",
"=",
"False",
",",
"smi",
"=",
"False",
",",
"sai",
"=",
"False",
",",
"si",
"=",
"False",
")",
":",
"if",
"specfiles",
"is",
"None",
":",
"specfiles",... | Import the specified datatypes from ``mrc`` files on the hard disk.
:param specfiles: the name of an ms-run file or a list of names. If None
all specfiles are selected.
:type specfiles: None, str, [str, str]
:param rm: bool, True to import ``mrc_rm`` (run metadata)
:param ci: bool, True to import ``mrc_ci`` (chromatogram items)
:param smi: bool, True to import ``mrc_smi`` (spectrum metadata items)
:param sai: bool, True to import ``mrc_sai`` (spectrum array items)
:param si: bool, True to import ``mrc_si`` (spectrum items) | [
"Import",
"the",
"specified",
"datatypes",
"from",
"mrc",
"files",
"on",
"the",
"hard",
"disk",
"."
] | f15fcfd24df306d8420540460d902aa3073ec133 | https://github.com/hollenstein/maspy/blob/f15fcfd24df306d8420540460d902aa3073ec133/maspy/core.py#L555-L635 | train | 55,378 |
def jsonHook(encoded):
    """Custom JSON decoder that rebuilds maspy objects from a decoded JSON
    object literal.

    :param encoded: a JSON decoded object literal (a dict)
    :returns: "encoded" or one of the these objects: :class:`Ci`,
        :class:`MzmlProduct`, :class:`MzmlPrecursor`
    """
    # Ordered (marker, decoder) pairs; the lambdas keep the class lookups
    # lazy, so unmarked dicts never touch the decoder classes.
    decoders = (
        ('__Ci__', lambda payload: Ci._fromJSON(payload)),
        ('__MzmlProduct__', lambda payload: MzmlProduct._fromJSON(payload)),
        ('__MzmlPrecursor__',
         lambda payload: MzmlPrecursor._fromJSON(payload)),
    )
    for marker, decode in decoders:
        if marker in encoded:
            return decode(encoded[marker])
    return encoded
"""Custom JSON decoder that allows construction of a new ``Ci`` instance
from a decoded JSON object.
:param encoded: a JSON decoded object literal (a dict)
:returns: "encoded" or one of the these objects: :class:`Ci`,
:class:`MzmlProduct`, :class:`MzmlPrecursor`
"""
if '__Ci__' in encoded:
return Ci._fromJSON(encoded['__Ci__'])
elif '__MzmlProduct__' in encoded:
return MzmlProduct._fromJSON(encoded['__MzmlProduct__'])
elif '__MzmlPrecursor__' in encoded:
return MzmlPrecursor._fromJSON(encoded['__MzmlPrecursor__'])
else:
return encoded | [
"def",
"jsonHook",
"(",
"encoded",
")",
":",
"if",
"'__Ci__'",
"in",
"encoded",
":",
"return",
"Ci",
".",
"_fromJSON",
"(",
"encoded",
"[",
"'__Ci__'",
"]",
")",
"elif",
"'__MzmlProduct__'",
"in",
"encoded",
":",
"return",
"MzmlProduct",
".",
"_fromJSON",
... | Custom JSON decoder that allows construction of a new ``Ci`` instance
from a decoded JSON object.
:param encoded: a JSON decoded object literal (a dict)
:returns: "encoded" or one of the these objects: :class:`Ci`,
:class:`MzmlProduct`, :class:`MzmlPrecursor` | [
"Custom",
"JSON",
"decoder",
"that",
"allows",
"construction",
"of",
"a",
"new",
"Ci",
"instance",
"from",
"a",
"decoded",
"JSON",
"object",
"."
] | f15fcfd24df306d8420540460d902aa3073ec133 | https://github.com/hollenstein/maspy/blob/f15fcfd24df306d8420540460d902aa3073ec133/maspy/core.py#L741-L757 | train | 55,379 |
hollenstein/maspy | maspy/core.py | Smi.jsonHook | def jsonHook(encoded):
"""Custom JSON decoder that allows construction of a new ``Smi``
instance from a decoded JSON object.
:param encoded: a JSON decoded object literal (a dict)
:returns: "encoded" or one of the these objects: :class:`Smi`,
:class:`MzmlScan`, :class:`MzmlProduct`, :class:`MzmlPrecursor`
"""
if '__Smi__' in encoded:
return Smi._fromJSON(encoded['__Smi__'])
elif '__MzmlScan__' in encoded:
return MzmlScan._fromJSON(encoded['__MzmlScan__'])
elif '__MzmlProduct__' in encoded:
return MzmlProduct._fromJSON(encoded['__MzmlProduct__'])
elif '__MzmlPrecursor__' in encoded:
return MzmlPrecursor._fromJSON(encoded['__MzmlPrecursor__'])
else:
return encoded | python | def jsonHook(encoded):
"""Custom JSON decoder that allows construction of a new ``Smi``
instance from a decoded JSON object.
:param encoded: a JSON decoded object literal (a dict)
:returns: "encoded" or one of the these objects: :class:`Smi`,
:class:`MzmlScan`, :class:`MzmlProduct`, :class:`MzmlPrecursor`
"""
if '__Smi__' in encoded:
return Smi._fromJSON(encoded['__Smi__'])
elif '__MzmlScan__' in encoded:
return MzmlScan._fromJSON(encoded['__MzmlScan__'])
elif '__MzmlProduct__' in encoded:
return MzmlProduct._fromJSON(encoded['__MzmlProduct__'])
elif '__MzmlPrecursor__' in encoded:
return MzmlPrecursor._fromJSON(encoded['__MzmlPrecursor__'])
else:
return encoded | [
"def",
"jsonHook",
"(",
"encoded",
")",
":",
"if",
"'__Smi__'",
"in",
"encoded",
":",
"return",
"Smi",
".",
"_fromJSON",
"(",
"encoded",
"[",
"'__Smi__'",
"]",
")",
"elif",
"'__MzmlScan__'",
"in",
"encoded",
":",
"return",
"MzmlScan",
".",
"_fromJSON",
"("... | Custom JSON decoder that allows construction of a new ``Smi``
instance from a decoded JSON object.
:param encoded: a JSON decoded object literal (a dict)
:returns: "encoded" or one of the these objects: :class:`Smi`,
:class:`MzmlScan`, :class:`MzmlProduct`, :class:`MzmlPrecursor` | [
"Custom",
"JSON",
"decoder",
"that",
"allows",
"construction",
"of",
"a",
"new",
"Smi",
"instance",
"from",
"a",
"decoded",
"JSON",
"object",
"."
] | f15fcfd24df306d8420540460d902aa3073ec133 | https://github.com/hollenstein/maspy/blob/f15fcfd24df306d8420540460d902aa3073ec133/maspy/core.py#L935-L953 | train | 55,380 |
hollenstein/maspy | maspy/core.py | SiiContainer.removeSpecfile | def removeSpecfile(self, specfiles):
"""Completely removes the specified specfiles from the ``SiiContainer``.
:param specfiles: the name of an ms-run file or a list of names.
"""
for specfile in aux.toList(specfiles):
del self.container[specfile]
del self.info[specfile] | python | def removeSpecfile(self, specfiles):
"""Completely removes the specified specfiles from the ``SiiContainer``.
:param specfiles: the name of an ms-run file or a list of names.
"""
for specfile in aux.toList(specfiles):
del self.container[specfile]
del self.info[specfile] | [
"def",
"removeSpecfile",
"(",
"self",
",",
"specfiles",
")",
":",
"for",
"specfile",
"in",
"aux",
".",
"toList",
"(",
"specfiles",
")",
":",
"del",
"self",
".",
"container",
"[",
"specfile",
"]",
"del",
"self",
".",
"info",
"[",
"specfile",
"]"
] | Completely removes the specified specfiles from the ``SiiContainer``.
:param specfiles: the name of an ms-run file or a list of names. | [
"Completely",
"removes",
"the",
"specified",
"specfiles",
"from",
"the",
"SiiContainer",
"."
] | f15fcfd24df306d8420540460d902aa3073ec133 | https://github.com/hollenstein/maspy/blob/f15fcfd24df306d8420540460d902aa3073ec133/maspy/core.py#L1487-L1494 | train | 55,381 |
hollenstein/maspy | maspy/core.py | SiiContainer.save | def save(self, specfiles=None, compress=True, path=None):
"""Writes the specified specfiles to ``siic`` files on the hard disk.
.. note::
If ``.save()`` is called and no ``siic`` files are present in the
specified path new files are generated, otherwise old files are
replaced.
:param specfiles: the name of an ms-run file or a list of names. If None
all specfiles are selected.
:param compress: bool, True to use zip file compression
:param path: filedirectory to which the ``siic`` files are written. By
default the parameter is set to ``None`` and the filedirectory is
read from ``self.info[specfile]['path']``
"""
if specfiles is None:
specfiles = [_ for _ in viewkeys(self.info)]
else:
specfiles = aux.toList(specfiles)
for specfile in specfiles:
if specfile not in self.info:
warntext = 'Error while calling "SiiContainer.save()": "%s" is'\
' not present in "SiiContainer.info"!'\
% (specfile, )
warnings.warn(warntext)
continue
else:
path = self.info[specfile]['path'] if path is None else path
with aux.PartiallySafeReplace() as msr:
filename = specfile + '.siic'
filepath = aux.joinpath(path, filename)
with msr.open(filepath, mode='w+b') as openfile:
self._writeContainer(openfile, specfile, compress) | python | def save(self, specfiles=None, compress=True, path=None):
"""Writes the specified specfiles to ``siic`` files on the hard disk.
.. note::
If ``.save()`` is called and no ``siic`` files are present in the
specified path new files are generated, otherwise old files are
replaced.
:param specfiles: the name of an ms-run file or a list of names. If None
all specfiles are selected.
:param compress: bool, True to use zip file compression
:param path: filedirectory to which the ``siic`` files are written. By
default the parameter is set to ``None`` and the filedirectory is
read from ``self.info[specfile]['path']``
"""
if specfiles is None:
specfiles = [_ for _ in viewkeys(self.info)]
else:
specfiles = aux.toList(specfiles)
for specfile in specfiles:
if specfile not in self.info:
warntext = 'Error while calling "SiiContainer.save()": "%s" is'\
' not present in "SiiContainer.info"!'\
% (specfile, )
warnings.warn(warntext)
continue
else:
path = self.info[specfile]['path'] if path is None else path
with aux.PartiallySafeReplace() as msr:
filename = specfile + '.siic'
filepath = aux.joinpath(path, filename)
with msr.open(filepath, mode='w+b') as openfile:
self._writeContainer(openfile, specfile, compress) | [
"def",
"save",
"(",
"self",
",",
"specfiles",
"=",
"None",
",",
"compress",
"=",
"True",
",",
"path",
"=",
"None",
")",
":",
"if",
"specfiles",
"is",
"None",
":",
"specfiles",
"=",
"[",
"_",
"for",
"_",
"in",
"viewkeys",
"(",
"self",
".",
"info",
... | Writes the specified specfiles to ``siic`` files on the hard disk.
.. note::
If ``.save()`` is called and no ``siic`` files are present in the
specified path new files are generated, otherwise old files are
replaced.
:param specfiles: the name of an ms-run file or a list of names. If None
all specfiles are selected.
:param compress: bool, True to use zip file compression
:param path: filedirectory to which the ``siic`` files are written. By
default the parameter is set to ``None`` and the filedirectory is
read from ``self.info[specfile]['path']`` | [
"Writes",
"the",
"specified",
"specfiles",
"to",
"siic",
"files",
"on",
"the",
"hard",
"disk",
"."
] | f15fcfd24df306d8420540460d902aa3073ec133 | https://github.com/hollenstein/maspy/blob/f15fcfd24df306d8420540460d902aa3073ec133/maspy/core.py#L1496-L1530 | train | 55,382 |
hollenstein/maspy | maspy/core.py | SiiContainer.calcMz | def calcMz(self, specfiles=None, guessCharge=True, obsMzKey='obsMz'):
"""Calculate the exact mass for ``Sii`` elements from the
``Sii.peptide`` sequence.
:param specfiles: the name of an ms-run file or a list of names. If None
all specfiles are selected.
:param guessCharge: bool, True if the charge should be guessed if the
attribute ``charge`` is missing from ``Sii``. Uses the calculated
peptide mass and the observed m/z value to calculate the charge.
:param obsMzKey: attribute name of the observed m/z value in ``Sii``.
"""
#TODO: important to test function, since changes were made
_calcMass = maspy.peptidemethods.calcPeptideMass
_calcMzFromMass = maspy.peptidemethods.calcMzFromMass
_massProton = maspy.constants.atomicMassProton
_guessCharge = lambda mass, mz: round(mass / (mz - _massProton), 0)
if specfiles is None:
specfiles = [_ for _ in viewkeys(self.info)]
else:
specfiles = aux.toList(specfiles)
tempMasses = dict()
for specfile in specfiles:
if specfile not in self.info:
warntext = 'Error while calling "SiiContainer.calcMz()": '\
'"%s" is not present in "SiiContainer.info"!'\
% (specfile, )
warnings.warn(warntext)
else:
for sii in self.getItems(specfiles=specfile):
peptide = sii.peptide
if peptide not in tempMasses:
if hasattr(sii, 'diPeptide'):
tempMasses[peptide] = (_calcMass(sii.peptide1) +
_calcMass(sii.peptide2)
)
else:
tempMasses[peptide] = _calcMass(peptide)
peptideMass = tempMasses[peptide]
if sii.charge is not None:
sii.excMz = _calcMzFromMass(peptideMass, sii.charge)
elif guessCharge:
guessedCharge = _guessCharge(peptideMass,
getattr(sii, obsMzKey)
)
sii.excMz = _calcMzFromMass(peptideMass, guessedCharge)
sii.charge = guessedCharge
else:
sii.excMz = None
del(tempMasses) | python | def calcMz(self, specfiles=None, guessCharge=True, obsMzKey='obsMz'):
"""Calculate the exact mass for ``Sii`` elements from the
``Sii.peptide`` sequence.
:param specfiles: the name of an ms-run file or a list of names. If None
all specfiles are selected.
:param guessCharge: bool, True if the charge should be guessed if the
attribute ``charge`` is missing from ``Sii``. Uses the calculated
peptide mass and the observed m/z value to calculate the charge.
:param obsMzKey: attribute name of the observed m/z value in ``Sii``.
"""
#TODO: important to test function, since changes were made
_calcMass = maspy.peptidemethods.calcPeptideMass
_calcMzFromMass = maspy.peptidemethods.calcMzFromMass
_massProton = maspy.constants.atomicMassProton
_guessCharge = lambda mass, mz: round(mass / (mz - _massProton), 0)
if specfiles is None:
specfiles = [_ for _ in viewkeys(self.info)]
else:
specfiles = aux.toList(specfiles)
tempMasses = dict()
for specfile in specfiles:
if specfile not in self.info:
warntext = 'Error while calling "SiiContainer.calcMz()": '\
'"%s" is not present in "SiiContainer.info"!'\
% (specfile, )
warnings.warn(warntext)
else:
for sii in self.getItems(specfiles=specfile):
peptide = sii.peptide
if peptide not in tempMasses:
if hasattr(sii, 'diPeptide'):
tempMasses[peptide] = (_calcMass(sii.peptide1) +
_calcMass(sii.peptide2)
)
else:
tempMasses[peptide] = _calcMass(peptide)
peptideMass = tempMasses[peptide]
if sii.charge is not None:
sii.excMz = _calcMzFromMass(peptideMass, sii.charge)
elif guessCharge:
guessedCharge = _guessCharge(peptideMass,
getattr(sii, obsMzKey)
)
sii.excMz = _calcMzFromMass(peptideMass, guessedCharge)
sii.charge = guessedCharge
else:
sii.excMz = None
del(tempMasses) | [
"def",
"calcMz",
"(",
"self",
",",
"specfiles",
"=",
"None",
",",
"guessCharge",
"=",
"True",
",",
"obsMzKey",
"=",
"'obsMz'",
")",
":",
"#TODO: important to test function, since changes were made",
"_calcMass",
"=",
"maspy",
".",
"peptidemethods",
".",
"calcPeptide... | Calculate the exact mass for ``Sii`` elements from the
``Sii.peptide`` sequence.
:param specfiles: the name of an ms-run file or a list of names. If None
all specfiles are selected.
:param guessCharge: bool, True if the charge should be guessed if the
attribute ``charge`` is missing from ``Sii``. Uses the calculated
peptide mass and the observed m/z value to calculate the charge.
:param obsMzKey: attribute name of the observed m/z value in ``Sii``. | [
"Calculate",
"the",
"exact",
"mass",
"for",
"Sii",
"elements",
"from",
"the",
"Sii",
".",
"peptide",
"sequence",
"."
] | f15fcfd24df306d8420540460d902aa3073ec133 | https://github.com/hollenstein/maspy/blob/f15fcfd24df306d8420540460d902aa3073ec133/maspy/core.py#L1631-L1681 | train | 55,383 |
hollenstein/maspy | maspy/core.py | FiContainer._writeContainer | def _writeContainer(self, filelike, specfile, compress):
"""Writes the ``self.container`` entry of the specified specfile to the
``fic`` format.
:param filelike: path to a file (str) or a file-like object
:param specfile: name of an ms-run file present in ``self.info``
:param compress: bool, True to use zip file compression
.. note::
In addition it could also dump the ``self.info`` entry to the
zipfile with the filename ``info``, but this is not used at the
moment. For details see :func:`maspy.auxiliary.writeJsonZipfile()`
"""
aux.writeJsonZipfile(filelike, self.container[specfile],
compress=compress
) | python | def _writeContainer(self, filelike, specfile, compress):
"""Writes the ``self.container`` entry of the specified specfile to the
``fic`` format.
:param filelike: path to a file (str) or a file-like object
:param specfile: name of an ms-run file present in ``self.info``
:param compress: bool, True to use zip file compression
.. note::
In addition it could also dump the ``self.info`` entry to the
zipfile with the filename ``info``, but this is not used at the
moment. For details see :func:`maspy.auxiliary.writeJsonZipfile()`
"""
aux.writeJsonZipfile(filelike, self.container[specfile],
compress=compress
) | [
"def",
"_writeContainer",
"(",
"self",
",",
"filelike",
",",
"specfile",
",",
"compress",
")",
":",
"aux",
".",
"writeJsonZipfile",
"(",
"filelike",
",",
"self",
".",
"container",
"[",
"specfile",
"]",
",",
"compress",
"=",
"compress",
")"
] | Writes the ``self.container`` entry of the specified specfile to the
``fic`` format.
:param filelike: path to a file (str) or a file-like object
:param specfile: name of an ms-run file present in ``self.info``
:param compress: bool, True to use zip file compression
.. note::
In addition it could also dump the ``self.info`` entry to the
zipfile with the filename ``info``, but this is not used at the
moment. For details see :func:`maspy.auxiliary.writeJsonZipfile()` | [
"Writes",
"the",
"self",
".",
"container",
"entry",
"of",
"the",
"specified",
"specfile",
"to",
"the",
"fic",
"format",
"."
] | f15fcfd24df306d8420540460d902aa3073ec133 | https://github.com/hollenstein/maspy/blob/f15fcfd24df306d8420540460d902aa3073ec133/maspy/core.py#L1951-L1966 | train | 55,384 |
hollenstein/maspy | maspy/core.py | FiContainer.load | def load(self, specfiles=None):
"""Imports the specified ``fic`` files from the hard disk.
:param specfiles: the name of an ms-run file or a list of names. If None
all specfiles are selected.
:type specfiles: None, str, [str, str]
"""
if specfiles is None:
specfiles = [_ for _ in viewkeys(self.info)]
else:
specfiles = aux.toList(specfiles)
for specfile in specfiles:
if specfile not in self.info:
warntext = 'Error while calling "FiContainer.load()": "%s" is'\
' not present in "FiContainer.info"!'\
% (specfile, )
warnings.warn(warntext)
continue
else:
fiPath = aux.joinpath(self.info[specfile]['path'],
specfile+'.fic'
)
with zipfile.ZipFile(fiPath, 'r') as containerZip:
#Convert the zipfile data into a str object, necessary since
#containerZip.read() returns a bytes object.
jsonString = io.TextIOWrapper(containerZip.open('data'),
encoding='utf-8'
).read()
#infoString = io.TextIOWrapper(containerZip.open('info'),
# encoding='utf-8'
# ).read()
self.container[specfile] = json.loads(jsonString,
object_hook=Fi.jsonHook
) | python | def load(self, specfiles=None):
"""Imports the specified ``fic`` files from the hard disk.
:param specfiles: the name of an ms-run file or a list of names. If None
all specfiles are selected.
:type specfiles: None, str, [str, str]
"""
if specfiles is None:
specfiles = [_ for _ in viewkeys(self.info)]
else:
specfiles = aux.toList(specfiles)
for specfile in specfiles:
if specfile not in self.info:
warntext = 'Error while calling "FiContainer.load()": "%s" is'\
' not present in "FiContainer.info"!'\
% (specfile, )
warnings.warn(warntext)
continue
else:
fiPath = aux.joinpath(self.info[specfile]['path'],
specfile+'.fic'
)
with zipfile.ZipFile(fiPath, 'r') as containerZip:
#Convert the zipfile data into a str object, necessary since
#containerZip.read() returns a bytes object.
jsonString = io.TextIOWrapper(containerZip.open('data'),
encoding='utf-8'
).read()
#infoString = io.TextIOWrapper(containerZip.open('info'),
# encoding='utf-8'
# ).read()
self.container[specfile] = json.loads(jsonString,
object_hook=Fi.jsonHook
) | [
"def",
"load",
"(",
"self",
",",
"specfiles",
"=",
"None",
")",
":",
"if",
"specfiles",
"is",
"None",
":",
"specfiles",
"=",
"[",
"_",
"for",
"_",
"in",
"viewkeys",
"(",
"self",
".",
"info",
")",
"]",
"else",
":",
"specfiles",
"=",
"aux",
".",
"t... | Imports the specified ``fic`` files from the hard disk.
:param specfiles: the name of an ms-run file or a list of names. If None
all specfiles are selected.
:type specfiles: None, str, [str, str] | [
"Imports",
"the",
"specified",
"fic",
"files",
"from",
"the",
"hard",
"disk",
"."
] | f15fcfd24df306d8420540460d902aa3073ec133 | https://github.com/hollenstein/maspy/blob/f15fcfd24df306d8420540460d902aa3073ec133/maspy/core.py#L1974-L2008 | train | 55,385 |
ProjetPP/PPP-datamodel-Python | ppp_datamodel/utils/serializableattributesholder.py | SerializableAttributesHolder.as_dict | def as_dict(self):
"""Returns a JSON-serializeable object representing this tree."""
def conv(v):
if isinstance(v, SerializableAttributesHolder):
return v.as_dict()
elif isinstance(v, list):
return [conv(x) for x in v]
elif isinstance(v, dict):
return {x:conv(y) for (x,y) in v.items()}
else:
return v
return {k.replace('_', '-'): conv(v) for (k, v) in self._attributes.items()} | python | def as_dict(self):
"""Returns a JSON-serializeable object representing this tree."""
def conv(v):
if isinstance(v, SerializableAttributesHolder):
return v.as_dict()
elif isinstance(v, list):
return [conv(x) for x in v]
elif isinstance(v, dict):
return {x:conv(y) for (x,y) in v.items()}
else:
return v
return {k.replace('_', '-'): conv(v) for (k, v) in self._attributes.items()} | [
"def",
"as_dict",
"(",
"self",
")",
":",
"def",
"conv",
"(",
"v",
")",
":",
"if",
"isinstance",
"(",
"v",
",",
"SerializableAttributesHolder",
")",
":",
"return",
"v",
".",
"as_dict",
"(",
")",
"elif",
"isinstance",
"(",
"v",
",",
"list",
")",
":",
... | Returns a JSON-serializeable object representing this tree. | [
"Returns",
"a",
"JSON",
"-",
"serializeable",
"object",
"representing",
"this",
"tree",
"."
] | 0c7958fb4df75468fd3137240a5065925c239776 | https://github.com/ProjetPP/PPP-datamodel-Python/blob/0c7958fb4df75468fd3137240a5065925c239776/ppp_datamodel/utils/serializableattributesholder.py#L8-L19 | train | 55,386 |
ProjetPP/PPP-datamodel-Python | ppp_datamodel/utils/serializableattributesholder.py | SerializableAttributesHolder.from_json | def from_json(cls, data):
"""Decode a JSON string and inflate a node instance."""
# Decode JSON string
assert isinstance(data, str)
data = json.loads(data)
assert isinstance(data, dict)
return cls.from_dict(data) | python | def from_json(cls, data):
"""Decode a JSON string and inflate a node instance."""
# Decode JSON string
assert isinstance(data, str)
data = json.loads(data)
assert isinstance(data, dict)
return cls.from_dict(data) | [
"def",
"from_json",
"(",
"cls",
",",
"data",
")",
":",
"# Decode JSON string",
"assert",
"isinstance",
"(",
"data",
",",
"str",
")",
"data",
"=",
"json",
".",
"loads",
"(",
"data",
")",
"assert",
"isinstance",
"(",
"data",
",",
"dict",
")",
"return",
"... | Decode a JSON string and inflate a node instance. | [
"Decode",
"a",
"JSON",
"string",
"and",
"inflate",
"a",
"node",
"instance",
"."
] | 0c7958fb4df75468fd3137240a5065925c239776 | https://github.com/ProjetPP/PPP-datamodel-Python/blob/0c7958fb4df75468fd3137240a5065925c239776/ppp_datamodel/utils/serializableattributesholder.py#L30-L36 | train | 55,387 |
bitesofcode/projex | projex/funcutil.py | extract_keywords | def extract_keywords(func):
"""
Parses the keywords from the given function.
:param func | <function>
"""
if hasattr(func, 'im_func'):
func = func.im_func
try:
return func.func_code.co_varnames[-len(func.func_defaults):]
except (TypeError, ValueError, IndexError):
return tuple() | python | def extract_keywords(func):
"""
Parses the keywords from the given function.
:param func | <function>
"""
if hasattr(func, 'im_func'):
func = func.im_func
try:
return func.func_code.co_varnames[-len(func.func_defaults):]
except (TypeError, ValueError, IndexError):
return tuple() | [
"def",
"extract_keywords",
"(",
"func",
")",
":",
"if",
"hasattr",
"(",
"func",
",",
"'im_func'",
")",
":",
"func",
"=",
"func",
".",
"im_func",
"try",
":",
"return",
"func",
".",
"func_code",
".",
"co_varnames",
"[",
"-",
"len",
"(",
"func",
".",
"f... | Parses the keywords from the given function.
:param func | <function> | [
"Parses",
"the",
"keywords",
"from",
"the",
"given",
"function",
"."
] | d31743ec456a41428709968ab11a2cf6c6c76247 | https://github.com/bitesofcode/projex/blob/d31743ec456a41428709968ab11a2cf6c6c76247/projex/funcutil.py#L5-L17 | train | 55,388 |
diamondman/proteusisc | proteusisc/drivers/digilentdriver.py | DigilentAdeptController.jtag_enable | def jtag_enable(self):
"""
Enables JTAG output on the controller. JTAG operations executed
before this function is called will return useless data or fail.
Usage:
>>> from proteusisc import getAttachedControllers, bitarray
>>> c = getAttachedControllers()[0]
>>> c.jtag_enable()
>>> c.write_tms_bits(bitarray("001011111"), return_tdo=True)
>>> c.jtag_disable()
"""
status, _ = self.bulkCommand(_BMSG_ENABLE_JTAG)
if status == 0:
self._jtagon = True
elif status == 3:
self._jtagon = True
raise JTAGAlreadyEnabledError()
else:
raise JTAGEnableFailedError("Error enabling JTAG. Error code: %s." %status) | python | def jtag_enable(self):
"""
Enables JTAG output on the controller. JTAG operations executed
before this function is called will return useless data or fail.
Usage:
>>> from proteusisc import getAttachedControllers, bitarray
>>> c = getAttachedControllers()[0]
>>> c.jtag_enable()
>>> c.write_tms_bits(bitarray("001011111"), return_tdo=True)
>>> c.jtag_disable()
"""
status, _ = self.bulkCommand(_BMSG_ENABLE_JTAG)
if status == 0:
self._jtagon = True
elif status == 3:
self._jtagon = True
raise JTAGAlreadyEnabledError()
else:
raise JTAGEnableFailedError("Error enabling JTAG. Error code: %s." %status) | [
"def",
"jtag_enable",
"(",
"self",
")",
":",
"status",
",",
"_",
"=",
"self",
".",
"bulkCommand",
"(",
"_BMSG_ENABLE_JTAG",
")",
"if",
"status",
"==",
"0",
":",
"self",
".",
"_jtagon",
"=",
"True",
"elif",
"status",
"==",
"3",
":",
"self",
".",
"_jta... | Enables JTAG output on the controller. JTAG operations executed
before this function is called will return useless data or fail.
Usage:
>>> from proteusisc import getAttachedControllers, bitarray
>>> c = getAttachedControllers()[0]
>>> c.jtag_enable()
>>> c.write_tms_bits(bitarray("001011111"), return_tdo=True)
>>> c.jtag_disable() | [
"Enables",
"JTAG",
"output",
"on",
"the",
"controller",
".",
"JTAG",
"operations",
"executed",
"before",
"this",
"function",
"is",
"called",
"will",
"return",
"useless",
"data",
"or",
"fail",
"."
] | 7622b7b04e63f9dc0f5a04429ff78d9a490c9c5c | https://github.com/diamondman/proteusisc/blob/7622b7b04e63f9dc0f5a04429ff78d9a490c9c5c/proteusisc/drivers/digilentdriver.py#L242-L261 | train | 55,389 |
diamondman/proteusisc | proteusisc/drivers/digilentdriver.py | DigilentAdeptController.jtag_disable | def jtag_disable(self):
"""
Disables JTAG output on the controller. JTAG operations executed
immediately after this function will return useless data or fail.
Usage:
>>> from proteusisc import getAttachedControllers, bitarray
>>> c = getAttachedControllers()[0]
>>> c.jtag_enable()
>>> c.write_tms_bits(bitarray("001011111"), return_tdo=True)
>>> c.jtag_disable()
"""
if not self._jtagon: return
status, _ = self.bulkCommand(_BMSG_DISABLE_JTAG)
if status == 0:
self._jtagon = False
elif status == 3:
raise JTAGControlError("Error Code %s"%status)
self.close_handle() | python | def jtag_disable(self):
"""
Disables JTAG output on the controller. JTAG operations executed
immediately after this function will return useless data or fail.
Usage:
>>> from proteusisc import getAttachedControllers, bitarray
>>> c = getAttachedControllers()[0]
>>> c.jtag_enable()
>>> c.write_tms_bits(bitarray("001011111"), return_tdo=True)
>>> c.jtag_disable()
"""
if not self._jtagon: return
status, _ = self.bulkCommand(_BMSG_DISABLE_JTAG)
if status == 0:
self._jtagon = False
elif status == 3:
raise JTAGControlError("Error Code %s"%status)
self.close_handle() | [
"def",
"jtag_disable",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"_jtagon",
":",
"return",
"status",
",",
"_",
"=",
"self",
".",
"bulkCommand",
"(",
"_BMSG_DISABLE_JTAG",
")",
"if",
"status",
"==",
"0",
":",
"self",
".",
"_jtagon",
"=",
"False",... | Disables JTAG output on the controller. JTAG operations executed
immediately after this function will return useless data or fail.
Usage:
>>> from proteusisc import getAttachedControllers, bitarray
>>> c = getAttachedControllers()[0]
>>> c.jtag_enable()
>>> c.write_tms_bits(bitarray("001011111"), return_tdo=True)
>>> c.jtag_disable() | [
"Disables",
"JTAG",
"output",
"on",
"the",
"controller",
".",
"JTAG",
"operations",
"executed",
"immediately",
"after",
"this",
"function",
"will",
"return",
"useless",
"data",
"or",
"fail",
"."
] | 7622b7b04e63f9dc0f5a04429ff78d9a490c9c5c | https://github.com/diamondman/proteusisc/blob/7622b7b04e63f9dc0f5a04429ff78d9a490c9c5c/proteusisc/drivers/digilentdriver.py#L263-L283 | train | 55,390 |
diamondman/proteusisc | proteusisc/drivers/digilentdriver.py | DigilentAdeptController.write_tms_tdi_bits | def write_tms_tdi_bits(self, tmsdata, tdidata, return_tdo=False):
"""
Command controller to write arbitrary TDI and TMS data to the
physical scan chain. Optionally return TDO bits sent back
from the scan chain.
Args:
tmsdata - bits to send over TMS line of scan chain (bitarray)
must be the same length ad tdidata
tdidata - bits to send over TDI line of scan chain (bitarray)
must be the same length ad tmsdata
return_tdo (bool) - return the devices bitarray response
Returns:
None by default or the (bitarray) response of the device
after receiving data, if return_tdo is True.
Usage:
>>> from proteusisc import getAttachedControllers, bitarray
>>> c = getAttachedControllers()[0]
>>> c.jtag_enable()
>>> c.write_tms_tdi_bits(bitarray("00001"),
bitarray("11111"), return_tdo=True)
>>> c.jtag_disable()
"""
self._check_jtag()
if len(tmsdata) != len(tdidata):
raise Exception("TMSdata and TDIData must be the same length")
self._update_scanchain(tmsdata)
count = len(tmsdata)
t = time()
outdata = bitarray([val for pair in zip(tmsdata, tdidata)
for val in pair])
outdata = build_byte_align_buff(outdata).tobytes()[::-1]
if self._scanchain and self._scanchain._print_statistics:
print("TDI/TDI DATA PREP TIME", time()-t)#pragma: no cover
t = time()
self.bulkCommandDefault(_BMSG_WRITE_TMS_TDI % \
(return_tdo, count.to_bytes(4, 'little')))
self.bulkWriteData(outdata)
if self._scanchain and self._scanchain._print_statistics:
print("TRANSFER TIME", time()-t)
t = time()
tdo_bits = self._read_tdo(count) if return_tdo else None
if self._scanchain and self._scanchain._print_statistics:
print("TDO READ TIME", time()-t)#pragma: no cover
self._get_adv_trans_stats(0x0A, return_tdo)
return tdo_bits | python | def write_tms_tdi_bits(self, tmsdata, tdidata, return_tdo=False):
"""
Command controller to write arbitrary TDI and TMS data to the
physical scan chain. Optionally return TDO bits sent back
from the scan chain.
Args:
tmsdata - bits to send over TMS line of scan chain (bitarray)
must be the same length ad tdidata
tdidata - bits to send over TDI line of scan chain (bitarray)
must be the same length ad tmsdata
return_tdo (bool) - return the devices bitarray response
Returns:
None by default or the (bitarray) response of the device
after receiving data, if return_tdo is True.
Usage:
>>> from proteusisc import getAttachedControllers, bitarray
>>> c = getAttachedControllers()[0]
>>> c.jtag_enable()
>>> c.write_tms_tdi_bits(bitarray("00001"),
bitarray("11111"), return_tdo=True)
>>> c.jtag_disable()
"""
self._check_jtag()
if len(tmsdata) != len(tdidata):
raise Exception("TMSdata and TDIData must be the same length")
self._update_scanchain(tmsdata)
count = len(tmsdata)
t = time()
outdata = bitarray([val for pair in zip(tmsdata, tdidata)
for val in pair])
outdata = build_byte_align_buff(outdata).tobytes()[::-1]
if self._scanchain and self._scanchain._print_statistics:
print("TDI/TDI DATA PREP TIME", time()-t)#pragma: no cover
t = time()
self.bulkCommandDefault(_BMSG_WRITE_TMS_TDI % \
(return_tdo, count.to_bytes(4, 'little')))
self.bulkWriteData(outdata)
if self._scanchain and self._scanchain._print_statistics:
print("TRANSFER TIME", time()-t)
t = time()
tdo_bits = self._read_tdo(count) if return_tdo else None
if self._scanchain and self._scanchain._print_statistics:
print("TDO READ TIME", time()-t)#pragma: no cover
self._get_adv_trans_stats(0x0A, return_tdo)
return tdo_bits | [
"def",
"write_tms_tdi_bits",
"(",
"self",
",",
"tmsdata",
",",
"tdidata",
",",
"return_tdo",
"=",
"False",
")",
":",
"self",
".",
"_check_jtag",
"(",
")",
"if",
"len",
"(",
"tmsdata",
")",
"!=",
"len",
"(",
"tdidata",
")",
":",
"raise",
"Exception",
"(... | Command controller to write arbitrary TDI and TMS data to the
physical scan chain. Optionally return TDO bits sent back
from the scan chain.
Args:
tmsdata - bits to send over TMS line of scan chain (bitarray)
must be the same length ad tdidata
tdidata - bits to send over TDI line of scan chain (bitarray)
must be the same length ad tmsdata
return_tdo (bool) - return the devices bitarray response
Returns:
None by default or the (bitarray) response of the device
after receiving data, if return_tdo is True.
Usage:
>>> from proteusisc import getAttachedControllers, bitarray
>>> c = getAttachedControllers()[0]
>>> c.jtag_enable()
>>> c.write_tms_tdi_bits(bitarray("00001"),
bitarray("11111"), return_tdo=True)
>>> c.jtag_disable() | [
"Command",
"controller",
"to",
"write",
"arbitrary",
"TDI",
"and",
"TMS",
"data",
"to",
"the",
"physical",
"scan",
"chain",
".",
"Optionally",
"return",
"TDO",
"bits",
"sent",
"back",
"from",
"the",
"scan",
"chain",
"."
] | 7622b7b04e63f9dc0f5a04429ff78d9a490c9c5c | https://github.com/diamondman/proteusisc/blob/7622b7b04e63f9dc0f5a04429ff78d9a490c9c5c/proteusisc/drivers/digilentdriver.py#L369-L423 | train | 55,391 |
hollenstein/maspy | maspy/proteindb.py | _readFastaFile | def _readFastaFile(filepath):
"""Read a FASTA file and yields tuples of 'header' and 'sequence' entries.
:param filepath: file path of the FASTA file
:yields: FASTA entries in the format ('header', 'sequence').
The 'header' string does not contain the '>' and trailing white spaces.
The 'sequence' string does not contain trailing white spaces, a '*' at
the end of the sequence is removed.
See also :func:`importProteinDatabase` and
:func:`maspy.peptidemethods.digestInSilico`.
"""
processSequences = lambda i: ''.join([s.rstrip() for s in i]).rstrip('*')
processHeaderLine = lambda line: line[1:].rstrip()
with io.open(filepath) as openfile:
#Iterate through lines until the first header is encountered
try:
line = next(openfile)
while line[0] != '>':
line = next(openfile)
header = processHeaderLine(line)
sequences = list()
except StopIteration:
errorText = 'File does not contain fasta entries.'
raise maspy.errors.FileFormatError(errorText)
for line in openfile:
if line[0] == '>':
yield header, processSequences(sequences)
header = processHeaderLine(line)
sequences = list()
else:
sequences.append(line)
#Yield last entry
if sequences:
yield header, processSequences(sequences) | python | def _readFastaFile(filepath):
"""Read a FASTA file and yields tuples of 'header' and 'sequence' entries.
:param filepath: file path of the FASTA file
:yields: FASTA entries in the format ('header', 'sequence').
The 'header' string does not contain the '>' and trailing white spaces.
The 'sequence' string does not contain trailing white spaces, a '*' at
the end of the sequence is removed.
See also :func:`importProteinDatabase` and
:func:`maspy.peptidemethods.digestInSilico`.
"""
processSequences = lambda i: ''.join([s.rstrip() for s in i]).rstrip('*')
processHeaderLine = lambda line: line[1:].rstrip()
with io.open(filepath) as openfile:
#Iterate through lines until the first header is encountered
try:
line = next(openfile)
while line[0] != '>':
line = next(openfile)
header = processHeaderLine(line)
sequences = list()
except StopIteration:
errorText = 'File does not contain fasta entries.'
raise maspy.errors.FileFormatError(errorText)
for line in openfile:
if line[0] == '>':
yield header, processSequences(sequences)
header = processHeaderLine(line)
sequences = list()
else:
sequences.append(line)
#Yield last entry
if sequences:
yield header, processSequences(sequences) | [
"def",
"_readFastaFile",
"(",
"filepath",
")",
":",
"processSequences",
"=",
"lambda",
"i",
":",
"''",
".",
"join",
"(",
"[",
"s",
".",
"rstrip",
"(",
")",
"for",
"s",
"in",
"i",
"]",
")",
".",
"rstrip",
"(",
"'*'",
")",
"processHeaderLine",
"=",
"... | Read a FASTA file and yields tuples of 'header' and 'sequence' entries.
:param filepath: file path of the FASTA file
:yields: FASTA entries in the format ('header', 'sequence').
The 'header' string does not contain the '>' and trailing white spaces.
The 'sequence' string does not contain trailing white spaces, a '*' at
the end of the sequence is removed.
See also :func:`importProteinDatabase` and
:func:`maspy.peptidemethods.digestInSilico`. | [
"Read",
"a",
"FASTA",
"file",
"and",
"yields",
"tuples",
"of",
"header",
"and",
"sequence",
"entries",
"."
] | f15fcfd24df306d8420540460d902aa3073ec133 | https://github.com/hollenstein/maspy/blob/f15fcfd24df306d8420540460d902aa3073ec133/maspy/proteindb.py#L506-L543 | train | 55,392 |
hollenstein/maspy | maspy/proteindb.py | fastaParseSgd | def fastaParseSgd(header):
"""Custom parser for fasta headers in the SGD format, see
www.yeastgenome.org.
:param header: str, protein entry header from a fasta file
:returns: dict, parsed header
"""
rePattern = '([\S]+)\s([\S]+).+(\".+\")'
ID, name, description = re.match(rePattern, header).groups()
info = {'id':ID, 'name':name, 'description':description}
return info | python | def fastaParseSgd(header):
"""Custom parser for fasta headers in the SGD format, see
www.yeastgenome.org.
:param header: str, protein entry header from a fasta file
:returns: dict, parsed header
"""
rePattern = '([\S]+)\s([\S]+).+(\".+\")'
ID, name, description = re.match(rePattern, header).groups()
info = {'id':ID, 'name':name, 'description':description}
return info | [
"def",
"fastaParseSgd",
"(",
"header",
")",
":",
"rePattern",
"=",
"'([\\S]+)\\s([\\S]+).+(\\\".+\\\")'",
"ID",
",",
"name",
",",
"description",
"=",
"re",
".",
"match",
"(",
"rePattern",
",",
"header",
")",
".",
"groups",
"(",
")",
"info",
"=",
"{",
"'id'... | Custom parser for fasta headers in the SGD format, see
www.yeastgenome.org.
:param header: str, protein entry header from a fasta file
:returns: dict, parsed header | [
"Custom",
"parser",
"for",
"fasta",
"headers",
"in",
"the",
"SGD",
"format",
"see",
"www",
".",
"yeastgenome",
".",
"org",
"."
] | f15fcfd24df306d8420540460d902aa3073ec133 | https://github.com/hollenstein/maspy/blob/f15fcfd24df306d8420540460d902aa3073ec133/maspy/proteindb.py#L578-L589 | train | 55,393 |
hollenstein/maspy | maspy/proteindb.py | ProteinDatabase.save | def save(self, path, compress=True):
"""Writes the ``.proteins`` and ``.peptides`` entries to the hard disk
as a ``proteindb`` file.
.. note::
If ``.save()`` is called and no ``proteindb`` file is present in the
specified path a new files is generated, otherwise the old file is
replaced.
:param path: filedirectory to which the ``proteindb`` file is written.
The output file name is specified by ``self.info['name']``
:param compress: bool, True to use zip file compression
"""
with aux.PartiallySafeReplace() as msr:
filename = self.info['name'] + '.proteindb'
filepath = aux.joinpath(path, filename)
with msr.open(filepath, mode='w+b') as openfile:
self._writeContainer(openfile, compress=compress) | python | def save(self, path, compress=True):
"""Writes the ``.proteins`` and ``.peptides`` entries to the hard disk
as a ``proteindb`` file.
.. note::
If ``.save()`` is called and no ``proteindb`` file is present in the
specified path a new files is generated, otherwise the old file is
replaced.
:param path: filedirectory to which the ``proteindb`` file is written.
The output file name is specified by ``self.info['name']``
:param compress: bool, True to use zip file compression
"""
with aux.PartiallySafeReplace() as msr:
filename = self.info['name'] + '.proteindb'
filepath = aux.joinpath(path, filename)
with msr.open(filepath, mode='w+b') as openfile:
self._writeContainer(openfile, compress=compress) | [
"def",
"save",
"(",
"self",
",",
"path",
",",
"compress",
"=",
"True",
")",
":",
"with",
"aux",
".",
"PartiallySafeReplace",
"(",
")",
"as",
"msr",
":",
"filename",
"=",
"self",
".",
"info",
"[",
"'name'",
"]",
"+",
"'.proteindb'",
"filepath",
"=",
"... | Writes the ``.proteins`` and ``.peptides`` entries to the hard disk
as a ``proteindb`` file.
.. note::
If ``.save()`` is called and no ``proteindb`` file is present in the
specified path a new files is generated, otherwise the old file is
replaced.
:param path: filedirectory to which the ``proteindb`` file is written.
The output file name is specified by ``self.info['name']``
:param compress: bool, True to use zip file compression | [
"Writes",
"the",
".",
"proteins",
"and",
".",
"peptides",
"entries",
"to",
"the",
"hard",
"disk",
"as",
"a",
"proteindb",
"file",
"."
] | f15fcfd24df306d8420540460d902aa3073ec133 | https://github.com/hollenstein/maspy/blob/f15fcfd24df306d8420540460d902aa3073ec133/maspy/proteindb.py#L255-L272 | train | 55,394 |
hollenstein/maspy | maspy/proteindb.py | ProteinDatabase.load | def load(cls, path, name):
"""Imports the specified ``proteindb`` file from the hard disk.
:param path: filedirectory of the ``proteindb`` file
:param name: filename without the file extension ".proteindb"
.. note:: this generates rather large files, which actually take longer
to import than to newly generate. Maybe saving / loading should be
limited to the protein database whitout in silico digestion
information.
"""
filepath = aux.joinpath(path, name + '.proteindb')
with zipfile.ZipFile(filepath, 'r', allowZip64=True) as containerZip:
#Convert the zipfile data into a str object, necessary since
#containerZip.read() returns a bytes object.
proteinsString = io.TextIOWrapper(containerZip.open('proteins'),
encoding='utf-8'
).read()
peptidesString = io.TextIOWrapper(containerZip.open('peptides'),
encoding='utf-8'
).read()
infoString = io.TextIOWrapper(containerZip.open('info'),
encoding='utf-8'
).read()
newInstance = cls()
newInstance.proteins = json.loads(proteinsString,
object_hook=ProteinSequence.jsonHook)
newInstance.peptides = json.loads(peptidesString,
object_hook=PeptideSequence.jsonHook)
newInstance.info.update(json.loads(infoString))
return newInstance | python | def load(cls, path, name):
"""Imports the specified ``proteindb`` file from the hard disk.
:param path: filedirectory of the ``proteindb`` file
:param name: filename without the file extension ".proteindb"
.. note:: this generates rather large files, which actually take longer
to import than to newly generate. Maybe saving / loading should be
limited to the protein database whitout in silico digestion
information.
"""
filepath = aux.joinpath(path, name + '.proteindb')
with zipfile.ZipFile(filepath, 'r', allowZip64=True) as containerZip:
#Convert the zipfile data into a str object, necessary since
#containerZip.read() returns a bytes object.
proteinsString = io.TextIOWrapper(containerZip.open('proteins'),
encoding='utf-8'
).read()
peptidesString = io.TextIOWrapper(containerZip.open('peptides'),
encoding='utf-8'
).read()
infoString = io.TextIOWrapper(containerZip.open('info'),
encoding='utf-8'
).read()
newInstance = cls()
newInstance.proteins = json.loads(proteinsString,
object_hook=ProteinSequence.jsonHook)
newInstance.peptides = json.loads(peptidesString,
object_hook=PeptideSequence.jsonHook)
newInstance.info.update(json.loads(infoString))
return newInstance | [
"def",
"load",
"(",
"cls",
",",
"path",
",",
"name",
")",
":",
"filepath",
"=",
"aux",
".",
"joinpath",
"(",
"path",
",",
"name",
"+",
"'.proteindb'",
")",
"with",
"zipfile",
".",
"ZipFile",
"(",
"filepath",
",",
"'r'",
",",
"allowZip64",
"=",
"True"... | Imports the specified ``proteindb`` file from the hard disk.
:param path: filedirectory of the ``proteindb`` file
:param name: filename without the file extension ".proteindb"
.. note:: this generates rather large files, which actually take longer
to import than to newly generate. Maybe saving / loading should be
limited to the protein database whitout in silico digestion
information. | [
"Imports",
"the",
"specified",
"proteindb",
"file",
"from",
"the",
"hard",
"disk",
"."
] | f15fcfd24df306d8420540460d902aa3073ec133 | https://github.com/hollenstein/maspy/blob/f15fcfd24df306d8420540460d902aa3073ec133/maspy/proteindb.py#L293-L324 | train | 55,395 |
Prev/shaman | shamanld/trainer.py | fetch_keywords | def fetch_keywords(codedata) :
""" Fetch keywords by shaman.KeywordFetcher
Get average probabilities of keyword and language
"""
# Read row in codedata and count keywords in codes with langauge
tmp = {}
language_counts = {}
for index, (language, code) in enumerate(codedata) :
if language not in shaman.SUPPORTING_LANGUAGES :
continue
if language not in tmp :
tmp[language] = {}
language_counts[language] = 0
language_counts[language] += 1
for keyword in shaman.KeywordFetcher.fetch( code ) :
# if keyword exists in fetched data, add '1' to keyword data
tmp[language][keyword] = tmp[language].get(keyword, 0) + 1
print('Fetch keyword %d/%d ' % (index, len(codedata)), end='\r')
# Get dataset indexed by keyword
ret = {}
for language in tmp :
for keyword, count in tmp[ language ].items() :
if keyword not in ret :
ret[ keyword ] = {}
ret[ keyword ][ language ] = (count / language_counts[ language ]) # Probability
print('Fetch keyword completed ')
return ret | python | def fetch_keywords(codedata) :
""" Fetch keywords by shaman.KeywordFetcher
Get average probabilities of keyword and language
"""
# Read row in codedata and count keywords in codes with langauge
tmp = {}
language_counts = {}
for index, (language, code) in enumerate(codedata) :
if language not in shaman.SUPPORTING_LANGUAGES :
continue
if language not in tmp :
tmp[language] = {}
language_counts[language] = 0
language_counts[language] += 1
for keyword in shaman.KeywordFetcher.fetch( code ) :
# if keyword exists in fetched data, add '1' to keyword data
tmp[language][keyword] = tmp[language].get(keyword, 0) + 1
print('Fetch keyword %d/%d ' % (index, len(codedata)), end='\r')
# Get dataset indexed by keyword
ret = {}
for language in tmp :
for keyword, count in tmp[ language ].items() :
if keyword not in ret :
ret[ keyword ] = {}
ret[ keyword ][ language ] = (count / language_counts[ language ]) # Probability
print('Fetch keyword completed ')
return ret | [
"def",
"fetch_keywords",
"(",
"codedata",
")",
":",
"# Read row in codedata and count keywords in codes with langauge",
"tmp",
"=",
"{",
"}",
"language_counts",
"=",
"{",
"}",
"for",
"index",
",",
"(",
"language",
",",
"code",
")",
"in",
"enumerate",
"(",
"codedat... | Fetch keywords by shaman.KeywordFetcher
Get average probabilities of keyword and language | [
"Fetch",
"keywords",
"by",
"shaman",
".",
"KeywordFetcher",
"Get",
"average",
"probabilities",
"of",
"keyword",
"and",
"language"
] | 82891c17c6302f7f9881a215789856d460a85f9c | https://github.com/Prev/shaman/blob/82891c17c6302f7f9881a215789856d460a85f9c/shamanld/trainer.py#L60-L97 | train | 55,396 |
Prev/shaman | shamanld/trainer.py | match_patterns | def match_patterns(codedata) :
""" Match patterns by shaman.PatternMatcher
Get average ratio of pattern and language
"""
ret = {}
for index1, pattern in enumerate(shaman.PatternMatcher.PATTERNS) :
print('Matching pattern %d "%s"' % (index1+1, pattern))
matcher = shaman.PatternMatcher(pattern)
tmp = {}
for index2, (language, code) in enumerate(codedata) :
if language not in shaman.SUPPORTING_LANGUAGES :
continue
if len(code) <= 20 or len(code) > 100000 :
continue
if language not in tmp :
tmp[language] = []
ratio = matcher.getratio(code)
tmp[language].append(ratio)
print('Matching patterns %d/%d ' % (index2, len(codedata)), end='\r')
ret[pattern] = {}
for language, data in tmp.items() :
ret[pattern][language] = sum(tmp[language]) / max(len(tmp[language]), 1)
print('Matching patterns completed ')
return ret | python | def match_patterns(codedata) :
""" Match patterns by shaman.PatternMatcher
Get average ratio of pattern and language
"""
ret = {}
for index1, pattern in enumerate(shaman.PatternMatcher.PATTERNS) :
print('Matching pattern %d "%s"' % (index1+1, pattern))
matcher = shaman.PatternMatcher(pattern)
tmp = {}
for index2, (language, code) in enumerate(codedata) :
if language not in shaman.SUPPORTING_LANGUAGES :
continue
if len(code) <= 20 or len(code) > 100000 :
continue
if language not in tmp :
tmp[language] = []
ratio = matcher.getratio(code)
tmp[language].append(ratio)
print('Matching patterns %d/%d ' % (index2, len(codedata)), end='\r')
ret[pattern] = {}
for language, data in tmp.items() :
ret[pattern][language] = sum(tmp[language]) / max(len(tmp[language]), 1)
print('Matching patterns completed ')
return ret | [
"def",
"match_patterns",
"(",
"codedata",
")",
":",
"ret",
"=",
"{",
"}",
"for",
"index1",
",",
"pattern",
"in",
"enumerate",
"(",
"shaman",
".",
"PatternMatcher",
".",
"PATTERNS",
")",
":",
"print",
"(",
"'Matching pattern %d \"%s\"'",
"%",
"(",
"index1",
... | Match patterns by shaman.PatternMatcher
Get average ratio of pattern and language | [
"Match",
"patterns",
"by",
"shaman",
".",
"PatternMatcher",
"Get",
"average",
"ratio",
"of",
"pattern",
"and",
"language"
] | 82891c17c6302f7f9881a215789856d460a85f9c | https://github.com/Prev/shaman/blob/82891c17c6302f7f9881a215789856d460a85f9c/shamanld/trainer.py#L101-L135 | train | 55,397 |
codeforamerica/epa_python | epa/radinfo/radinfo.py | RADInfo.facility | def facility(self, column=None, value=None, **kwargs):
"""
Check information related to Radiation facilities.
>>> RADInfo().facility('state_code', 'CA')
"""
return self._resolve_call('RAD_FACILITY', column, value, **kwargs) | python | def facility(self, column=None, value=None, **kwargs):
"""
Check information related to Radiation facilities.
>>> RADInfo().facility('state_code', 'CA')
"""
return self._resolve_call('RAD_FACILITY', column, value, **kwargs) | [
"def",
"facility",
"(",
"self",
",",
"column",
"=",
"None",
",",
"value",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"self",
".",
"_resolve_call",
"(",
"'RAD_FACILITY'",
",",
"column",
",",
"value",
",",
"*",
"*",
"kwargs",
")"
] | Check information related to Radiation facilities.
>>> RADInfo().facility('state_code', 'CA') | [
"Check",
"information",
"related",
"to",
"Radiation",
"facilities",
"."
] | 62a53da62936bea8daa487a01a52b973e9062b2c | https://github.com/codeforamerica/epa_python/blob/62a53da62936bea8daa487a01a52b973e9062b2c/epa/radinfo/radinfo.py#L23-L29 | train | 55,398 |
codeforamerica/epa_python | epa/radinfo/radinfo.py | RADInfo.geo | def geo(self, column=None, value=None, **kwargs):
"""
Locate a facility through geographic location.
>>> RADInfo().geo('geometric_type_code', '001')
"""
return self._resolve_call('RAD_GEO_LOCATION', column, value, **kwargs) | python | def geo(self, column=None, value=None, **kwargs):
"""
Locate a facility through geographic location.
>>> RADInfo().geo('geometric_type_code', '001')
"""
return self._resolve_call('RAD_GEO_LOCATION', column, value, **kwargs) | [
"def",
"geo",
"(",
"self",
",",
"column",
"=",
"None",
",",
"value",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"self",
".",
"_resolve_call",
"(",
"'RAD_GEO_LOCATION'",
",",
"column",
",",
"value",
",",
"*",
"*",
"kwargs",
")"
] | Locate a facility through geographic location.
>>> RADInfo().geo('geometric_type_code', '001') | [
"Locate",
"a",
"facility",
"through",
"geographic",
"location",
"."
] | 62a53da62936bea8daa487a01a52b973e9062b2c | https://github.com/codeforamerica/epa_python/blob/62a53da62936bea8daa487a01a52b973e9062b2c/epa/radinfo/radinfo.py#L41-L47 | train | 55,399 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.