repo
stringlengths
7
55
path
stringlengths
4
127
func_name
stringlengths
1
88
original_string
stringlengths
75
19.8k
language
stringclasses
1 value
code
stringlengths
75
19.8k
code_tokens
listlengths
20
707
docstring
stringlengths
3
17.3k
docstring_tokens
listlengths
3
222
sha
stringlengths
40
40
url
stringlengths
87
242
partition
stringclasses
1 value
idx
int64
0
252k
klen/aioauth-client
aioauth_client.py
Client.user_info
async def user_info(self, loop=None, **kwargs): """Load user information from provider.""" if not self.user_info_url: raise NotImplementedError( 'The provider doesnt support user_info method.') data = await self.request('GET', self.user_info_url, loop=loop, **kwargs) user = User(**dict(self.user_parse(data))) return user, data
python
async def user_info(self, loop=None, **kwargs): """Load user information from provider.""" if not self.user_info_url: raise NotImplementedError( 'The provider doesnt support user_info method.') data = await self.request('GET', self.user_info_url, loop=loop, **kwargs) user = User(**dict(self.user_parse(data))) return user, data
[ "async", "def", "user_info", "(", "self", ",", "loop", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "not", "self", ".", "user_info_url", ":", "raise", "NotImplementedError", "(", "'The provider doesnt support user_info method.'", ")", "data", "=", "aw...
Load user information from provider.
[ "Load", "user", "information", "from", "provider", "." ]
54f58249496c26965adb4f752f2b24cfe18d0084
https://github.com/klen/aioauth-client/blob/54f58249496c26965adb4f752f2b24cfe18d0084/aioauth_client.py#L172-L180
train
208,500
klen/aioauth-client
aioauth_client.py
OAuth1Client.request
def request(self, method, url, params=None, **aio_kwargs): """Make a request to provider.""" oparams = { 'oauth_consumer_key': self.consumer_key, 'oauth_nonce': sha1(str(RANDOM()).encode('ascii')).hexdigest(), 'oauth_signature_method': self.signature.name, 'oauth_timestamp': str(int(time.time())), 'oauth_version': self.version, } oparams.update(params or {}) if self.oauth_token: oparams['oauth_token'] = self.oauth_token url = self._get_url(url) if urlsplit(url).query: raise ValueError( 'Request parameters should be in the "params" parameter, ' 'not inlined in the URL') oparams['oauth_signature'] = self.signature.sign( self.consumer_secret, method, url, oauth_token_secret=self.oauth_token_secret, **oparams) self.logger.debug("%s %s", url, oparams) return self._request(method, url, params=oparams, **aio_kwargs)
python
def request(self, method, url, params=None, **aio_kwargs): """Make a request to provider.""" oparams = { 'oauth_consumer_key': self.consumer_key, 'oauth_nonce': sha1(str(RANDOM()).encode('ascii')).hexdigest(), 'oauth_signature_method': self.signature.name, 'oauth_timestamp': str(int(time.time())), 'oauth_version': self.version, } oparams.update(params or {}) if self.oauth_token: oparams['oauth_token'] = self.oauth_token url = self._get_url(url) if urlsplit(url).query: raise ValueError( 'Request parameters should be in the "params" parameter, ' 'not inlined in the URL') oparams['oauth_signature'] = self.signature.sign( self.consumer_secret, method, url, oauth_token_secret=self.oauth_token_secret, **oparams) self.logger.debug("%s %s", url, oparams) return self._request(method, url, params=oparams, **aio_kwargs)
[ "def", "request", "(", "self", ",", "method", ",", "url", ",", "params", "=", "None", ",", "*", "*", "aio_kwargs", ")", ":", "oparams", "=", "{", "'oauth_consumer_key'", ":", "self", ".", "consumer_key", ",", "'oauth_nonce'", ":", "sha1", "(", "str", "...
Make a request to provider.
[ "Make", "a", "request", "to", "provider", "." ]
54f58249496c26965adb4f752f2b24cfe18d0084
https://github.com/klen/aioauth-client/blob/54f58249496c26965adb4f752f2b24cfe18d0084/aioauth_client.py#L220-L246
train
208,501
klen/aioauth-client
aioauth_client.py
OAuth1Client.get_request_token
async def get_request_token(self, loop=None, **params): """Get a request_token and request_token_secret from OAuth1 provider.""" params = dict(self.params, **params) data = await self.request('GET', self.request_token_url, params=params, loop=loop) self.oauth_token = data.get('oauth_token') self.oauth_token_secret = data.get('oauth_token_secret') return self.oauth_token, self.oauth_token_secret, data
python
async def get_request_token(self, loop=None, **params): """Get a request_token and request_token_secret from OAuth1 provider.""" params = dict(self.params, **params) data = await self.request('GET', self.request_token_url, params=params, loop=loop) self.oauth_token = data.get('oauth_token') self.oauth_token_secret = data.get('oauth_token_secret') return self.oauth_token, self.oauth_token_secret, data
[ "async", "def", "get_request_token", "(", "self", ",", "loop", "=", "None", ",", "*", "*", "params", ")", ":", "params", "=", "dict", "(", "self", ".", "params", ",", "*", "*", "params", ")", "data", "=", "await", "self", ".", "request", "(", "'GET...
Get a request_token and request_token_secret from OAuth1 provider.
[ "Get", "a", "request_token", "and", "request_token_secret", "from", "OAuth1", "provider", "." ]
54f58249496c26965adb4f752f2b24cfe18d0084
https://github.com/klen/aioauth-client/blob/54f58249496c26965adb4f752f2b24cfe18d0084/aioauth_client.py#L248-L255
train
208,502
klen/aioauth-client
aioauth_client.py
OAuth1Client.get_access_token
async def get_access_token(self, oauth_verifier, request_token=None, loop=None, **params): """Get access_token from OAuth1 provider. :returns: (access_token, access_token_secret, provider_data) """ # Possibility to provide REQUEST DATA to the method if not isinstance(oauth_verifier, str) and self.shared_key in oauth_verifier: oauth_verifier = oauth_verifier[self.shared_key] if request_token and self.oauth_token != request_token: raise web.HTTPBadRequest( reason='Failed to obtain OAuth 1.0 access token. ' 'Request token is invalid') data = await self.request('POST', self.access_token_url, params={ 'oauth_verifier': oauth_verifier, 'oauth_token': request_token}, loop=loop) self.oauth_token = data.get('oauth_token') self.oauth_token_secret = data.get('oauth_token_secret') return self.oauth_token, self.oauth_token_secret, data
python
async def get_access_token(self, oauth_verifier, request_token=None, loop=None, **params): """Get access_token from OAuth1 provider. :returns: (access_token, access_token_secret, provider_data) """ # Possibility to provide REQUEST DATA to the method if not isinstance(oauth_verifier, str) and self.shared_key in oauth_verifier: oauth_verifier = oauth_verifier[self.shared_key] if request_token and self.oauth_token != request_token: raise web.HTTPBadRequest( reason='Failed to obtain OAuth 1.0 access token. ' 'Request token is invalid') data = await self.request('POST', self.access_token_url, params={ 'oauth_verifier': oauth_verifier, 'oauth_token': request_token}, loop=loop) self.oauth_token = data.get('oauth_token') self.oauth_token_secret = data.get('oauth_token_secret') return self.oauth_token, self.oauth_token_secret, data
[ "async", "def", "get_access_token", "(", "self", ",", "oauth_verifier", ",", "request_token", "=", "None", ",", "loop", "=", "None", ",", "*", "*", "params", ")", ":", "# Possibility to provide REQUEST DATA to the method", "if", "not", "isinstance", "(", "oauth_ve...
Get access_token from OAuth1 provider. :returns: (access_token, access_token_secret, provider_data)
[ "Get", "access_token", "from", "OAuth1", "provider", "." ]
54f58249496c26965adb4f752f2b24cfe18d0084
https://github.com/klen/aioauth-client/blob/54f58249496c26965adb4f752f2b24cfe18d0084/aioauth_client.py#L257-L277
train
208,503
klen/aioauth-client
aioauth_client.py
OAuth2Client.get_authorize_url
def get_authorize_url(self, **params): """Return formatted authorize URL.""" params = dict(self.params, **params) params.update({'client_id': self.client_id, 'response_type': 'code'}) return self.authorize_url + '?' + urlencode(params)
python
def get_authorize_url(self, **params): """Return formatted authorize URL.""" params = dict(self.params, **params) params.update({'client_id': self.client_id, 'response_type': 'code'}) return self.authorize_url + '?' + urlencode(params)
[ "def", "get_authorize_url", "(", "self", ",", "*", "*", "params", ")", ":", "params", "=", "dict", "(", "self", ".", "params", ",", "*", "*", "params", ")", "params", ".", "update", "(", "{", "'client_id'", ":", "self", ".", "client_id", ",", "'respo...
Return formatted authorize URL.
[ "Return", "formatted", "authorize", "URL", "." ]
54f58249496c26965adb4f752f2b24cfe18d0084
https://github.com/klen/aioauth-client/blob/54f58249496c26965adb4f752f2b24cfe18d0084/aioauth_client.py#L300-L304
train
208,504
klen/aioauth-client
aioauth_client.py
OAuth2Client.request
def request(self, method, url, params=None, headers=None, access_token=None, **aio_kwargs): """Request OAuth2 resource.""" url = self._get_url(url) params = params or {} access_token = access_token or self.access_token if access_token: if isinstance(params, list): if self.access_token_key not in dict(params): params.append((self.access_token_key, access_token)) else: params[self.access_token_key] = access_token headers = headers or { 'Accept': 'application/json', 'Content-Type': 'application/x-www-form-urlencoded;charset=UTF-8', } return self._request(method, url, params=params, headers=headers, **aio_kwargs)
python
def request(self, method, url, params=None, headers=None, access_token=None, **aio_kwargs): """Request OAuth2 resource.""" url = self._get_url(url) params = params or {} access_token = access_token or self.access_token if access_token: if isinstance(params, list): if self.access_token_key not in dict(params): params.append((self.access_token_key, access_token)) else: params[self.access_token_key] = access_token headers = headers or { 'Accept': 'application/json', 'Content-Type': 'application/x-www-form-urlencoded;charset=UTF-8', } return self._request(method, url, params=params, headers=headers, **aio_kwargs)
[ "def", "request", "(", "self", ",", "method", ",", "url", ",", "params", "=", "None", ",", "headers", "=", "None", ",", "access_token", "=", "None", ",", "*", "*", "aio_kwargs", ")", ":", "url", "=", "self", ".", "_get_url", "(", "url", ")", "param...
Request OAuth2 resource.
[ "Request", "OAuth2", "resource", "." ]
54f58249496c26965adb4f752f2b24cfe18d0084
https://github.com/klen/aioauth-client/blob/54f58249496c26965adb4f752f2b24cfe18d0084/aioauth_client.py#L306-L324
train
208,505
klen/aioauth-client
aioauth_client.py
OAuth2Client.get_access_token
async def get_access_token(self, code, loop=None, redirect_uri=None, **payload): """Get an access_token from OAuth provider. :returns: (access_token, provider_data) """ # Possibility to provide REQUEST DATA to the method payload.setdefault('grant_type', 'authorization_code') payload.update({'client_id': self.client_id, 'client_secret': self.client_secret}) if not isinstance(code, str) and self.shared_key in code: code = code[self.shared_key] payload['refresh_token' if payload['grant_type'] == 'refresh_token' else 'code'] = code redirect_uri = redirect_uri or self.params.get('redirect_uri') if redirect_uri: payload['redirect_uri'] = redirect_uri self.access_token = None data = await self.request('POST', self.access_token_url, data=payload, loop=loop) try: self.access_token = data['access_token'] except KeyError: self.logger.error( 'Error when getting the access token.\nData returned by OAuth server: %r', data, ) raise web.HTTPBadRequest(reason='Failed to obtain OAuth access token.') return self.access_token, data
python
async def get_access_token(self, code, loop=None, redirect_uri=None, **payload): """Get an access_token from OAuth provider. :returns: (access_token, provider_data) """ # Possibility to provide REQUEST DATA to the method payload.setdefault('grant_type', 'authorization_code') payload.update({'client_id': self.client_id, 'client_secret': self.client_secret}) if not isinstance(code, str) and self.shared_key in code: code = code[self.shared_key] payload['refresh_token' if payload['grant_type'] == 'refresh_token' else 'code'] = code redirect_uri = redirect_uri or self.params.get('redirect_uri') if redirect_uri: payload['redirect_uri'] = redirect_uri self.access_token = None data = await self.request('POST', self.access_token_url, data=payload, loop=loop) try: self.access_token = data['access_token'] except KeyError: self.logger.error( 'Error when getting the access token.\nData returned by OAuth server: %r', data, ) raise web.HTTPBadRequest(reason='Failed to obtain OAuth access token.') return self.access_token, data
[ "async", "def", "get_access_token", "(", "self", ",", "code", ",", "loop", "=", "None", ",", "redirect_uri", "=", "None", ",", "*", "*", "payload", ")", ":", "# Possibility to provide REQUEST DATA to the method", "payload", ".", "setdefault", "(", "'grant_type'", ...
Get an access_token from OAuth provider. :returns: (access_token, provider_data)
[ "Get", "an", "access_token", "from", "OAuth", "provider", "." ]
54f58249496c26965adb4f752f2b24cfe18d0084
https://github.com/klen/aioauth-client/blob/54f58249496c26965adb4f752f2b24cfe18d0084/aioauth_client.py#L326-L356
train
208,506
klen/aioauth-client
aioauth_client.py
FacebookClient.user_info
async def user_info(self, params=None, **kwargs): """Facebook required fields-param.""" params = params or {} params[ 'fields'] = 'id,email,first_name,last_name,name,link,locale,' \ 'gender,location' return await super(FacebookClient, self).user_info(params=params, **kwargs)
python
async def user_info(self, params=None, **kwargs): """Facebook required fields-param.""" params = params or {} params[ 'fields'] = 'id,email,first_name,last_name,name,link,locale,' \ 'gender,location' return await super(FacebookClient, self).user_info(params=params, **kwargs)
[ "async", "def", "user_info", "(", "self", ",", "params", "=", "None", ",", "*", "*", "kwargs", ")", ":", "params", "=", "params", "or", "{", "}", "params", "[", "'fields'", "]", "=", "'id,email,first_name,last_name,name,link,locale,'", "'gender,location'", "re...
Facebook required fields-param.
[ "Facebook", "required", "fields", "-", "param", "." ]
54f58249496c26965adb4f752f2b24cfe18d0084
https://github.com/klen/aioauth-client/blob/54f58249496c26965adb4f752f2b24cfe18d0084/aioauth_client.py#L745-L751
train
208,507
arteria/django-hijack
hijack/decorators.py
hijack_require_http_methods
def hijack_require_http_methods(fn): """ Wrapper for "require_http_methods" decorator. POST required by default, GET can optionally be allowed """ required_methods = ['POST'] if hijack_settings.HIJACK_ALLOW_GET_REQUESTS: required_methods.append('GET') return require_http_methods(required_methods)(fn)
python
def hijack_require_http_methods(fn): """ Wrapper for "require_http_methods" decorator. POST required by default, GET can optionally be allowed """ required_methods = ['POST'] if hijack_settings.HIJACK_ALLOW_GET_REQUESTS: required_methods.append('GET') return require_http_methods(required_methods)(fn)
[ "def", "hijack_require_http_methods", "(", "fn", ")", ":", "required_methods", "=", "[", "'POST'", "]", "if", "hijack_settings", ".", "HIJACK_ALLOW_GET_REQUESTS", ":", "required_methods", ".", "append", "(", "'GET'", ")", "return", "require_http_methods", "(", "requ...
Wrapper for "require_http_methods" decorator. POST required by default, GET can optionally be allowed
[ "Wrapper", "for", "require_http_methods", "decorator", ".", "POST", "required", "by", "default", "GET", "can", "optionally", "be", "allowed" ]
64a3a1dd0655d9fee9786d62628add132073b946
https://github.com/arteria/django-hijack/blob/64a3a1dd0655d9fee9786d62628add132073b946/hijack/decorators.py#L16-L23
train
208,508
arteria/django-hijack
hijack/helpers.py
is_authorized_default
def is_authorized_default(hijacker, hijacked): """Checks if the user has the correct permission to Hijack another user. By default only superusers are allowed to hijack. An exception is made to allow staff members to hijack when HIJACK_AUTHORIZE_STAFF is enabled in the Django settings. By default it prevents staff users from hijacking other staff users. This can be disabled by enabling the HIJACK_AUTHORIZE_STAFF_TO_HIJACK_STAFF setting in the Django settings. Staff users can never hijack superusers. """ if hijacker.is_superuser: return True if hijacked.is_superuser: return False if hijacker.is_staff and hijack_settings.HIJACK_AUTHORIZE_STAFF: if hijacked.is_staff and not hijack_settings.HIJACK_AUTHORIZE_STAFF_TO_HIJACK_STAFF: return False return True return False
python
def is_authorized_default(hijacker, hijacked): """Checks if the user has the correct permission to Hijack another user. By default only superusers are allowed to hijack. An exception is made to allow staff members to hijack when HIJACK_AUTHORIZE_STAFF is enabled in the Django settings. By default it prevents staff users from hijacking other staff users. This can be disabled by enabling the HIJACK_AUTHORIZE_STAFF_TO_HIJACK_STAFF setting in the Django settings. Staff users can never hijack superusers. """ if hijacker.is_superuser: return True if hijacked.is_superuser: return False if hijacker.is_staff and hijack_settings.HIJACK_AUTHORIZE_STAFF: if hijacked.is_staff and not hijack_settings.HIJACK_AUTHORIZE_STAFF_TO_HIJACK_STAFF: return False return True return False
[ "def", "is_authorized_default", "(", "hijacker", ",", "hijacked", ")", ":", "if", "hijacker", ".", "is_superuser", ":", "return", "True", "if", "hijacked", ".", "is_superuser", ":", "return", "False", "if", "hijacker", ".", "is_staff", "and", "hijack_settings", ...
Checks if the user has the correct permission to Hijack another user. By default only superusers are allowed to hijack. An exception is made to allow staff members to hijack when HIJACK_AUTHORIZE_STAFF is enabled in the Django settings. By default it prevents staff users from hijacking other staff users. This can be disabled by enabling the HIJACK_AUTHORIZE_STAFF_TO_HIJACK_STAFF setting in the Django settings. Staff users can never hijack superusers.
[ "Checks", "if", "the", "user", "has", "the", "correct", "permission", "to", "Hijack", "another", "user", "." ]
64a3a1dd0655d9fee9786d62628add132073b946
https://github.com/arteria/django-hijack/blob/64a3a1dd0655d9fee9786d62628add132073b946/hijack/helpers.py#L77-L103
train
208,509
arteria/django-hijack
hijack/helpers.py
is_authorized
def is_authorized(hijack, hijacked): ''' Evaluates the authorization check specified in settings ''' authorization_check = import_string(hijack_settings.HIJACK_AUTHORIZATION_CHECK) return authorization_check(hijack, hijacked)
python
def is_authorized(hijack, hijacked): ''' Evaluates the authorization check specified in settings ''' authorization_check = import_string(hijack_settings.HIJACK_AUTHORIZATION_CHECK) return authorization_check(hijack, hijacked)
[ "def", "is_authorized", "(", "hijack", ",", "hijacked", ")", ":", "authorization_check", "=", "import_string", "(", "hijack_settings", ".", "HIJACK_AUTHORIZATION_CHECK", ")", "return", "authorization_check", "(", "hijack", ",", "hijacked", ")" ]
Evaluates the authorization check specified in settings
[ "Evaluates", "the", "authorization", "check", "specified", "in", "settings" ]
64a3a1dd0655d9fee9786d62628add132073b946
https://github.com/arteria/django-hijack/blob/64a3a1dd0655d9fee9786d62628add132073b946/hijack/helpers.py#L106-L111
train
208,510
floydwch/kaggle-cli
kaggle_cli/download.py
Download.is_downloadable
def is_downloadable(self, response): ''' Checks whether the response object is a html page or a likely downloadable file. Intended to detect error pages or prompts such as kaggle's competition rules acceptance prompt. Returns True if the response is a html page. False otherwise. ''' content_type = response.headers.get('Content-Type', '') content_disp = response.headers.get('Content-Disposition', '') if 'text/html' in content_type and 'attachment' not in content_disp: # This response is a html file # which is not marked as an attachment, # so we likely hit a rules acceptance prompt return False return True
python
def is_downloadable(self, response): ''' Checks whether the response object is a html page or a likely downloadable file. Intended to detect error pages or prompts such as kaggle's competition rules acceptance prompt. Returns True if the response is a html page. False otherwise. ''' content_type = response.headers.get('Content-Type', '') content_disp = response.headers.get('Content-Disposition', '') if 'text/html' in content_type and 'attachment' not in content_disp: # This response is a html file # which is not marked as an attachment, # so we likely hit a rules acceptance prompt return False return True
[ "def", "is_downloadable", "(", "self", ",", "response", ")", ":", "content_type", "=", "response", ".", "headers", ".", "get", "(", "'Content-Type'", ",", "''", ")", "content_disp", "=", "response", ".", "headers", ".", "get", "(", "'Content-Disposition'", "...
Checks whether the response object is a html page or a likely downloadable file. Intended to detect error pages or prompts such as kaggle's competition rules acceptance prompt. Returns True if the response is a html page. False otherwise.
[ "Checks", "whether", "the", "response", "object", "is", "a", "html", "page", "or", "a", "likely", "downloadable", "file", ".", "Intended", "to", "detect", "error", "pages", "or", "prompts", "such", "as", "kaggle", "s", "competition", "rules", "acceptance", "...
3f6071f1feddb08d6babfcdfd5a90496ae8b26e6
https://github.com/floydwch/kaggle-cli/blob/3f6071f1feddb08d6babfcdfd5a90496ae8b26e6/kaggle_cli/download.py#L124-L142
train
208,511
rbw/pysnow
pysnow/legacy_request.py
LegacyRequest.count
def count(self): """ Returns the number of records the query would yield""" self.request_params.update({'sysparm_count': True}) response = self.session.get(self._get_stats_url(), params=self._get_formatted_query(fields=list(), limit=None, order_by=list(), offset=None)) content = self._get_content(response) return int(content['stats']['count'])
python
def count(self): """ Returns the number of records the query would yield""" self.request_params.update({'sysparm_count': True}) response = self.session.get(self._get_stats_url(), params=self._get_formatted_query(fields=list(), limit=None, order_by=list(), offset=None)) content = self._get_content(response) return int(content['stats']['count'])
[ "def", "count", "(", "self", ")", ":", "self", ".", "request_params", ".", "update", "(", "{", "'sysparm_count'", ":", "True", "}", ")", "response", "=", "self", ".", "session", ".", "get", "(", "self", ".", "_get_stats_url", "(", ")", ",", "params", ...
Returns the number of records the query would yield
[ "Returns", "the", "number", "of", "records", "the", "query", "would", "yield" ]
87c8ce0d3a089c2f59247f30efbd545fcdb8e985
https://github.com/rbw/pysnow/blob/87c8ce0d3a089c2f59247f30efbd545fcdb8e985/pysnow/legacy_request.py#L62-L73
train
208,512
rbw/pysnow
pysnow/legacy_request.py
LegacyRequest._all_inner
def _all_inner(self, fields, limit, order_by, offset): """Yields all records for the query and follows links if present on the response after validating :return: List of records with content """ response = self.session.get(self._get_table_url(), params=self._get_formatted_query(fields, limit, order_by, offset)) yield self._get_content(response) while 'next' in response.links: self.url_link = response.links['next']['url'] response = self.session.get(self.url_link) yield self._get_content(response)
python
def _all_inner(self, fields, limit, order_by, offset): """Yields all records for the query and follows links if present on the response after validating :return: List of records with content """ response = self.session.get(self._get_table_url(), params=self._get_formatted_query(fields, limit, order_by, offset)) yield self._get_content(response) while 'next' in response.links: self.url_link = response.links['next']['url'] response = self.session.get(self.url_link) yield self._get_content(response)
[ "def", "_all_inner", "(", "self", ",", "fields", ",", "limit", ",", "order_by", ",", "offset", ")", ":", "response", "=", "self", ".", "session", ".", "get", "(", "self", ".", "_get_table_url", "(", ")", ",", "params", "=", "self", ".", "_get_formatted...
Yields all records for the query and follows links if present on the response after validating :return: List of records with content
[ "Yields", "all", "records", "for", "the", "query", "and", "follows", "links", "if", "present", "on", "the", "response", "after", "validating" ]
87c8ce0d3a089c2f59247f30efbd545fcdb8e985
https://github.com/rbw/pysnow/blob/87c8ce0d3a089c2f59247f30efbd545fcdb8e985/pysnow/legacy_request.py#L83-L95
train
208,513
rbw/pysnow
pysnow/legacy_request.py
LegacyRequest.get_one
def get_one(self, fields=list()): """Convenience function for queries returning only one result. Validates response before returning. :param fields: List of fields to return in the result :raise: :MultipleResults: if more than one match is found :return: - Record content """ response = self.session.get(self._get_table_url(), params=self._get_formatted_query(fields, limit=None, order_by=list(), offset=None)) content = self._get_content(response) l = len(content) if l > 1: raise MultipleResults('Multiple results for get_one()') if len(content) == 0: return {} return content[0]
python
def get_one(self, fields=list()): """Convenience function for queries returning only one result. Validates response before returning. :param fields: List of fields to return in the result :raise: :MultipleResults: if more than one match is found :return: - Record content """ response = self.session.get(self._get_table_url(), params=self._get_formatted_query(fields, limit=None, order_by=list(), offset=None)) content = self._get_content(response) l = len(content) if l > 1: raise MultipleResults('Multiple results for get_one()') if len(content) == 0: return {} return content[0]
[ "def", "get_one", "(", "self", ",", "fields", "=", "list", "(", ")", ")", ":", "response", "=", "self", ".", "session", ".", "get", "(", "self", ".", "_get_table_url", "(", ")", ",", "params", "=", "self", ".", "_get_formatted_query", "(", "fields", ...
Convenience function for queries returning only one result. Validates response before returning. :param fields: List of fields to return in the result :raise: :MultipleResults: if more than one match is found :return: - Record content
[ "Convenience", "function", "for", "queries", "returning", "only", "one", "result", ".", "Validates", "response", "before", "returning", "." ]
87c8ce0d3a089c2f59247f30efbd545fcdb8e985
https://github.com/rbw/pysnow/blob/87c8ce0d3a089c2f59247f30efbd545fcdb8e985/pysnow/legacy_request.py#L120-L140
train
208,514
rbw/pysnow
pysnow/legacy_request.py
LegacyRequest.insert
def insert(self, payload): """Inserts a new record with the payload passed as an argument :param payload: The record to create (dict) :return: - Created record """ response = self.session.post(self._get_table_url(), data=json.dumps(payload)) return self._get_content(response)
python
def insert(self, payload): """Inserts a new record with the payload passed as an argument :param payload: The record to create (dict) :return: - Created record """ response = self.session.post(self._get_table_url(), data=json.dumps(payload)) return self._get_content(response)
[ "def", "insert", "(", "self", ",", "payload", ")", ":", "response", "=", "self", ".", "session", ".", "post", "(", "self", ".", "_get_table_url", "(", ")", ",", "data", "=", "json", ".", "dumps", "(", "payload", ")", ")", "return", "self", ".", "_g...
Inserts a new record with the payload passed as an argument :param payload: The record to create (dict) :return: - Created record
[ "Inserts", "a", "new", "record", "with", "the", "payload", "passed", "as", "an", "argument" ]
87c8ce0d3a089c2f59247f30efbd545fcdb8e985
https://github.com/rbw/pysnow/blob/87c8ce0d3a089c2f59247f30efbd545fcdb8e985/pysnow/legacy_request.py#L142-L150
train
208,515
rbw/pysnow
pysnow/legacy_request.py
LegacyRequest.delete
def delete(self): """Deletes the queried record and returns response content after response validation :raise: :NoResults: if query returned no results :NotImplementedError: if query returned more than one result (currently not supported) :return: - Delete response content (Generally always {'Success': True}) """ try: result = self.get_one() if 'sys_id' not in result: raise NoResults() except MultipleResults: raise MultipleResults("Deletion of multiple records is not supported") except NoResults as e: e.args = ('Cannot delete a non-existing record',) raise response = self.session.delete(self._get_table_url(sys_id=result['sys_id'])) return self._get_content(response)
python
def delete(self): """Deletes the queried record and returns response content after response validation :raise: :NoResults: if query returned no results :NotImplementedError: if query returned more than one result (currently not supported) :return: - Delete response content (Generally always {'Success': True}) """ try: result = self.get_one() if 'sys_id' not in result: raise NoResults() except MultipleResults: raise MultipleResults("Deletion of multiple records is not supported") except NoResults as e: e.args = ('Cannot delete a non-existing record',) raise response = self.session.delete(self._get_table_url(sys_id=result['sys_id'])) return self._get_content(response)
[ "def", "delete", "(", "self", ")", ":", "try", ":", "result", "=", "self", ".", "get_one", "(", ")", "if", "'sys_id'", "not", "in", "result", ":", "raise", "NoResults", "(", ")", "except", "MultipleResults", ":", "raise", "MultipleResults", "(", "\"Delet...
Deletes the queried record and returns response content after response validation :raise: :NoResults: if query returned no results :NotImplementedError: if query returned more than one result (currently not supported) :return: - Delete response content (Generally always {'Success': True})
[ "Deletes", "the", "queried", "record", "and", "returns", "response", "content", "after", "response", "validation" ]
87c8ce0d3a089c2f59247f30efbd545fcdb8e985
https://github.com/rbw/pysnow/blob/87c8ce0d3a089c2f59247f30efbd545fcdb8e985/pysnow/legacy_request.py#L152-L172
train
208,516
rbw/pysnow
pysnow/legacy_request.py
LegacyRequest.update
def update(self, payload): """Updates the queried record with `payload` and returns the updated record after validating the response :param payload: Payload to update the record with :raise: :NoResults: if query returned no results :MultipleResults: if query returned more than one result (currently not supported) :return: - The updated record """ try: result = self.get_one() if 'sys_id' not in result: raise NoResults() except MultipleResults: raise MultipleResults("Update of multiple records is not supported") except NoResults as e: e.args = ('Cannot update a non-existing record',) raise if not isinstance(payload, dict): raise InvalidUsage("Update payload must be of type dict") response = self.session.put(self._get_table_url(sys_id=result['sys_id']), data=json.dumps(payload)) return self._get_content(response)
python
def update(self, payload): """Updates the queried record with `payload` and returns the updated record after validating the response :param payload: Payload to update the record with :raise: :NoResults: if query returned no results :MultipleResults: if query returned more than one result (currently not supported) :return: - The updated record """ try: result = self.get_one() if 'sys_id' not in result: raise NoResults() except MultipleResults: raise MultipleResults("Update of multiple records is not supported") except NoResults as e: e.args = ('Cannot update a non-existing record',) raise if not isinstance(payload, dict): raise InvalidUsage("Update payload must be of type dict") response = self.session.put(self._get_table_url(sys_id=result['sys_id']), data=json.dumps(payload)) return self._get_content(response)
[ "def", "update", "(", "self", ",", "payload", ")", ":", "try", ":", "result", "=", "self", ".", "get_one", "(", ")", "if", "'sys_id'", "not", "in", "result", ":", "raise", "NoResults", "(", ")", "except", "MultipleResults", ":", "raise", "MultipleResults...
Updates the queried record with `payload` and returns the updated record after validating the response :param payload: Payload to update the record with :raise: :NoResults: if query returned no results :MultipleResults: if query returned more than one result (currently not supported) :return: - The updated record
[ "Updates", "the", "queried", "record", "with", "payload", "and", "returns", "the", "updated", "record", "after", "validating", "the", "response" ]
87c8ce0d3a089c2f59247f30efbd545fcdb8e985
https://github.com/rbw/pysnow/blob/87c8ce0d3a089c2f59247f30efbd545fcdb8e985/pysnow/legacy_request.py#L174-L198
train
208,517
rbw/pysnow
pysnow/legacy_request.py
LegacyRequest.clone
def clone(self, reset_fields=list()): """Clones the queried record :param reset_fields: Fields to reset :raise: :NoResults: if query returned no results :MultipleResults: if query returned more than one result (currently not supported) :UnexpectedResponse: informs the user about what likely went wrong :return: - The cloned record """ if not isinstance(reset_fields, list): raise InvalidUsage("reset_fields must be a `list` of fields") try: response = self.get_one() if 'sys_id' not in response: raise NoResults() except MultipleResults: raise MultipleResults('Cloning multiple records is not supported') except NoResults as e: e.args = ('Cannot clone a non-existing record',) raise payload = {} # Iterate over fields in the result for field in response: # Ignore fields in reset_fields if field in reset_fields: continue item = response[field] # Check if the item is of type dict and has a sys_id ref (value) if isinstance(item, dict) and 'value' in item: payload[field] = item['value'] else: payload[field] = item try: return self.insert(payload) except UnexpectedResponse as e: if e.status_code == 403: # User likely attempted to clone a record without resetting a unique field e.args = ('Unable to create clone. Make sure unique fields has been reset.',) raise
python
def clone(self, reset_fields=list()): """Clones the queried record :param reset_fields: Fields to reset :raise: :NoResults: if query returned no results :MultipleResults: if query returned more than one result (currently not supported) :UnexpectedResponse: informs the user about what likely went wrong :return: - The cloned record """ if not isinstance(reset_fields, list): raise InvalidUsage("reset_fields must be a `list` of fields") try: response = self.get_one() if 'sys_id' not in response: raise NoResults() except MultipleResults: raise MultipleResults('Cloning multiple records is not supported') except NoResults as e: e.args = ('Cannot clone a non-existing record',) raise payload = {} # Iterate over fields in the result for field in response: # Ignore fields in reset_fields if field in reset_fields: continue item = response[field] # Check if the item is of type dict and has a sys_id ref (value) if isinstance(item, dict) and 'value' in item: payload[field] = item['value'] else: payload[field] = item try: return self.insert(payload) except UnexpectedResponse as e: if e.status_code == 403: # User likely attempted to clone a record without resetting a unique field e.args = ('Unable to create clone. Make sure unique fields has been reset.',) raise
[ "def", "clone", "(", "self", ",", "reset_fields", "=", "list", "(", ")", ")", ":", "if", "not", "isinstance", "(", "reset_fields", ",", "list", ")", ":", "raise", "InvalidUsage", "(", "\"reset_fields must be a `list` of fields\"", ")", "try", ":", "response", ...
Clones the queried record :param reset_fields: Fields to reset :raise: :NoResults: if query returned no results :MultipleResults: if query returned more than one result (currently not supported) :UnexpectedResponse: informs the user about what likely went wrong :return: - The cloned record
[ "Clones", "the", "queried", "record" ]
87c8ce0d3a089c2f59247f30efbd545fcdb8e985
https://github.com/rbw/pysnow/blob/87c8ce0d3a089c2f59247f30efbd545fcdb8e985/pysnow/legacy_request.py#L200-L246
train
208,518
rbw/pysnow
pysnow/legacy_request.py
LegacyRequest.attach
def attach(self, file): """Attaches the queried record with `file` and returns the response after validating the response :param file: File to attach to the record :raise: :NoResults: if query returned no results :MultipleResults: if query returned more than one result (currently not supported) :return: - The attachment record metadata """ try: result = self.get_one() if 'sys_id' not in result: raise NoResults() except MultipleResults: raise MultipleResults('Attaching a file to multiple records is not supported') except NoResults: raise NoResults('Attempted to attach file to a non-existing record') if not os.path.isfile(file): raise InvalidUsage("Attachment '%s' must be an existing regular file" % file) response = self.session.post( self._get_attachment_url('upload'), data={ 'table_name': self.table, 'table_sys_id': result['sys_id'], 'file_name': ntpath.basename(file) }, files={'file': open(file, 'rb')}, headers={'content-type': None} # Temporarily override header ) return self._get_content(response)
python
def attach(self, file): """Attaches the queried record with `file` and returns the response after validating the response :param file: File to attach to the record :raise: :NoResults: if query returned no results :MultipleResults: if query returned more than one result (currently not supported) :return: - The attachment record metadata """ try: result = self.get_one() if 'sys_id' not in result: raise NoResults() except MultipleResults: raise MultipleResults('Attaching a file to multiple records is not supported') except NoResults: raise NoResults('Attempted to attach file to a non-existing record') if not os.path.isfile(file): raise InvalidUsage("Attachment '%s' must be an existing regular file" % file) response = self.session.post( self._get_attachment_url('upload'), data={ 'table_name': self.table, 'table_sys_id': result['sys_id'], 'file_name': ntpath.basename(file) }, files={'file': open(file, 'rb')}, headers={'content-type': None} # Temporarily override header ) return self._get_content(response)
[ "def", "attach", "(", "self", ",", "file", ")", ":", "try", ":", "result", "=", "self", ".", "get_one", "(", ")", "if", "'sys_id'", "not", "in", "result", ":", "raise", "NoResults", "(", ")", "except", "MultipleResults", ":", "raise", "MultipleResults", ...
Attaches the queried record with `file` and returns the response after validating the response :param file: File to attach to the record :raise: :NoResults: if query returned no results :MultipleResults: if query returned more than one result (currently not supported) :return: - The attachment record metadata
[ "Attaches", "the", "queried", "record", "with", "file", "and", "returns", "the", "response", "after", "validating", "the", "response" ]
87c8ce0d3a089c2f59247f30efbd545fcdb8e985
https://github.com/rbw/pysnow/blob/87c8ce0d3a089c2f59247f30efbd545fcdb8e985/pysnow/legacy_request.py#L248-L280
train
208,519
rbw/pysnow
pysnow/legacy_request.py
LegacyRequest._get_content
def _get_content(self, response): """Checks for errors in the response. Returns response content, in bytes. :param response: response object :raise: :UnexpectedResponse: if the server responded with an unexpected response :return: - ServiceNow response content """ method = response.request.method self.last_response = response server_error = { 'summary': None, 'details': None } try: content_json = response.json() if 'error' in content_json: e = content_json['error'] if 'message' in e: server_error['summary'] = e['message'] if 'detail' in e: server_error['details'] = e['detail'] except ValueError: content_json = {} if method == 'DELETE': # Make sure the delete operation returned the expected response if response.status_code == 204: return {'success': True} else: raise UnexpectedResponse( 204, response.status_code, method, server_error['summary'], server_error['details'] ) # Make sure the POST operation returned the expected response elif method == 'POST' and response.status_code != 201: raise UnexpectedResponse( 201, response.status_code, method, server_error['summary'], server_error['details'] ) # It seems that Helsinki and later returns status 200 instead of 404 on empty result sets if ('result' in content_json and len(content_json['result']) == 0) or response.status_code == 404: if self.raise_on_empty is True: raise NoResults('Query yielded no results') elif 'error' in content_json: raise UnexpectedResponse( 200, response.status_code, method, server_error['summary'], server_error['details'] ) if 'result' not in content_json: raise MissingResult("The request was successful but the content didn't contain the expected 'result'") return content_json['result']
python
def _get_content(self, response): """Checks for errors in the response. Returns response content, in bytes. :param response: response object :raise: :UnexpectedResponse: if the server responded with an unexpected response :return: - ServiceNow response content """ method = response.request.method self.last_response = response server_error = { 'summary': None, 'details': None } try: content_json = response.json() if 'error' in content_json: e = content_json['error'] if 'message' in e: server_error['summary'] = e['message'] if 'detail' in e: server_error['details'] = e['detail'] except ValueError: content_json = {} if method == 'DELETE': # Make sure the delete operation returned the expected response if response.status_code == 204: return {'success': True} else: raise UnexpectedResponse( 204, response.status_code, method, server_error['summary'], server_error['details'] ) # Make sure the POST operation returned the expected response elif method == 'POST' and response.status_code != 201: raise UnexpectedResponse( 201, response.status_code, method, server_error['summary'], server_error['details'] ) # It seems that Helsinki and later returns status 200 instead of 404 on empty result sets if ('result' in content_json and len(content_json['result']) == 0) or response.status_code == 404: if self.raise_on_empty is True: raise NoResults('Query yielded no results') elif 'error' in content_json: raise UnexpectedResponse( 200, response.status_code, method, server_error['summary'], server_error['details'] ) if 'result' not in content_json: raise MissingResult("The request was successful but the content didn't contain the expected 'result'") return content_json['result']
[ "def", "_get_content", "(", "self", ",", "response", ")", ":", "method", "=", "response", ".", "request", ".", "method", "self", ".", "last_response", "=", "response", "server_error", "=", "{", "'summary'", ":", "None", ",", "'details'", ":", "None", "}", ...
Checks for errors in the response. Returns response content, in bytes. :param response: response object :raise: :UnexpectedResponse: if the server responded with an unexpected response :return: - ServiceNow response content
[ "Checks", "for", "errors", "in", "the", "response", ".", "Returns", "response", "content", "in", "bytes", "." ]
87c8ce0d3a089c2f59247f30efbd545fcdb8e985
https://github.com/rbw/pysnow/blob/87c8ce0d3a089c2f59247f30efbd545fcdb8e985/pysnow/legacy_request.py#L282-L338
train
208,520
rbw/pysnow
pysnow/client.py
Client._get_session
def _get_session(self, session): """Creates a new session with basic auth, unless one was provided, and sets headers. :param session: (optional) Session to re-use :return: - :class:`requests.Session` object """ if not session: logger.debug('(SESSION_CREATE) User: %s' % self._user) s = requests.Session() s.auth = HTTPBasicAuth(self._user, self._password) else: logger.debug('(SESSION_CREATE) Object: %s' % session) s = session s.headers.update( { 'content-type': 'application/json', 'accept': 'application/json', 'User-Agent': 'pysnow/%s' % pysnow.__version__ } ) return s
python
def _get_session(self, session): """Creates a new session with basic auth, unless one was provided, and sets headers. :param session: (optional) Session to re-use :return: - :class:`requests.Session` object """ if not session: logger.debug('(SESSION_CREATE) User: %s' % self._user) s = requests.Session() s.auth = HTTPBasicAuth(self._user, self._password) else: logger.debug('(SESSION_CREATE) Object: %s' % session) s = session s.headers.update( { 'content-type': 'application/json', 'accept': 'application/json', 'User-Agent': 'pysnow/%s' % pysnow.__version__ } ) return s
[ "def", "_get_session", "(", "self", ",", "session", ")", ":", "if", "not", "session", ":", "logger", ".", "debug", "(", "'(SESSION_CREATE) User: %s'", "%", "self", ".", "_user", ")", "s", "=", "requests", ".", "Session", "(", ")", "s", ".", "auth", "="...
Creates a new session with basic auth, unless one was provided, and sets headers. :param session: (optional) Session to re-use :return: - :class:`requests.Session` object
[ "Creates", "a", "new", "session", "with", "basic", "auth", "unless", "one", "was", "provided", "and", "sets", "headers", "." ]
87c8ce0d3a089c2f59247f30efbd545fcdb8e985
https://github.com/rbw/pysnow/blob/87c8ce0d3a089c2f59247f30efbd545fcdb8e985/pysnow/client.py#L91-L115
train
208,521
rbw/pysnow
pysnow/request.py
SnowRequest.get
def get(self, *args, **kwargs): """Fetches one or more records :return: - :class:`pysnow.Response` object """ self._parameters.query = kwargs.pop('query', {}) if len(args) == 0 else args[0] self._parameters.limit = kwargs.pop('limit', 10000) self._parameters.offset = kwargs.pop('offset', 0) self._parameters.fields = kwargs.pop('fields', kwargs.pop('fields', [])) return self._get_response('GET', stream=kwargs.pop('stream', False))
python
def get(self, *args, **kwargs): """Fetches one or more records :return: - :class:`pysnow.Response` object """ self._parameters.query = kwargs.pop('query', {}) if len(args) == 0 else args[0] self._parameters.limit = kwargs.pop('limit', 10000) self._parameters.offset = kwargs.pop('offset', 0) self._parameters.fields = kwargs.pop('fields', kwargs.pop('fields', [])) return self._get_response('GET', stream=kwargs.pop('stream', False))
[ "def", "get", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "self", ".", "_parameters", ".", "query", "=", "kwargs", ".", "pop", "(", "'query'", ",", "{", "}", ")", "if", "len", "(", "args", ")", "==", "0", "else", "args", ...
Fetches one or more records :return: - :class:`pysnow.Response` object
[ "Fetches", "one", "or", "more", "records" ]
87c8ce0d3a089c2f59247f30efbd545fcdb8e985
https://github.com/rbw/pysnow/blob/87c8ce0d3a089c2f59247f30efbd545fcdb8e985/pysnow/request.py#L51-L63
train
208,522
rbw/pysnow
pysnow/request.py
SnowRequest.update
def update(self, query, payload): """Updates a record :param query: Dictionary, string or :class:`QueryBuilder` object :param payload: Dictionary payload :return: - Dictionary of the updated record """ if not isinstance(payload, dict): raise InvalidUsage("Update payload must be of type dict") record = self.get(query).one() self._url = self._url_builder.get_appended_custom("/{0}".format(record['sys_id'])) return self._get_response('PUT', data=json.dumps(payload))
python
def update(self, query, payload): """Updates a record :param query: Dictionary, string or :class:`QueryBuilder` object :param payload: Dictionary payload :return: - Dictionary of the updated record """ if not isinstance(payload, dict): raise InvalidUsage("Update payload must be of type dict") record = self.get(query).one() self._url = self._url_builder.get_appended_custom("/{0}".format(record['sys_id'])) return self._get_response('PUT', data=json.dumps(payload))
[ "def", "update", "(", "self", ",", "query", ",", "payload", ")", ":", "if", "not", "isinstance", "(", "payload", ",", "dict", ")", ":", "raise", "InvalidUsage", "(", "\"Update payload must be of type dict\"", ")", "record", "=", "self", ".", "get", "(", "q...
Updates a record :param query: Dictionary, string or :class:`QueryBuilder` object :param payload: Dictionary payload :return: - Dictionary of the updated record
[ "Updates", "a", "record" ]
87c8ce0d3a089c2f59247f30efbd545fcdb8e985
https://github.com/rbw/pysnow/blob/87c8ce0d3a089c2f59247f30efbd545fcdb8e985/pysnow/request.py#L75-L90
train
208,523
rbw/pysnow
pysnow/request.py
SnowRequest.delete
def delete(self, query): """Deletes a record :param query: Dictionary, string or :class:`QueryBuilder` object :return: - Dictionary containing status of the delete operation """ record = self.get(query=query).one() self._url = self._url_builder.get_appended_custom("/{0}".format(record['sys_id'])) return self._get_response('DELETE').one()
python
def delete(self, query): """Deletes a record :param query: Dictionary, string or :class:`QueryBuilder` object :return: - Dictionary containing status of the delete operation """ record = self.get(query=query).one() self._url = self._url_builder.get_appended_custom("/{0}".format(record['sys_id'])) return self._get_response('DELETE').one()
[ "def", "delete", "(", "self", ",", "query", ")", ":", "record", "=", "self", ".", "get", "(", "query", "=", "query", ")", ".", "one", "(", ")", "self", ".", "_url", "=", "self", ".", "_url_builder", ".", "get_appended_custom", "(", "\"/{0}\"", ".", ...
Deletes a record :param query: Dictionary, string or :class:`QueryBuilder` object :return: - Dictionary containing status of the delete operation
[ "Deletes", "a", "record" ]
87c8ce0d3a089c2f59247f30efbd545fcdb8e985
https://github.com/rbw/pysnow/blob/87c8ce0d3a089c2f59247f30efbd545fcdb8e985/pysnow/request.py#L92-L103
train
208,524
rbw/pysnow
pysnow/request.py
SnowRequest.custom
def custom(self, method, path_append=None, headers=None, **kwargs): """Creates a custom request :param method: HTTP method :param path_append: (optional) append path to resource.api_path :param headers: (optional) Dictionary of headers to add or override :param kwargs: kwargs to pass along to :class:`requests.Request` :return: - :class:`pysnow.Response` object """ if headers: self._session.headers.update(headers) if path_append is not None: try: self._url = self._url_builder.get_appended_custom(path_append) except InvalidUsage: raise InvalidUsage("Argument 'path_append' must be a string in the following format: " "/path-to-append[/.../...]") return self._get_response(method, **kwargs)
python
def custom(self, method, path_append=None, headers=None, **kwargs): """Creates a custom request :param method: HTTP method :param path_append: (optional) append path to resource.api_path :param headers: (optional) Dictionary of headers to add or override :param kwargs: kwargs to pass along to :class:`requests.Request` :return: - :class:`pysnow.Response` object """ if headers: self._session.headers.update(headers) if path_append is not None: try: self._url = self._url_builder.get_appended_custom(path_append) except InvalidUsage: raise InvalidUsage("Argument 'path_append' must be a string in the following format: " "/path-to-append[/.../...]") return self._get_response(method, **kwargs)
[ "def", "custom", "(", "self", ",", "method", ",", "path_append", "=", "None", ",", "headers", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "headers", ":", "self", ".", "_session", ".", "headers", ".", "update", "(", "headers", ")", "if", "...
Creates a custom request :param method: HTTP method :param path_append: (optional) append path to resource.api_path :param headers: (optional) Dictionary of headers to add or override :param kwargs: kwargs to pass along to :class:`requests.Request` :return: - :class:`pysnow.Response` object
[ "Creates", "a", "custom", "request" ]
87c8ce0d3a089c2f59247f30efbd545fcdb8e985
https://github.com/rbw/pysnow/blob/87c8ce0d3a089c2f59247f30efbd545fcdb8e985/pysnow/request.py#L105-L126
train
208,525
rbw/pysnow
pysnow/resource.py
Resource.attachments
def attachments(self): """Provides an `Attachment` API for this resource. Enables easy listing, deleting and creating new attachments. :return: Attachment object """ resource = copy(self) resource._url_builder = URLBuilder(self._base_url, self._base_path, '/attachment') path = self._api_path.strip('/').split('/') if path[0] != 'table': raise InvalidUsage('The attachment API can only be used with the table API') return Attachment(resource, path[1])
python
def attachments(self): """Provides an `Attachment` API for this resource. Enables easy listing, deleting and creating new attachments. :return: Attachment object """ resource = copy(self) resource._url_builder = URLBuilder(self._base_url, self._base_path, '/attachment') path = self._api_path.strip('/').split('/') if path[0] != 'table': raise InvalidUsage('The attachment API can only be used with the table API') return Attachment(resource, path[1])
[ "def", "attachments", "(", "self", ")", ":", "resource", "=", "copy", "(", "self", ")", "resource", ".", "_url_builder", "=", "URLBuilder", "(", "self", ".", "_base_url", ",", "self", ".", "_base_path", ",", "'/attachment'", ")", "path", "=", "self", "."...
Provides an `Attachment` API for this resource. Enables easy listing, deleting and creating new attachments. :return: Attachment object
[ "Provides", "an", "Attachment", "API", "for", "this", "resource", ".", "Enables", "easy", "listing", "deleting", "and", "creating", "new", "attachments", "." ]
87c8ce0d3a089c2f59247f30efbd545fcdb8e985
https://github.com/rbw/pysnow/blob/87c8ce0d3a089c2f59247f30efbd545fcdb8e985/pysnow/resource.py#L51-L66
train
208,526
rbw/pysnow
pysnow/resource.py
Resource.request
def request(self, method, path_append=None, headers=None, **kwargs): """Create a custom request :param method: HTTP method to use :param path_append: (optional) relative to :attr:`api_path` :param headers: (optional) Dictionary of headers to add or override :param kwargs: kwargs to pass along to :class:`requests.Request` :return: - :class:`Response` object """ return self._request.custom(method, path_append=path_append, headers=headers, **kwargs)
python
def request(self, method, path_append=None, headers=None, **kwargs): """Create a custom request :param method: HTTP method to use :param path_append: (optional) relative to :attr:`api_path` :param headers: (optional) Dictionary of headers to add or override :param kwargs: kwargs to pass along to :class:`requests.Request` :return: - :class:`Response` object """ return self._request.custom(method, path_append=path_append, headers=headers, **kwargs)
[ "def", "request", "(", "self", ",", "method", ",", "path_append", "=", "None", ",", "headers", "=", "None", ",", "*", "*", "kwargs", ")", ":", "return", "self", ".", "_request", ".", "custom", "(", "method", ",", "path_append", "=", "path_append", ",",...
Create a custom request :param method: HTTP method to use :param path_append: (optional) relative to :attr:`api_path` :param headers: (optional) Dictionary of headers to add or override :param kwargs: kwargs to pass along to :class:`requests.Request` :return: - :class:`Response` object
[ "Create", "a", "custom", "request" ]
87c8ce0d3a089c2f59247f30efbd545fcdb8e985
https://github.com/rbw/pysnow/blob/87c8ce0d3a089c2f59247f30efbd545fcdb8e985/pysnow/resource.py#L139-L150
train
208,527
rbw/pysnow
pysnow/response.py
Response._get_buffered_response
def _get_buffered_response(self): """Returns a buffered response :return: Buffered response """ response = self._get_response() if response.request.method == 'DELETE' and response.status_code == 204: return [{'status': 'record deleted'}], 1 result = self._response.json().get('result', None) if result is None: raise MissingResult('The expected `result` key was missing in the response. Cannot continue') length = 0 if isinstance(result, list): length = len(result) elif isinstance(result, dict): result = [result] length = 1 return result, length
python
def _get_buffered_response(self): """Returns a buffered response :return: Buffered response """ response = self._get_response() if response.request.method == 'DELETE' and response.status_code == 204: return [{'status': 'record deleted'}], 1 result = self._response.json().get('result', None) if result is None: raise MissingResult('The expected `result` key was missing in the response. Cannot continue') length = 0 if isinstance(result, list): length = len(result) elif isinstance(result, dict): result = [result] length = 1 return result, length
[ "def", "_get_buffered_response", "(", "self", ")", ":", "response", "=", "self", ".", "_get_response", "(", ")", "if", "response", ".", "request", ".", "method", "==", "'DELETE'", "and", "response", ".", "status_code", "==", "204", ":", "return", "[", "{",...
Returns a buffered response :return: Buffered response
[ "Returns", "a", "buffered", "response" ]
87c8ce0d3a089c2f59247f30efbd545fcdb8e985
https://github.com/rbw/pysnow/blob/87c8ce0d3a089c2f59247f30efbd545fcdb8e985/pysnow/response.py#L127-L151
train
208,528
rbw/pysnow
pysnow/response.py
Response.all
def all(self): """Returns a chained generator response containing all matching records :return: - Iterable response """ if self._stream: return chain.from_iterable(self._get_streamed_response()) return self._get_buffered_response()[0]
python
def all(self): """Returns a chained generator response containing all matching records :return: - Iterable response """ if self._stream: return chain.from_iterable(self._get_streamed_response()) return self._get_buffered_response()[0]
[ "def", "all", "(", "self", ")", ":", "if", "self", ".", "_stream", ":", "return", "chain", ".", "from_iterable", "(", "self", ".", "_get_streamed_response", "(", ")", ")", "return", "self", ".", "_get_buffered_response", "(", ")", "[", "0", "]" ]
Returns a chained generator response containing all matching records :return: - Iterable response
[ "Returns", "a", "chained", "generator", "response", "containing", "all", "matching", "records" ]
87c8ce0d3a089c2f59247f30efbd545fcdb8e985
https://github.com/rbw/pysnow/blob/87c8ce0d3a089c2f59247f30efbd545fcdb8e985/pysnow/response.py#L153-L163
train
208,529
rbw/pysnow
pysnow/response.py
Response.first
def first(self): """Return the first record or raise an exception if the result doesn't contain any data :return: - Dictionary containing the first item in the response content :raise: - NoResults: If no results were found """ if not self._stream: raise InvalidUsage('first() is only available when stream=True') try: content = next(self.all()) except StopIteration: raise NoResults("No records found") return content
python
def first(self): """Return the first record or raise an exception if the result doesn't contain any data :return: - Dictionary containing the first item in the response content :raise: - NoResults: If no results were found """ if not self._stream: raise InvalidUsage('first() is only available when stream=True') try: content = next(self.all()) except StopIteration: raise NoResults("No records found") return content
[ "def", "first", "(", "self", ")", ":", "if", "not", "self", ".", "_stream", ":", "raise", "InvalidUsage", "(", "'first() is only available when stream=True'", ")", "try", ":", "content", "=", "next", "(", "self", ".", "all", "(", ")", ")", "except", "StopI...
Return the first record or raise an exception if the result doesn't contain any data :return: - Dictionary containing the first item in the response content :raise: - NoResults: If no results were found
[ "Return", "the", "first", "record", "or", "raise", "an", "exception", "if", "the", "result", "doesn", "t", "contain", "any", "data" ]
87c8ce0d3a089c2f59247f30efbd545fcdb8e985
https://github.com/rbw/pysnow/blob/87c8ce0d3a089c2f59247f30efbd545fcdb8e985/pysnow/response.py#L165-L183
train
208,530
rbw/pysnow
pysnow/response.py
Response.one
def one(self): """Return exactly one record or raise an exception. :return: - Dictionary containing the only item in the response content :raise: - MultipleResults: If more than one records are present in the content - NoResults: If the result is empty """ result, count = self._get_buffered_response() if count == 0: raise NoResults("No records found") elif count > 1: raise MultipleResults("Expected single-record result, got multiple") return result[0]
python
def one(self): """Return exactly one record or raise an exception. :return: - Dictionary containing the only item in the response content :raise: - MultipleResults: If more than one records are present in the content - NoResults: If the result is empty """ result, count = self._get_buffered_response() if count == 0: raise NoResults("No records found") elif count > 1: raise MultipleResults("Expected single-record result, got multiple") return result[0]
[ "def", "one", "(", "self", ")", ":", "result", ",", "count", "=", "self", ".", "_get_buffered_response", "(", ")", "if", "count", "==", "0", ":", "raise", "NoResults", "(", "\"No records found\"", ")", "elif", "count", ">", "1", ":", "raise", "MultipleRe...
Return exactly one record or raise an exception. :return: - Dictionary containing the only item in the response content :raise: - MultipleResults: If more than one records are present in the content - NoResults: If the result is empty
[ "Return", "exactly", "one", "record", "or", "raise", "an", "exception", "." ]
87c8ce0d3a089c2f59247f30efbd545fcdb8e985
https://github.com/rbw/pysnow/blob/87c8ce0d3a089c2f59247f30efbd545fcdb8e985/pysnow/response.py#L197-L215
train
208,531
rbw/pysnow
pysnow/response.py
Response.upload
def upload(self, *args, **kwargs): """Convenience method for attaching files to a fetched record :param args: args to pass along to `Attachment.upload` :param kwargs: kwargs to pass along to `Attachment.upload` :return: upload response object """ return self._resource.attachments.upload(self['sys_id'], *args, **kwargs)
python
def upload(self, *args, **kwargs): """Convenience method for attaching files to a fetched record :param args: args to pass along to `Attachment.upload` :param kwargs: kwargs to pass along to `Attachment.upload` :return: upload response object """ return self._resource.attachments.upload(self['sys_id'], *args, **kwargs)
[ "def", "upload", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "self", ".", "_resource", ".", "attachments", ".", "upload", "(", "self", "[", "'sys_id'", "]", ",", "*", "args", ",", "*", "*", "kwargs", ")" ]
Convenience method for attaching files to a fetched record :param args: args to pass along to `Attachment.upload` :param kwargs: kwargs to pass along to `Attachment.upload` :return: upload response object
[ "Convenience", "method", "for", "attaching", "files", "to", "a", "fetched", "record" ]
87c8ce0d3a089c2f59247f30efbd545fcdb8e985
https://github.com/rbw/pysnow/blob/87c8ce0d3a089c2f59247f30efbd545fcdb8e985/pysnow/response.py#L249-L257
train
208,532
rbw/pysnow
pysnow/attachment.py
Attachment.get
def get(self, sys_id=None, limit=100): """Returns a list of attachments :param sys_id: record sys_id to list attachments for :param limit: override the default limit of 100 :return: list of attachments """ if sys_id: return self.resource.get(query={'table_sys_id': sys_id, 'table_name': self.table_name}).all() return self.resource.get(query={'table_name': self.table_name}, limit=limit).all()
python
def get(self, sys_id=None, limit=100): """Returns a list of attachments :param sys_id: record sys_id to list attachments for :param limit: override the default limit of 100 :return: list of attachments """ if sys_id: return self.resource.get(query={'table_sys_id': sys_id, 'table_name': self.table_name}).all() return self.resource.get(query={'table_name': self.table_name}, limit=limit).all()
[ "def", "get", "(", "self", ",", "sys_id", "=", "None", ",", "limit", "=", "100", ")", ":", "if", "sys_id", ":", "return", "self", ".", "resource", ".", "get", "(", "query", "=", "{", "'table_sys_id'", ":", "sys_id", ",", "'table_name'", ":", "self", ...
Returns a list of attachments :param sys_id: record sys_id to list attachments for :param limit: override the default limit of 100 :return: list of attachments
[ "Returns", "a", "list", "of", "attachments" ]
87c8ce0d3a089c2f59247f30efbd545fcdb8e985
https://github.com/rbw/pysnow/blob/87c8ce0d3a089c2f59247f30efbd545fcdb8e985/pysnow/attachment.py#L18-L29
train
208,533
rbw/pysnow
pysnow/attachment.py
Attachment.upload
def upload(self, sys_id, file_path, name=None, multipart=False): """Attaches a new file to the provided record :param sys_id: the sys_id of the record to attach the file to :param file_path: local absolute path of the file to upload :param name: custom name for the uploaded file (instead of basename) :param multipart: whether or not to use multipart :return: the inserted record """ if not isinstance(multipart, bool): raise InvalidUsage('Multipart must be of type bool') resource = self.resource if name is None: name = os.path.basename(file_path) resource.parameters.add_custom({ 'table_name': self.table_name, 'table_sys_id': sys_id, 'file_name': name }) data = open(file_path, 'rb').read() headers = {} if multipart: headers["Content-Type"] = "multipart/form-data" path_append = '/upload' else: headers["Content-Type"] = "text/plain" path_append = '/file' return resource.request(method='POST', data=data, headers=headers, path_append=path_append)
python
def upload(self, sys_id, file_path, name=None, multipart=False): """Attaches a new file to the provided record :param sys_id: the sys_id of the record to attach the file to :param file_path: local absolute path of the file to upload :param name: custom name for the uploaded file (instead of basename) :param multipart: whether or not to use multipart :return: the inserted record """ if not isinstance(multipart, bool): raise InvalidUsage('Multipart must be of type bool') resource = self.resource if name is None: name = os.path.basename(file_path) resource.parameters.add_custom({ 'table_name': self.table_name, 'table_sys_id': sys_id, 'file_name': name }) data = open(file_path, 'rb').read() headers = {} if multipart: headers["Content-Type"] = "multipart/form-data" path_append = '/upload' else: headers["Content-Type"] = "text/plain" path_append = '/file' return resource.request(method='POST', data=data, headers=headers, path_append=path_append)
[ "def", "upload", "(", "self", ",", "sys_id", ",", "file_path", ",", "name", "=", "None", ",", "multipart", "=", "False", ")", ":", "if", "not", "isinstance", "(", "multipart", ",", "bool", ")", ":", "raise", "InvalidUsage", "(", "'Multipart must be of type...
Attaches a new file to the provided record :param sys_id: the sys_id of the record to attach the file to :param file_path: local absolute path of the file to upload :param name: custom name for the uploaded file (instead of basename) :param multipart: whether or not to use multipart :return: the inserted record
[ "Attaches", "a", "new", "file", "to", "the", "provided", "record" ]
87c8ce0d3a089c2f59247f30efbd545fcdb8e985
https://github.com/rbw/pysnow/blob/87c8ce0d3a089c2f59247f30efbd545fcdb8e985/pysnow/attachment.py#L31-L65
train
208,534
rbw/pysnow
pysnow/url_builder.py
URLBuilder.validate_path
def validate_path(path): """Validates the provided path :param path: path to validate (string) :raise: :InvalidUsage: If validation fails. """ if not isinstance(path, six.string_types) or not re.match('^/(?:[._a-zA-Z0-9-]/?)+[^/]$', path): raise InvalidUsage( "Path validation failed - Expected: '/<component>[/component], got: %s" % path ) return True
python
def validate_path(path): """Validates the provided path :param path: path to validate (string) :raise: :InvalidUsage: If validation fails. """ if not isinstance(path, six.string_types) or not re.match('^/(?:[._a-zA-Z0-9-]/?)+[^/]$', path): raise InvalidUsage( "Path validation failed - Expected: '/<component>[/component], got: %s" % path ) return True
[ "def", "validate_path", "(", "path", ")", ":", "if", "not", "isinstance", "(", "path", ",", "six", ".", "string_types", ")", "or", "not", "re", ".", "match", "(", "'^/(?:[._a-zA-Z0-9-]/?)+[^/]$'", ",", "path", ")", ":", "raise", "InvalidUsage", "(", "\"Pat...
Validates the provided path :param path: path to validate (string) :raise: :InvalidUsage: If validation fails.
[ "Validates", "the", "provided", "path" ]
87c8ce0d3a089c2f59247f30efbd545fcdb8e985
https://github.com/rbw/pysnow/blob/87c8ce0d3a089c2f59247f30efbd545fcdb8e985/pysnow/url_builder.py#L23-L36
train
208,535
rbw/pysnow
pysnow/url_builder.py
URLBuilder.get_base_url
def get_base_url(use_ssl, instance=None, host=None): """Formats the base URL either `host` or `instance` :return: Base URL string """ if instance is not None: host = ("%s.service-now.com" % instance).rstrip('/') if use_ssl is True: return "https://%s" % host return "http://%s" % host
python
def get_base_url(use_ssl, instance=None, host=None): """Formats the base URL either `host` or `instance` :return: Base URL string """ if instance is not None: host = ("%s.service-now.com" % instance).rstrip('/') if use_ssl is True: return "https://%s" % host return "http://%s" % host
[ "def", "get_base_url", "(", "use_ssl", ",", "instance", "=", "None", ",", "host", "=", "None", ")", ":", "if", "instance", "is", "not", "None", ":", "host", "=", "(", "\"%s.service-now.com\"", "%", "instance", ")", ".", "rstrip", "(", "'/'", ")", "if",...
Formats the base URL either `host` or `instance` :return: Base URL string
[ "Formats", "the", "base", "URL", "either", "host", "or", "instance" ]
87c8ce0d3a089c2f59247f30efbd545fcdb8e985
https://github.com/rbw/pysnow/blob/87c8ce0d3a089c2f59247f30efbd545fcdb8e985/pysnow/url_builder.py#L39-L51
train
208,536
rbw/pysnow
pysnow/oauth_client.py
OAuthClient._get_oauth_session
def _get_oauth_session(self): """Creates a new OAuth session :return: - OAuth2Session object """ return self._get_session( OAuth2Session( client_id=self.client_id, token=self.token, token_updater=self.token_updater, auto_refresh_url=self.token_url, auto_refresh_kwargs={ "client_id": self.client_id, "client_secret": self.client_secret } ) )
python
def _get_oauth_session(self): """Creates a new OAuth session :return: - OAuth2Session object """ return self._get_session( OAuth2Session( client_id=self.client_id, token=self.token, token_updater=self.token_updater, auto_refresh_url=self.token_url, auto_refresh_kwargs={ "client_id": self.client_id, "client_secret": self.client_secret } ) )
[ "def", "_get_oauth_session", "(", "self", ")", ":", "return", "self", ".", "_get_session", "(", "OAuth2Session", "(", "client_id", "=", "self", ".", "client_id", ",", "token", "=", "self", ".", "token", ",", "token_updater", "=", "self", ".", "token_updater"...
Creates a new OAuth session :return: - OAuth2Session object
[ "Creates", "a", "new", "OAuth", "session" ]
87c8ce0d3a089c2f59247f30efbd545fcdb8e985
https://github.com/rbw/pysnow/blob/87c8ce0d3a089c2f59247f30efbd545fcdb8e985/pysnow/oauth_client.py#L48-L66
train
208,537
rbw/pysnow
pysnow/oauth_client.py
OAuthClient.set_token
def set_token(self, token): """Validate and set token :param token: the token (dict) to set """ if not token: self.token = None return expected_keys = ['token_type', 'refresh_token', 'access_token', 'scope', 'expires_in', 'expires_at'] if not isinstance(token, dict) or not set(token) >= set(expected_keys): raise InvalidUsage("Expected a token dictionary containing the following keys: {0}" .format(expected_keys)) # Set sanitized token self.token = dict((k, v) for k, v in token.items() if k in expected_keys)
python
def set_token(self, token): """Validate and set token :param token: the token (dict) to set """ if not token: self.token = None return expected_keys = ['token_type', 'refresh_token', 'access_token', 'scope', 'expires_in', 'expires_at'] if not isinstance(token, dict) or not set(token) >= set(expected_keys): raise InvalidUsage("Expected a token dictionary containing the following keys: {0}" .format(expected_keys)) # Set sanitized token self.token = dict((k, v) for k, v in token.items() if k in expected_keys)
[ "def", "set_token", "(", "self", ",", "token", ")", ":", "if", "not", "token", ":", "self", ".", "token", "=", "None", "return", "expected_keys", "=", "[", "'token_type'", ",", "'refresh_token'", ",", "'access_token'", ",", "'scope'", ",", "'expires_in'", ...
Validate and set token :param token: the token (dict) to set
[ "Validate", "and", "set", "token" ]
87c8ce0d3a089c2f59247f30efbd545fcdb8e985
https://github.com/rbw/pysnow/blob/87c8ce0d3a089c2f59247f30efbd545fcdb8e985/pysnow/oauth_client.py#L68-L84
train
208,538
rbw/pysnow
pysnow/oauth_client.py
OAuthClient.generate_token
def generate_token(self, user, password): """Takes user and password credentials and generates a new token :param user: user :param password: password :return: - dictionary containing token data :raises: - TokenCreateError: If there was an error generating the new token """ logger.debug('(TOKEN_CREATE) :: User: %s' % user) session = OAuth2Session(client=LegacyApplicationClient(client_id=self.client_id)) try: return dict(session.fetch_token(token_url=self.token_url, username=user, password=password, client_id=self.client_id, client_secret=self.client_secret)) except OAuth2Error as exception: raise TokenCreateError('Error creating user token', exception.description, exception.status_code)
python
def generate_token(self, user, password): """Takes user and password credentials and generates a new token :param user: user :param password: password :return: - dictionary containing token data :raises: - TokenCreateError: If there was an error generating the new token """ logger.debug('(TOKEN_CREATE) :: User: %s' % user) session = OAuth2Session(client=LegacyApplicationClient(client_id=self.client_id)) try: return dict(session.fetch_token(token_url=self.token_url, username=user, password=password, client_id=self.client_id, client_secret=self.client_secret)) except OAuth2Error as exception: raise TokenCreateError('Error creating user token', exception.description, exception.status_code)
[ "def", "generate_token", "(", "self", ",", "user", ",", "password", ")", ":", "logger", ".", "debug", "(", "'(TOKEN_CREATE) :: User: %s'", "%", "user", ")", "session", "=", "OAuth2Session", "(", "client", "=", "LegacyApplicationClient", "(", "client_id", "=", ...
Takes user and password credentials and generates a new token :param user: user :param password: password :return: - dictionary containing token data :raises: - TokenCreateError: If there was an error generating the new token
[ "Takes", "user", "and", "password", "credentials", "and", "generates", "a", "new", "token" ]
87c8ce0d3a089c2f59247f30efbd545fcdb8e985
https://github.com/rbw/pysnow/blob/87c8ce0d3a089c2f59247f30efbd545fcdb8e985/pysnow/oauth_client.py#L121-L143
train
208,539
rbw/pysnow
pysnow/query_builder.py
QueryBuilder.order_descending
def order_descending(self): """Sets ordering of field descending""" self._query.append('ORDERBYDESC{0}'.format(self.current_field)) self.c_oper = inspect.currentframe().f_back.f_code.co_name return self
python
def order_descending(self): """Sets ordering of field descending""" self._query.append('ORDERBYDESC{0}'.format(self.current_field)) self.c_oper = inspect.currentframe().f_back.f_code.co_name return self
[ "def", "order_descending", "(", "self", ")", ":", "self", ".", "_query", ".", "append", "(", "'ORDERBYDESC{0}'", ".", "format", "(", "self", ".", "current_field", ")", ")", "self", ".", "c_oper", "=", "inspect", ".", "currentframe", "(", ")", ".", "f_bac...
Sets ordering of field descending
[ "Sets", "ordering", "of", "field", "descending" ]
87c8ce0d3a089c2f59247f30efbd545fcdb8e985
https://github.com/rbw/pysnow/blob/87c8ce0d3a089c2f59247f30efbd545fcdb8e985/pysnow/query_builder.py#L31-L36
train
208,540
rbw/pysnow
pysnow/query_builder.py
QueryBuilder.equals
def equals(self, data): """Adds new `IN` or `=` condition depending on if a list or string was provided :param data: string or list of values :raise: - QueryTypeError: if `data` is of an unexpected type """ if isinstance(data, six.string_types): return self._add_condition('=', data, types=[int, str]) elif isinstance(data, list): return self._add_condition('IN', ",".join(map(str, data)), types=[str]) raise QueryTypeError('Expected value of type `str` or `list`, not %s' % type(data))
python
def equals(self, data): """Adds new `IN` or `=` condition depending on if a list or string was provided :param data: string or list of values :raise: - QueryTypeError: if `data` is of an unexpected type """ if isinstance(data, six.string_types): return self._add_condition('=', data, types=[int, str]) elif isinstance(data, list): return self._add_condition('IN', ",".join(map(str, data)), types=[str]) raise QueryTypeError('Expected value of type `str` or `list`, not %s' % type(data))
[ "def", "equals", "(", "self", ",", "data", ")", ":", "if", "isinstance", "(", "data", ",", "six", ".", "string_types", ")", ":", "return", "self", ".", "_add_condition", "(", "'='", ",", "data", ",", "types", "=", "[", "int", ",", "str", "]", ")", ...
Adds new `IN` or `=` condition depending on if a list or string was provided :param data: string or list of values :raise: - QueryTypeError: if `data` is of an unexpected type
[ "Adds", "new", "IN", "or", "=", "condition", "depending", "on", "if", "a", "list", "or", "string", "was", "provided" ]
87c8ce0d3a089c2f59247f30efbd545fcdb8e985
https://github.com/rbw/pysnow/blob/87c8ce0d3a089c2f59247f30efbd545fcdb8e985/pysnow/query_builder.py#L87-L100
train
208,541
rbw/pysnow
pysnow/query_builder.py
QueryBuilder.greater_than
def greater_than(self, greater_than): """Adds new `>` condition :param greater_than: str or datetime compatible object (naive UTC datetime or tz-aware datetime) :raise: - QueryTypeError: if `greater_than` is of an unexpected type """ if hasattr(greater_than, 'strftime'): greater_than = datetime_as_utc(greater_than).strftime('%Y-%m-%d %H:%M:%S') elif isinstance(greater_than, six.string_types): raise QueryTypeError('Expected value of type `int` or instance of `datetime`, not %s' % type(greater_than)) return self._add_condition('>', greater_than, types=[int, str])
python
def greater_than(self, greater_than): """Adds new `>` condition :param greater_than: str or datetime compatible object (naive UTC datetime or tz-aware datetime) :raise: - QueryTypeError: if `greater_than` is of an unexpected type """ if hasattr(greater_than, 'strftime'): greater_than = datetime_as_utc(greater_than).strftime('%Y-%m-%d %H:%M:%S') elif isinstance(greater_than, six.string_types): raise QueryTypeError('Expected value of type `int` or instance of `datetime`, not %s' % type(greater_than)) return self._add_condition('>', greater_than, types=[int, str])
[ "def", "greater_than", "(", "self", ",", "greater_than", ")", ":", "if", "hasattr", "(", "greater_than", ",", "'strftime'", ")", ":", "greater_than", "=", "datetime_as_utc", "(", "greater_than", ")", ".", "strftime", "(", "'%Y-%m-%d %H:%M:%S'", ")", "elif", "i...
Adds new `>` condition :param greater_than: str or datetime compatible object (naive UTC datetime or tz-aware datetime) :raise: - QueryTypeError: if `greater_than` is of an unexpected type
[ "Adds", "new", ">", "condition" ]
87c8ce0d3a089c2f59247f30efbd545fcdb8e985
https://github.com/rbw/pysnow/blob/87c8ce0d3a089c2f59247f30efbd545fcdb8e985/pysnow/query_builder.py#L117-L130
train
208,542
rbw/pysnow
pysnow/query_builder.py
QueryBuilder.less_than
def less_than(self, less_than): """Adds new `<` condition :param less_than: str or datetime compatible object (naive UTC datetime or tz-aware datetime) :raise: - QueryTypeError: if `less_than` is of an unexpected type """ if hasattr(less_than, 'strftime'): less_than = datetime_as_utc(less_than).strftime('%Y-%m-%d %H:%M:%S') elif isinstance(less_than, six.string_types): raise QueryTypeError('Expected value of type `int` or instance of `datetime`, not %s' % type(less_than)) return self._add_condition('<', less_than, types=[int, str])
python
def less_than(self, less_than): """Adds new `<` condition :param less_than: str or datetime compatible object (naive UTC datetime or tz-aware datetime) :raise: - QueryTypeError: if `less_than` is of an unexpected type """ if hasattr(less_than, 'strftime'): less_than = datetime_as_utc(less_than).strftime('%Y-%m-%d %H:%M:%S') elif isinstance(less_than, six.string_types): raise QueryTypeError('Expected value of type `int` or instance of `datetime`, not %s' % type(less_than)) return self._add_condition('<', less_than, types=[int, str])
[ "def", "less_than", "(", "self", ",", "less_than", ")", ":", "if", "hasattr", "(", "less_than", ",", "'strftime'", ")", ":", "less_than", "=", "datetime_as_utc", "(", "less_than", ")", ".", "strftime", "(", "'%Y-%m-%d %H:%M:%S'", ")", "elif", "isinstance", "...
Adds new `<` condition :param less_than: str or datetime compatible object (naive UTC datetime or tz-aware datetime) :raise: - QueryTypeError: if `less_than` is of an unexpected type
[ "Adds", "new", "<", "condition" ]
87c8ce0d3a089c2f59247f30efbd545fcdb8e985
https://github.com/rbw/pysnow/blob/87c8ce0d3a089c2f59247f30efbd545fcdb8e985/pysnow/query_builder.py#L132-L145
train
208,543
rbw/pysnow
pysnow/query_builder.py
QueryBuilder.between
def between(self, start, end): """Adds new `BETWEEN` condition :param start: int or datetime compatible object (in SNOW user's timezone) :param end: int or datetime compatible object (in SNOW user's timezone) :raise: - QueryTypeError: if start or end arguments is of an invalid type """ if hasattr(start, 'strftime') and hasattr(end, 'strftime'): dt_between = ( 'javascript:gs.dateGenerate("%(start)s")' "@" 'javascript:gs.dateGenerate("%(end)s")' ) % { 'start': start.strftime('%Y-%m-%d %H:%M:%S'), 'end': end.strftime('%Y-%m-%d %H:%M:%S') } elif isinstance(start, int) and isinstance(end, int): dt_between = '%d@%d' % (start, end) else: raise QueryTypeError("Expected `start` and `end` of type `int` " "or instance of `datetime`, not %s and %s" % (type(start), type(end))) return self._add_condition('BETWEEN', dt_between, types=[str])
python
def between(self, start, end): """Adds new `BETWEEN` condition :param start: int or datetime compatible object (in SNOW user's timezone) :param end: int or datetime compatible object (in SNOW user's timezone) :raise: - QueryTypeError: if start or end arguments is of an invalid type """ if hasattr(start, 'strftime') and hasattr(end, 'strftime'): dt_between = ( 'javascript:gs.dateGenerate("%(start)s")' "@" 'javascript:gs.dateGenerate("%(end)s")' ) % { 'start': start.strftime('%Y-%m-%d %H:%M:%S'), 'end': end.strftime('%Y-%m-%d %H:%M:%S') } elif isinstance(start, int) and isinstance(end, int): dt_between = '%d@%d' % (start, end) else: raise QueryTypeError("Expected `start` and `end` of type `int` " "or instance of `datetime`, not %s and %s" % (type(start), type(end))) return self._add_condition('BETWEEN', dt_between, types=[str])
[ "def", "between", "(", "self", ",", "start", ",", "end", ")", ":", "if", "hasattr", "(", "start", ",", "'strftime'", ")", "and", "hasattr", "(", "end", ",", "'strftime'", ")", ":", "dt_between", "=", "(", "'javascript:gs.dateGenerate(\"%(start)s\")'", "\"@\"...
Adds new `BETWEEN` condition :param start: int or datetime compatible object (in SNOW user's timezone) :param end: int or datetime compatible object (in SNOW user's timezone) :raise: - QueryTypeError: if start or end arguments is of an invalid type
[ "Adds", "new", "BETWEEN", "condition" ]
87c8ce0d3a089c2f59247f30efbd545fcdb8e985
https://github.com/rbw/pysnow/blob/87c8ce0d3a089c2f59247f30efbd545fcdb8e985/pysnow/query_builder.py#L147-L171
train
208,544
rbw/pysnow
pysnow/query_builder.py
QueryBuilder._add_condition
def _add_condition(self, operator, operand, types): """Appends condition to self._query after performing validation :param operator: operator (str) :param operand: operand :param types: allowed types :raise: - QueryMissingField: if a field hasn't been set - QueryMultipleExpressions: if a condition already has been set - QueryTypeError: if the value is of an unexpected type """ if not self.current_field: raise QueryMissingField("Conditions requires a field()") elif not type(operand) in types: caller = inspect.currentframe().f_back.f_code.co_name raise QueryTypeError("Invalid type passed to %s() , expected: %s" % (caller, types)) elif self.c_oper: raise QueryMultipleExpressions("Expected logical operator after expression") self.c_oper = inspect.currentframe().f_back.f_code.co_name self._query.append("%(current_field)s%(operator)s%(operand)s" % { 'current_field': self.current_field, 'operator': operator, 'operand': operand }) return self
python
def _add_condition(self, operator, operand, types): """Appends condition to self._query after performing validation :param operator: operator (str) :param operand: operand :param types: allowed types :raise: - QueryMissingField: if a field hasn't been set - QueryMultipleExpressions: if a condition already has been set - QueryTypeError: if the value is of an unexpected type """ if not self.current_field: raise QueryMissingField("Conditions requires a field()") elif not type(operand) in types: caller = inspect.currentframe().f_back.f_code.co_name raise QueryTypeError("Invalid type passed to %s() , expected: %s" % (caller, types)) elif self.c_oper: raise QueryMultipleExpressions("Expected logical operator after expression") self.c_oper = inspect.currentframe().f_back.f_code.co_name self._query.append("%(current_field)s%(operator)s%(operand)s" % { 'current_field': self.current_field, 'operator': operator, 'operand': operand }) return self
[ "def", "_add_condition", "(", "self", ",", "operator", ",", "operand", ",", "types", ")", ":", "if", "not", "self", ".", "current_field", ":", "raise", "QueryMissingField", "(", "\"Conditions requires a field()\"", ")", "elif", "not", "type", "(", "operand", "...
Appends condition to self._query after performing validation :param operator: operator (str) :param operand: operand :param types: allowed types :raise: - QueryMissingField: if a field hasn't been set - QueryMultipleExpressions: if a condition already has been set - QueryTypeError: if the value is of an unexpected type
[ "Appends", "condition", "to", "self", ".", "_query", "after", "performing", "validation" ]
87c8ce0d3a089c2f59247f30efbd545fcdb8e985
https://github.com/rbw/pysnow/blob/87c8ce0d3a089c2f59247f30efbd545fcdb8e985/pysnow/query_builder.py#L185-L215
train
208,545
rbw/pysnow
pysnow/query_builder.py
QueryBuilder._add_logical_operator
def _add_logical_operator(self, operator): """Adds a logical operator in query :param operator: logical operator (str) :raise: - QueryExpressionError: if a expression hasn't been set """ if not self.c_oper: raise QueryExpressionError("Logical operators must be preceded by an expression") self.current_field = None self.c_oper = None self.l_oper = inspect.currentframe().f_back.f_code.co_name self._query.append(operator) return self
python
def _add_logical_operator(self, operator): """Adds a logical operator in query :param operator: logical operator (str) :raise: - QueryExpressionError: if a expression hasn't been set """ if not self.c_oper: raise QueryExpressionError("Logical operators must be preceded by an expression") self.current_field = None self.c_oper = None self.l_oper = inspect.currentframe().f_back.f_code.co_name self._query.append(operator) return self
[ "def", "_add_logical_operator", "(", "self", ",", "operator", ")", ":", "if", "not", "self", ".", "c_oper", ":", "raise", "QueryExpressionError", "(", "\"Logical operators must be preceded by an expression\"", ")", "self", ".", "current_field", "=", "None", "self", ...
Adds a logical operator in query :param operator: logical operator (str) :raise: - QueryExpressionError: if a expression hasn't been set
[ "Adds", "a", "logical", "operator", "in", "query" ]
87c8ce0d3a089c2f59247f30efbd545fcdb8e985
https://github.com/rbw/pysnow/blob/87c8ce0d3a089c2f59247f30efbd545fcdb8e985/pysnow/query_builder.py#L217-L233
train
208,546
rbw/pysnow
pysnow/params_builder.py
ParamsBuilder.add_custom
def add_custom(self, params): """Adds new custom parameter after making sure it's of type dict. :param params: Dictionary containing one or more parameters """ if isinstance(params, dict) is False: raise InvalidUsage("custom parameters must be of type `dict`") self._custom_params.update(params)
python
def add_custom(self, params): """Adds new custom parameter after making sure it's of type dict. :param params: Dictionary containing one or more parameters """ if isinstance(params, dict) is False: raise InvalidUsage("custom parameters must be of type `dict`") self._custom_params.update(params)
[ "def", "add_custom", "(", "self", ",", "params", ")", ":", "if", "isinstance", "(", "params", ",", "dict", ")", "is", "False", ":", "raise", "InvalidUsage", "(", "\"custom parameters must be of type `dict`\"", ")", "self", ".", "_custom_params", ".", "update", ...
Adds new custom parameter after making sure it's of type dict. :param params: Dictionary containing one or more parameters
[ "Adds", "new", "custom", "parameter", "after", "making", "sure", "it", "s", "of", "type", "dict", "." ]
87c8ce0d3a089c2f59247f30efbd545fcdb8e985
https://github.com/rbw/pysnow/blob/87c8ce0d3a089c2f59247f30efbd545fcdb8e985/pysnow/params_builder.py#L48-L57
train
208,547
rbw/pysnow
pysnow/params_builder.py
ParamsBuilder.offset
def offset(self, offset): """Sets `sysparm_offset`, usually used to accomplish pagination :param offset: Number of records to skip before fetching records :raise: :InvalidUsage: if offset is of an unexpected type """ if not isinstance(offset, int) or isinstance(offset, bool): raise InvalidUsage('Offset must be an integer') self._sysparms['sysparm_offset'] = offset
python
def offset(self, offset): """Sets `sysparm_offset`, usually used to accomplish pagination :param offset: Number of records to skip before fetching records :raise: :InvalidUsage: if offset is of an unexpected type """ if not isinstance(offset, int) or isinstance(offset, bool): raise InvalidUsage('Offset must be an integer') self._sysparms['sysparm_offset'] = offset
[ "def", "offset", "(", "self", ",", "offset", ")", ":", "if", "not", "isinstance", "(", "offset", ",", "int", ")", "or", "isinstance", "(", "offset", ",", "bool", ")", ":", "raise", "InvalidUsage", "(", "'Offset must be an integer'", ")", "self", ".", "_s...
Sets `sysparm_offset`, usually used to accomplish pagination :param offset: Number of records to skip before fetching records :raise: :InvalidUsage: if offset is of an unexpected type
[ "Sets", "sysparm_offset", "usually", "used", "to", "accomplish", "pagination" ]
87c8ce0d3a089c2f59247f30efbd545fcdb8e985
https://github.com/rbw/pysnow/blob/87c8ce0d3a089c2f59247f30efbd545fcdb8e985/pysnow/params_builder.py#L118-L129
train
208,548
rbw/pysnow
pysnow/params_builder.py
ParamsBuilder.fields
def fields(self, fields): """Sets `sysparm_fields` after joining the given list of `fields` :param fields: List of fields to include in the response :raise: :InvalidUsage: if fields is of an unexpected type """ if not isinstance(fields, list): raise InvalidUsage('fields must be of type `list`') self._sysparms['sysparm_fields'] = ",".join(fields)
python
def fields(self, fields): """Sets `sysparm_fields` after joining the given list of `fields` :param fields: List of fields to include in the response :raise: :InvalidUsage: if fields is of an unexpected type """ if not isinstance(fields, list): raise InvalidUsage('fields must be of type `list`') self._sysparms['sysparm_fields'] = ",".join(fields)
[ "def", "fields", "(", "self", ",", "fields", ")", ":", "if", "not", "isinstance", "(", "fields", ",", "list", ")", ":", "raise", "InvalidUsage", "(", "'fields must be of type `list`'", ")", "self", ".", "_sysparms", "[", "'sysparm_fields'", "]", "=", "\",\""...
Sets `sysparm_fields` after joining the given list of `fields` :param fields: List of fields to include in the response :raise: :InvalidUsage: if fields is of an unexpected type
[ "Sets", "sysparm_fields", "after", "joining", "the", "given", "list", "of", "fields" ]
87c8ce0d3a089c2f59247f30efbd545fcdb8e985
https://github.com/rbw/pysnow/blob/87c8ce0d3a089c2f59247f30efbd545fcdb8e985/pysnow/params_builder.py#L137-L148
train
208,549
rbw/pysnow
pysnow/params_builder.py
ParamsBuilder.exclude_reference_link
def exclude_reference_link(self, exclude): """Sets `sysparm_exclude_reference_link` to a bool value :param exclude: bool """ if not isinstance(exclude, bool): raise InvalidUsage('exclude_reference_link must be of type bool') self._sysparms['sysparm_exclude_reference_link'] = exclude
python
def exclude_reference_link(self, exclude): """Sets `sysparm_exclude_reference_link` to a bool value :param exclude: bool """ if not isinstance(exclude, bool): raise InvalidUsage('exclude_reference_link must be of type bool') self._sysparms['sysparm_exclude_reference_link'] = exclude
[ "def", "exclude_reference_link", "(", "self", ",", "exclude", ")", ":", "if", "not", "isinstance", "(", "exclude", ",", "bool", ")", ":", "raise", "InvalidUsage", "(", "'exclude_reference_link must be of type bool'", ")", "self", ".", "_sysparms", "[", "'sysparm_e...
Sets `sysparm_exclude_reference_link` to a bool value :param exclude: bool
[ "Sets", "sysparm_exclude_reference_link", "to", "a", "bool", "value" ]
87c8ce0d3a089c2f59247f30efbd545fcdb8e985
https://github.com/rbw/pysnow/blob/87c8ce0d3a089c2f59247f30efbd545fcdb8e985/pysnow/params_builder.py#L156-L164
train
208,550
rbw/pysnow
pysnow/params_builder.py
ParamsBuilder.suppress_pagination_header
def suppress_pagination_header(self, suppress): """Enables or disables pagination header by setting `sysparm_suppress_pagination_header` :param suppress: bool """ if not isinstance(suppress, bool): raise InvalidUsage('suppress_pagination_header must be of type bool') self._sysparms['sysparm_suppress_pagination_header'] = suppress
python
def suppress_pagination_header(self, suppress): """Enables or disables pagination header by setting `sysparm_suppress_pagination_header` :param suppress: bool """ if not isinstance(suppress, bool): raise InvalidUsage('suppress_pagination_header must be of type bool') self._sysparms['sysparm_suppress_pagination_header'] = suppress
[ "def", "suppress_pagination_header", "(", "self", ",", "suppress", ")", ":", "if", "not", "isinstance", "(", "suppress", ",", "bool", ")", ":", "raise", "InvalidUsage", "(", "'suppress_pagination_header must be of type bool'", ")", "self", ".", "_sysparms", "[", "...
Enables or disables pagination header by setting `sysparm_suppress_pagination_header` :param suppress: bool
[ "Enables", "or", "disables", "pagination", "header", "by", "setting", "sysparm_suppress_pagination_header" ]
87c8ce0d3a089c2f59247f30efbd545fcdb8e985
https://github.com/rbw/pysnow/blob/87c8ce0d3a089c2f59247f30efbd545fcdb8e985/pysnow/params_builder.py#L172-L180
train
208,551
TomasTomecek/sen
sen/tui/ui.py
UI.quit
def quit(self): """ This could be called from another thread, so let's do this via alarm """ def q(*args): raise urwid.ExitMainLoop() self.worker.shutdown(wait=False) self.ui_worker.shutdown(wait=False) self.loop.set_alarm_in(0, q)
python
def quit(self): """ This could be called from another thread, so let's do this via alarm """ def q(*args): raise urwid.ExitMainLoop() self.worker.shutdown(wait=False) self.ui_worker.shutdown(wait=False) self.loop.set_alarm_in(0, q)
[ "def", "quit", "(", "self", ")", ":", "def", "q", "(", "*", "args", ")", ":", "raise", "urwid", ".", "ExitMainLoop", "(", ")", "self", ".", "worker", ".", "shutdown", "(", "wait", "=", "False", ")", "self", ".", "ui_worker", ".", "shutdown", "(", ...
This could be called from another thread, so let's do this via alarm
[ "This", "could", "be", "called", "from", "another", "thread", "so", "let", "s", "do", "this", "via", "alarm" ]
239b4868125814e8bf5527708119fc08b35f6cc0
https://github.com/TomasTomecek/sen/blob/239b4868125814e8bf5527708119fc08b35f6cc0/sen/tui/ui.py#L82-L90
train
208,552
TomasTomecek/sen
sen/tui/ui.py
UI._set_main_widget
def _set_main_widget(self, widget, redraw): """ add provided widget to widget list and display it :param widget: :return: """ self.set_body(widget) self.reload_footer() if redraw: logger.debug("redraw main widget") self.refresh()
python
def _set_main_widget(self, widget, redraw): """ add provided widget to widget list and display it :param widget: :return: """ self.set_body(widget) self.reload_footer() if redraw: logger.debug("redraw main widget") self.refresh()
[ "def", "_set_main_widget", "(", "self", ",", "widget", ",", "redraw", ")", ":", "self", ".", "set_body", "(", "widget", ")", "self", ".", "reload_footer", "(", ")", "if", "redraw", ":", "logger", ".", "debug", "(", "\"redraw main widget\"", ")", "self", ...
add provided widget to widget list and display it :param widget: :return:
[ "add", "provided", "widget", "to", "widget", "list", "and", "display", "it" ]
239b4868125814e8bf5527708119fc08b35f6cc0
https://github.com/TomasTomecek/sen/blob/239b4868125814e8bf5527708119fc08b35f6cc0/sen/tui/ui.py#L93-L104
train
208,553
TomasTomecek/sen
sen/tui/ui.py
UI.display_buffer
def display_buffer(self, buffer, redraw=True): """ display provided buffer :param buffer: Buffer :return: """ logger.debug("display buffer %r", buffer) self.buffer_movement_history.append(buffer) self.current_buffer = buffer self._set_main_widget(buffer.widget, redraw=redraw)
python
def display_buffer(self, buffer, redraw=True): """ display provided buffer :param buffer: Buffer :return: """ logger.debug("display buffer %r", buffer) self.buffer_movement_history.append(buffer) self.current_buffer = buffer self._set_main_widget(buffer.widget, redraw=redraw)
[ "def", "display_buffer", "(", "self", ",", "buffer", ",", "redraw", "=", "True", ")", ":", "logger", ".", "debug", "(", "\"display buffer %r\"", ",", "buffer", ")", "self", ".", "buffer_movement_history", ".", "append", "(", "buffer", ")", "self", ".", "cu...
display provided buffer :param buffer: Buffer :return:
[ "display", "provided", "buffer" ]
239b4868125814e8bf5527708119fc08b35f6cc0
https://github.com/TomasTomecek/sen/blob/239b4868125814e8bf5527708119fc08b35f6cc0/sen/tui/ui.py#L106-L116
train
208,554
TomasTomecek/sen
sen/tui/ui.py
UI.add_and_display_buffer
def add_and_display_buffer(self, buffer, redraw=True): """ add provided buffer to buffer list and display it :param buffer: :return: """ # FIXME: some buffers have arguments, do a proper comparison -- override __eq__ if buffer not in self.buffers: logger.debug("adding new buffer {!r}".format(buffer)) self.buffers.append(buffer) self.display_buffer(buffer, redraw=redraw)
python
def add_and_display_buffer(self, buffer, redraw=True): """ add provided buffer to buffer list and display it :param buffer: :return: """ # FIXME: some buffers have arguments, do a proper comparison -- override __eq__ if buffer not in self.buffers: logger.debug("adding new buffer {!r}".format(buffer)) self.buffers.append(buffer) self.display_buffer(buffer, redraw=redraw)
[ "def", "add_and_display_buffer", "(", "self", ",", "buffer", ",", "redraw", "=", "True", ")", ":", "# FIXME: some buffers have arguments, do a proper comparison -- override __eq__", "if", "buffer", "not", "in", "self", ".", "buffers", ":", "logger", ".", "debug", "(",...
add provided buffer to buffer list and display it :param buffer: :return:
[ "add", "provided", "buffer", "to", "buffer", "list", "and", "display", "it" ]
239b4868125814e8bf5527708119fc08b35f6cc0
https://github.com/TomasTomecek/sen/blob/239b4868125814e8bf5527708119fc08b35f6cc0/sen/tui/ui.py#L118-L129
train
208,555
TomasTomecek/sen
sen/tui/ui.py
UI.pick_and_display_buffer
def pick_and_display_buffer(self, i): """ pick i-th buffer from list and display it :param i: int :return: None """ if len(self.buffers) == 1: # we don't need to display anything # listing is already displayed return else: try: self.display_buffer(self.buffers[i]) except IndexError: # i > len self.display_buffer(self.buffers[0])
python
def pick_and_display_buffer(self, i): """ pick i-th buffer from list and display it :param i: int :return: None """ if len(self.buffers) == 1: # we don't need to display anything # listing is already displayed return else: try: self.display_buffer(self.buffers[i]) except IndexError: # i > len self.display_buffer(self.buffers[0])
[ "def", "pick_and_display_buffer", "(", "self", ",", "i", ")", ":", "if", "len", "(", "self", ".", "buffers", ")", "==", "1", ":", "# we don't need to display anything", "# listing is already displayed", "return", "else", ":", "try", ":", "self", ".", "display_bu...
pick i-th buffer from list and display it :param i: int :return: None
[ "pick", "i", "-", "th", "buffer", "from", "list", "and", "display", "it" ]
239b4868125814e8bf5527708119fc08b35f6cc0
https://github.com/TomasTomecek/sen/blob/239b4868125814e8bf5527708119fc08b35f6cc0/sen/tui/ui.py#L131-L147
train
208,556
TomasTomecek/sen
sen/tui/ui.py
ThreadSafeLoop.refresh
def refresh(self): """ explicitely refresh user interface; useful when changing widgets dynamically """ logger.debug("refresh user interface") try: with self.refresh_lock: self.draw_screen() except AssertionError: logger.warning("application is not running") pass
python
def refresh(self): """ explicitely refresh user interface; useful when changing widgets dynamically """ logger.debug("refresh user interface") try: with self.refresh_lock: self.draw_screen() except AssertionError: logger.warning("application is not running") pass
[ "def", "refresh", "(", "self", ")", ":", "logger", ".", "debug", "(", "\"refresh user interface\"", ")", "try", ":", "with", "self", ".", "refresh_lock", ":", "self", ".", "draw_screen", "(", ")", "except", "AssertionError", ":", "logger", ".", "warning", ...
explicitely refresh user interface; useful when changing widgets dynamically
[ "explicitely", "refresh", "user", "interface", ";", "useful", "when", "changing", "widgets", "dynamically" ]
239b4868125814e8bf5527708119fc08b35f6cc0
https://github.com/TomasTomecek/sen/blob/239b4868125814e8bf5527708119fc08b35f6cc0/sen/tui/ui.py#L350-L360
train
208,557
TomasTomecek/sen
sen/tui/widgets/list/common.py
strip_from_ansi_esc_sequences
def strip_from_ansi_esc_sequences(text): """ find ANSI escape sequences in text and remove them :param text: str :return: list, should be passed to ListBox """ # esc[ + values + control character # h, l, p commands are complicated, let's ignore them seq_regex = r"\x1b\[[0-9;]*[mKJusDCBAfH]" regex = re.compile(seq_regex) start = 0 response = "" for match in regex.finditer(text): end = match.start() response += text[start:end] start = match.end() response += text[start:len(text)] return response
python
def strip_from_ansi_esc_sequences(text): """ find ANSI escape sequences in text and remove them :param text: str :return: list, should be passed to ListBox """ # esc[ + values + control character # h, l, p commands are complicated, let's ignore them seq_regex = r"\x1b\[[0-9;]*[mKJusDCBAfH]" regex = re.compile(seq_regex) start = 0 response = "" for match in regex.finditer(text): end = match.start() response += text[start:end] start = match.end() response += text[start:len(text)] return response
[ "def", "strip_from_ansi_esc_sequences", "(", "text", ")", ":", "# esc[ + values + control character", "# h, l, p commands are complicated, let's ignore them", "seq_regex", "=", "r\"\\x1b\\[[0-9;]*[mKJusDCBAfH]\"", "regex", "=", "re", ".", "compile", "(", "seq_regex", ")", "star...
find ANSI escape sequences in text and remove them :param text: str :return: list, should be passed to ListBox
[ "find", "ANSI", "escape", "sequences", "in", "text", "and", "remove", "them" ]
239b4868125814e8bf5527708119fc08b35f6cc0
https://github.com/TomasTomecek/sen/blob/239b4868125814e8bf5527708119fc08b35f6cc0/sen/tui/widgets/list/common.py#L23-L42
train
208,558
TomasTomecek/sen
sen/tui/init.py
Application.realtime_updates
def realtime_updates(self): """ fetch realtime events from docker and pass them to buffers :return: None """ # TODO: make this available for every buffer logger.info("starting receiving events from docker") it = self.d.realtime_updates() while True: try: event = next(it) except NotifyError as ex: self.ui.notify_message("error when receiving realtime events from docker: %s" % ex, level="error") return # FIXME: we should pass events to all buffers # ATM the buffers can't be rendered since they are not displayed # and hence traceback like this: ListBoxError("Listbox contents too short! ... logger.debug("pass event to current buffer %s", self.ui.current_buffer) try: self.ui.current_buffer.process_realtime_event(event) except Exception as ex: # swallow any exc logger.error("error while processing runtime event: %r", ex)
python
def realtime_updates(self): """ fetch realtime events from docker and pass them to buffers :return: None """ # TODO: make this available for every buffer logger.info("starting receiving events from docker") it = self.d.realtime_updates() while True: try: event = next(it) except NotifyError as ex: self.ui.notify_message("error when receiving realtime events from docker: %s" % ex, level="error") return # FIXME: we should pass events to all buffers # ATM the buffers can't be rendered since they are not displayed # and hence traceback like this: ListBoxError("Listbox contents too short! ... logger.debug("pass event to current buffer %s", self.ui.current_buffer) try: self.ui.current_buffer.process_realtime_event(event) except Exception as ex: # swallow any exc logger.error("error while processing runtime event: %r", ex)
[ "def", "realtime_updates", "(", "self", ")", ":", "# TODO: make this available for every buffer", "logger", ".", "info", "(", "\"starting receiving events from docker\"", ")", "it", "=", "self", ".", "d", ".", "realtime_updates", "(", ")", "while", "True", ":", "try...
fetch realtime events from docker and pass them to buffers :return: None
[ "fetch", "realtime", "events", "from", "docker", "and", "pass", "them", "to", "buffers" ]
239b4868125814e8bf5527708119fc08b35f6cc0
https://github.com/TomasTomecek/sen/blob/239b4868125814e8bf5527708119fc08b35f6cc0/sen/tui/init.py#L36-L60
train
208,559
TomasTomecek/sen
sen/util.py
setup_dirs
def setup_dirs(): """Make required directories to hold logfile. :returns: str """ try: top_dir = os.path.abspath(os.path.expanduser(os.environ["XDG_CACHE_HOME"])) except KeyError: top_dir = os.path.abspath(os.path.expanduser("~/.cache")) our_cache_dir = os.path.join(top_dir, PROJECT_NAME) os.makedirs(our_cache_dir, mode=0o775, exist_ok=True) return our_cache_dir
python
def setup_dirs(): """Make required directories to hold logfile. :returns: str """ try: top_dir = os.path.abspath(os.path.expanduser(os.environ["XDG_CACHE_HOME"])) except KeyError: top_dir = os.path.abspath(os.path.expanduser("~/.cache")) our_cache_dir = os.path.join(top_dir, PROJECT_NAME) os.makedirs(our_cache_dir, mode=0o775, exist_ok=True) return our_cache_dir
[ "def", "setup_dirs", "(", ")", ":", "try", ":", "top_dir", "=", "os", ".", "path", ".", "abspath", "(", "os", ".", "path", ".", "expanduser", "(", "os", ".", "environ", "[", "\"XDG_CACHE_HOME\"", "]", ")", ")", "except", "KeyError", ":", "top_dir", "...
Make required directories to hold logfile. :returns: str
[ "Make", "required", "directories", "to", "hold", "logfile", "." ]
239b4868125814e8bf5527708119fc08b35f6cc0
https://github.com/TomasTomecek/sen/blob/239b4868125814e8bf5527708119fc08b35f6cc0/sen/util.py#L42-L53
train
208,560
TomasTomecek/sen
sen/util.py
humanize_bytes
def humanize_bytes(bytesize, precision=2): """ Humanize byte size figures https://gist.github.com/moird/3684595 """ abbrevs = ( (1 << 50, 'PB'), (1 << 40, 'TB'), (1 << 30, 'GB'), (1 << 20, 'MB'), (1 << 10, 'kB'), (1, 'bytes') ) if bytesize == 1: return '1 byte' for factor, suffix in abbrevs: if bytesize >= factor: break if factor == 1: precision = 0 return '%.*f %s' % (precision, bytesize / float(factor), suffix)
python
def humanize_bytes(bytesize, precision=2): """ Humanize byte size figures https://gist.github.com/moird/3684595 """ abbrevs = ( (1 << 50, 'PB'), (1 << 40, 'TB'), (1 << 30, 'GB'), (1 << 20, 'MB'), (1 << 10, 'kB'), (1, 'bytes') ) if bytesize == 1: return '1 byte' for factor, suffix in abbrevs: if bytesize >= factor: break if factor == 1: precision = 0 return '%.*f %s' % (precision, bytesize / float(factor), suffix)
[ "def", "humanize_bytes", "(", "bytesize", ",", "precision", "=", "2", ")", ":", "abbrevs", "=", "(", "(", "1", "<<", "50", ",", "'PB'", ")", ",", "(", "1", "<<", "40", ",", "'TB'", ")", ",", "(", "1", "<<", "30", ",", "'GB'", ")", ",", "(", ...
Humanize byte size figures https://gist.github.com/moird/3684595
[ "Humanize", "byte", "size", "figures" ]
239b4868125814e8bf5527708119fc08b35f6cc0
https://github.com/TomasTomecek/sen/blob/239b4868125814e8bf5527708119fc08b35f6cc0/sen/util.py#L60-L81
train
208,561
TomasTomecek/sen
sen/docker_backend.py
DockerObject.metadata_get
def metadata_get(self, path, cached=True): """ get metadata from inspect, specified by path :param path: list of str :param cached: bool, use cached version of inspect if available """ try: value = graceful_chain_get(self.inspect(cached=cached).response, *path) except docker.errors.NotFound: logger.warning("object %s is not available anymore", self) raise NotAvailableAnymore() return value
python
def metadata_get(self, path, cached=True): """ get metadata from inspect, specified by path :param path: list of str :param cached: bool, use cached version of inspect if available """ try: value = graceful_chain_get(self.inspect(cached=cached).response, *path) except docker.errors.NotFound: logger.warning("object %s is not available anymore", self) raise NotAvailableAnymore() return value
[ "def", "metadata_get", "(", "self", ",", "path", ",", "cached", "=", "True", ")", ":", "try", ":", "value", "=", "graceful_chain_get", "(", "self", ".", "inspect", "(", "cached", "=", "cached", ")", ".", "response", ",", "*", "path", ")", "except", "...
get metadata from inspect, specified by path :param path: list of str :param cached: bool, use cached version of inspect if available
[ "get", "metadata", "from", "inspect", "specified", "by", "path" ]
239b4868125814e8bf5527708119fc08b35f6cc0
https://github.com/TomasTomecek/sen/blob/239b4868125814e8bf5527708119fc08b35f6cc0/sen/docker_backend.py#L228-L240
train
208,562
TomasTomecek/sen
sen/docker_backend.py
DockerImage.unique_size
def unique_size(self): """ Size of ONLY this particular layer :return: int or None """ self._virtual_size = self._virtual_size or \ graceful_chain_get(self.data, "VirtualSize", default=0) try: return self._virtual_size - self._shared_size except TypeError: return 0
python
def unique_size(self): """ Size of ONLY this particular layer :return: int or None """ self._virtual_size = self._virtual_size or \ graceful_chain_get(self.data, "VirtualSize", default=0) try: return self._virtual_size - self._shared_size except TypeError: return 0
[ "def", "unique_size", "(", "self", ")", ":", "self", ".", "_virtual_size", "=", "self", ".", "_virtual_size", "or", "graceful_chain_get", "(", "self", ".", "data", ",", "\"VirtualSize\"", ",", "default", "=", "0", ")", "try", ":", "return", "self", ".", ...
Size of ONLY this particular layer :return: int or None
[ "Size", "of", "ONLY", "this", "particular", "layer" ]
239b4868125814e8bf5527708119fc08b35f6cc0
https://github.com/TomasTomecek/sen/blob/239b4868125814e8bf5527708119fc08b35f6cc0/sen/docker_backend.py#L384-L395
train
208,563
TomasTomecek/sen
sen/docker_backend.py
DockerContainer.image_id
def image_id(self): """ this container is created from image with id...""" try: # docker >= 1.9 image_id = self.data["ImageID"] except KeyError: # docker <= 1.8 image_id = self.metadata_get(["Image"]) return image_id
python
def image_id(self): """ this container is created from image with id...""" try: # docker >= 1.9 image_id = self.data["ImageID"] except KeyError: # docker <= 1.8 image_id = self.metadata_get(["Image"]) return image_id
[ "def", "image_id", "(", "self", ")", ":", "try", ":", "# docker >= 1.9", "image_id", "=", "self", ".", "data", "[", "\"ImageID\"", "]", "except", "KeyError", ":", "# docker <= 1.8", "image_id", "=", "self", ".", "metadata_get", "(", "[", "\"Image\"", "]", ...
this container is created from image with id...
[ "this", "container", "is", "created", "from", "image", "with", "id", "..." ]
239b4868125814e8bf5527708119fc08b35f6cc0
https://github.com/TomasTomecek/sen/blob/239b4868125814e8bf5527708119fc08b35f6cc0/sen/docker_backend.py#L591-L599
train
208,564
TomasTomecek/sen
sen/docker_backend.py
DockerContainer.net
def net(self): """ get ACTIVE port mappings of a container :return: dict: { "host_port": "container_port" } """ try: return NetData(self.inspect(cached=True).response) except docker.errors.NotFound: raise NotAvailableAnymore()
python
def net(self): """ get ACTIVE port mappings of a container :return: dict: { "host_port": "container_port" } """ try: return NetData(self.inspect(cached=True).response) except docker.errors.NotFound: raise NotAvailableAnymore()
[ "def", "net", "(", "self", ")", ":", "try", ":", "return", "NetData", "(", "self", ".", "inspect", "(", "cached", "=", "True", ")", ".", "response", ")", "except", "docker", ".", "errors", ".", "NotFound", ":", "raise", "NotAvailableAnymore", "(", ")" ...
get ACTIVE port mappings of a container :return: dict: { "host_port": "container_port" }
[ "get", "ACTIVE", "port", "mappings", "of", "a", "container" ]
239b4868125814e8bf5527708119fc08b35f6cc0
https://github.com/TomasTomecek/sen/blob/239b4868125814e8bf5527708119fc08b35f6cc0/sen/docker_backend.py#L612-L624
train
208,565
TomasTomecek/sen
sen/docker_backend.py
DockerContainer.top
def top(self): """ list of processes in a running container :return: None or list of dicts """ # let's get resources from .stats() ps_args = "-eo pid,ppid,wchan,args" # returns {"Processes": [values], "Titles": [values]} # it's easier to play with list of dicts: [{"pid": 1, "ppid": 0}] try: response = self.d.top(self.container_id, ps_args=ps_args) except docker.errors.APIError as ex: logger.warning("error getting processes: %r", ex) return [] # TODO: sort? logger.debug(json.dumps(response, indent=2)) return [dict(zip(response["Titles"], process)) for process in response["Processes"] or []]
python
def top(self): """ list of processes in a running container :return: None or list of dicts """ # let's get resources from .stats() ps_args = "-eo pid,ppid,wchan,args" # returns {"Processes": [values], "Titles": [values]} # it's easier to play with list of dicts: [{"pid": 1, "ppid": 0}] try: response = self.d.top(self.container_id, ps_args=ps_args) except docker.errors.APIError as ex: logger.warning("error getting processes: %r", ex) return [] # TODO: sort? logger.debug(json.dumps(response, indent=2)) return [dict(zip(response["Titles"], process)) for process in response["Processes"] or []]
[ "def", "top", "(", "self", ")", ":", "# let's get resources from .stats()", "ps_args", "=", "\"-eo pid,ppid,wchan,args\"", "# returns {\"Processes\": [values], \"Titles\": [values]}", "# it's easier to play with list of dicts: [{\"pid\": 1, \"ppid\": 0}]", "try", ":", "response", "=", ...
list of processes in a running container :return: None or list of dicts
[ "list", "of", "processes", "in", "a", "running", "container" ]
239b4868125814e8bf5527708119fc08b35f6cc0
https://github.com/TomasTomecek/sen/blob/239b4868125814e8bf5527708119fc08b35f6cc0/sen/docker_backend.py#L708-L726
train
208,566
TomasTomecek/sen
sen/docker_backend.py
DockerBackend.filter
def filter(self, containers=True, images=True, stopped=True, cached=False, sort_by_created=True): """ since django is so awesome, let's use their ORM API :return: """ content = [] containers_o = None images_o = None # return containers when containers=False and running=True if containers or not stopped: containers_o = self.get_containers(cached=cached, stopped=stopped) content += containers_o.response if images: images_o = self.get_images(cached=cached) content += images_o.response if sort_by_created: content.sort(key=attrgetter("natural_sort_value"), reverse=True) return content, containers_o, images_o
python
def filter(self, containers=True, images=True, stopped=True, cached=False, sort_by_created=True): """ since django is so awesome, let's use their ORM API :return: """ content = [] containers_o = None images_o = None # return containers when containers=False and running=True if containers or not stopped: containers_o = self.get_containers(cached=cached, stopped=stopped) content += containers_o.response if images: images_o = self.get_images(cached=cached) content += images_o.response if sort_by_created: content.sort(key=attrgetter("natural_sort_value"), reverse=True) return content, containers_o, images_o
[ "def", "filter", "(", "self", ",", "containers", "=", "True", ",", "images", "=", "True", ",", "stopped", "=", "True", ",", "cached", "=", "False", ",", "sort_by_created", "=", "True", ")", ":", "content", "=", "[", "]", "containers_o", "=", "None", ...
since django is so awesome, let's use their ORM API :return:
[ "since", "django", "is", "so", "awesome", "let", "s", "use", "their", "ORM", "API" ]
239b4868125814e8bf5527708119fc08b35f6cc0
https://github.com/TomasTomecek/sen/blob/239b4868125814e8bf5527708119fc08b35f6cc0/sen/docker_backend.py#L912-L930
train
208,567
TomasTomecek/sen
sen/tui/widgets/responsive_column.py
ResponsiveColumns.column_widths
def column_widths(self, size, focus=False): """ Return a list of column widths. 0 values in the list mean hide corresponding column completely """ maxcol = size[0] self._cache_maxcol = maxcol widths = [width for i, (w, (t, width, b)) in enumerate(self.contents)] self._cache_column_widths = widths return widths
python
def column_widths(self, size, focus=False): """ Return a list of column widths. 0 values in the list mean hide corresponding column completely """ maxcol = size[0] self._cache_maxcol = maxcol widths = [width for i, (w, (t, width, b)) in enumerate(self.contents)] self._cache_column_widths = widths return widths
[ "def", "column_widths", "(", "self", ",", "size", ",", "focus", "=", "False", ")", ":", "maxcol", "=", "size", "[", "0", "]", "self", ".", "_cache_maxcol", "=", "maxcol", "widths", "=", "[", "width", "for", "i", ",", "(", "w", ",", "(", "t", ",",...
Return a list of column widths. 0 values in the list mean hide corresponding column completely
[ "Return", "a", "list", "of", "column", "widths", "." ]
239b4868125814e8bf5527708119fc08b35f6cc0
https://github.com/TomasTomecek/sen/blob/239b4868125814e8bf5527708119fc08b35f6cc0/sen/tui/widgets/responsive_column.py#L14-L25
train
208,568
TomasTomecek/sen
sen/tui/views/main.py
MainListBox.query
def query(self, query_string=""): """ query and display, also apply filters :param query_string: str :return: None """ def query_notify(operation): w = get_operation_notify_widget(operation, display_always=False) if w: self.ui.notify_widget(w) if query_string is not None: self.filter_query = query_string.strip() # FIXME: this could be part of filter command since it's command line backend_query = { "cached": False, "containers": True, "images": True, } def containers(): backend_query["containers"] = True backend_query["images"] = not backend_query["images"] backend_query["cached"] = True def images(): backend_query["containers"] = not backend_query["containers"] backend_query["images"] = True backend_query["cached"] = True def running(): backend_query["stopped"] = False backend_query["cached"] = True backend_query["images"] = False query_conf = [ { "query_keys": ["t", "type"], "query_values": ["c", "container", "containers"], "callback": containers }, { "query_keys": ["t", "type"], "query_values": ["i", "images", "images"], "callback": images }, { "query_keys": ["s", "state"], "query_values": ["r", "running"], "callback": running }, ] query_list = re.split(r"[\s,]", self.filter_query) unprocessed = [] for query_str in query_list: if not query_str: continue # process here x=y queries and pass rest to parent filter() try: query_key, query_value = query_str.split("=", 1) except ValueError: unprocessed.append(query_str) else: logger.debug("looking up query key %r and query value %r", query_key, query_value) for c in query_conf: if query_key in c["query_keys"] and query_value in c["query_values"]: c["callback"]() break else: raise NotifyError("Invalid query string: %r", query_str) widgets = [] logger.debug("doing query %s", backend_query) query, c_op, i_op = self.d.filter(**backend_query) for o in query: try: line = MainLineWidget(o) except NotAvailableAnymore: continue widgets.append(line) if unprocessed: new_query = " ".join(unprocessed) logger.debug("doing parent query for unprocessed string: %r", new_query) super().filter(new_query, widgets_to_filter=widgets) else: self.set_body(widgets) self.ro_content = widgets query_notify(i_op) query_notify(c_op)
python
def query(self, query_string=""): """ query and display, also apply filters :param query_string: str :return: None """ def query_notify(operation): w = get_operation_notify_widget(operation, display_always=False) if w: self.ui.notify_widget(w) if query_string is not None: self.filter_query = query_string.strip() # FIXME: this could be part of filter command since it's command line backend_query = { "cached": False, "containers": True, "images": True, } def containers(): backend_query["containers"] = True backend_query["images"] = not backend_query["images"] backend_query["cached"] = True def images(): backend_query["containers"] = not backend_query["containers"] backend_query["images"] = True backend_query["cached"] = True def running(): backend_query["stopped"] = False backend_query["cached"] = True backend_query["images"] = False query_conf = [ { "query_keys": ["t", "type"], "query_values": ["c", "container", "containers"], "callback": containers }, { "query_keys": ["t", "type"], "query_values": ["i", "images", "images"], "callback": images }, { "query_keys": ["s", "state"], "query_values": ["r", "running"], "callback": running }, ] query_list = re.split(r"[\s,]", self.filter_query) unprocessed = [] for query_str in query_list: if not query_str: continue # process here x=y queries and pass rest to parent filter() try: query_key, query_value = query_str.split("=", 1) except ValueError: unprocessed.append(query_str) else: logger.debug("looking up query key %r and query value %r", query_key, query_value) for c in query_conf: if query_key in c["query_keys"] and query_value in c["query_values"]: c["callback"]() break else: raise NotifyError("Invalid query string: %r", query_str) widgets = [] logger.debug("doing query %s", backend_query) query, c_op, i_op = self.d.filter(**backend_query) for o in query: try: line = MainLineWidget(o) except NotAvailableAnymore: continue widgets.append(line) if unprocessed: new_query = " ".join(unprocessed) logger.debug("doing parent query for unprocessed string: %r", new_query) super().filter(new_query, widgets_to_filter=widgets) else: self.set_body(widgets) self.ro_content = widgets query_notify(i_op) query_notify(c_op)
[ "def", "query", "(", "self", ",", "query_string", "=", "\"\"", ")", ":", "def", "query_notify", "(", "operation", ")", ":", "w", "=", "get_operation_notify_widget", "(", "operation", ",", "display_always", "=", "False", ")", "if", "w", ":", "self", ".", ...
query and display, also apply filters :param query_string: str :return: None
[ "query", "and", "display", "also", "apply", "filters" ]
239b4868125814e8bf5527708119fc08b35f6cc0
https://github.com/TomasTomecek/sen/blob/239b4868125814e8bf5527708119fc08b35f6cc0/sen/tui/views/main.py#L89-L180
train
208,569
TomasTomecek/sen
sen/tui/commands/base.py
Commander.get_command
def get_command(self, command_input, docker_object=None, buffer=None, size=None): """ return command instance which is the actual command to be executed :param command_input: str, command name and its args: "command arg arg2=val opt" :param docker_object: :param buffer: :param size: tuple, so we can call urwid.keypress(size, ...) :return: instance of Command """ logger.debug("get command for command input %r", command_input) if not command_input: # noop, don't do anything return if command_input[0] in ["/"]: # we could add here !, @, ... command_name = command_input[0] unparsed_command_args = shlex.split(command_input[1:]) else: command_input_list = shlex.split(command_input) command_name = command_input_list[0] unparsed_command_args = command_input_list[1:] try: CommandClass = commands_mapping[command_name] except KeyError: logger.info("no such command: %r", command_name) raise NoSuchCommand("There is no such command: %s" % command_name) else: cmd = CommandClass(ui=self.ui, docker_backend=self.docker_backend, docker_object=docker_object, buffer=buffer, size=size) cmd.process_args(unparsed_command_args) return cmd
python
def get_command(self, command_input, docker_object=None, buffer=None, size=None): """ return command instance which is the actual command to be executed :param command_input: str, command name and its args: "command arg arg2=val opt" :param docker_object: :param buffer: :param size: tuple, so we can call urwid.keypress(size, ...) :return: instance of Command """ logger.debug("get command for command input %r", command_input) if not command_input: # noop, don't do anything return if command_input[0] in ["/"]: # we could add here !, @, ... command_name = command_input[0] unparsed_command_args = shlex.split(command_input[1:]) else: command_input_list = shlex.split(command_input) command_name = command_input_list[0] unparsed_command_args = command_input_list[1:] try: CommandClass = commands_mapping[command_name] except KeyError: logger.info("no such command: %r", command_name) raise NoSuchCommand("There is no such command: %s" % command_name) else: cmd = CommandClass(ui=self.ui, docker_backend=self.docker_backend, docker_object=docker_object, buffer=buffer, size=size) cmd.process_args(unparsed_command_args) return cmd
[ "def", "get_command", "(", "self", ",", "command_input", ",", "docker_object", "=", "None", ",", "buffer", "=", "None", ",", "size", "=", "None", ")", ":", "logger", ".", "debug", "(", "\"get command for command input %r\"", ",", "command_input", ")", "if", ...
return command instance which is the actual command to be executed :param command_input: str, command name and its args: "command arg arg2=val opt" :param docker_object: :param buffer: :param size: tuple, so we can call urwid.keypress(size, ...) :return: instance of Command
[ "return", "command", "instance", "which", "is", "the", "actual", "command", "to", "be", "executed" ]
239b4868125814e8bf5527708119fc08b35f6cc0
https://github.com/TomasTomecek/sen/blob/239b4868125814e8bf5527708119fc08b35f6cc0/sen/tui/commands/base.py#L252-L285
train
208,570
respeaker/respeaker_python_library
respeaker/gpio.py
Gpio.read
def read(self): """ Read pin value @rtype: int @return: I{0} when LOW, I{1} when HIGH """ val = self._fd.read() self._fd.seek(0) return int(val)
python
def read(self): """ Read pin value @rtype: int @return: I{0} when LOW, I{1} when HIGH """ val = self._fd.read() self._fd.seek(0) return int(val)
[ "def", "read", "(", "self", ")", ":", "val", "=", "self", ".", "_fd", ".", "read", "(", ")", "self", ".", "_fd", ".", "seek", "(", "0", ")", "return", "int", "(", "val", ")" ]
Read pin value @rtype: int @return: I{0} when LOW, I{1} when HIGH
[ "Read", "pin", "value" ]
905a5334ccdc2d474ad973caf6a23d05c65bbb25
https://github.com/respeaker/respeaker_python_library/blob/905a5334ccdc2d474ad973caf6a23d05c65bbb25/respeaker/gpio.py#L183-L192
train
208,571
MAVENSDC/cdflib
cdflib/epochs.py
CDFepoch.getVersion
def getVersion(): # @NoSelf """ Shows the code version. """ print('epochs version:', str(CDFepoch.version) + '.' + str(CDFepoch.release) + '.'+str(CDFepoch.increment))
python
def getVersion(): # @NoSelf """ Shows the code version. """ print('epochs version:', str(CDFepoch.version) + '.' + str(CDFepoch.release) + '.'+str(CDFepoch.increment))
[ "def", "getVersion", "(", ")", ":", "# @NoSelf", "print", "(", "'epochs version:'", ",", "str", "(", "CDFepoch", ".", "version", ")", "+", "'.'", "+", "str", "(", "CDFepoch", ".", "release", ")", "+", "'.'", "+", "str", "(", "CDFepoch", ".", "increment...
Shows the code version.
[ "Shows", "the", "code", "version", "." ]
d237c60e5db67db0f92d96054209c25c4042465c
https://github.com/MAVENSDC/cdflib/blob/d237c60e5db67db0f92d96054209c25c4042465c/cdflib/epochs.py#L1749-L1754
train
208,572
MAVENSDC/cdflib
cdflib/epochs.py
CDFepoch.getLeapSecondLastUpdated
def getLeapSecondLastUpdated(): # @NoSelf """ Shows the latest date a leap second was added to the leap second table. """ print('Leap second last updated:', str(CDFepoch.LTS[-1][0]) + '-' + str(CDFepoch.LTS[-1][1]) + '-' + str(CDFepoch.LTS[-1][2]))
python
def getLeapSecondLastUpdated(): # @NoSelf """ Shows the latest date a leap second was added to the leap second table. """ print('Leap second last updated:', str(CDFepoch.LTS[-1][0]) + '-' + str(CDFepoch.LTS[-1][1]) + '-' + str(CDFepoch.LTS[-1][2]))
[ "def", "getLeapSecondLastUpdated", "(", ")", ":", "# @NoSelf", "print", "(", "'Leap second last updated:'", ",", "str", "(", "CDFepoch", ".", "LTS", "[", "-", "1", "]", "[", "0", "]", ")", "+", "'-'", "+", "str", "(", "CDFepoch", ".", "LTS", "[", "-", ...
Shows the latest date a leap second was added to the leap second table.
[ "Shows", "the", "latest", "date", "a", "leap", "second", "was", "added", "to", "the", "leap", "second", "table", "." ]
d237c60e5db67db0f92d96054209c25c4042465c
https://github.com/MAVENSDC/cdflib/blob/d237c60e5db67db0f92d96054209c25c4042465c/cdflib/epochs.py#L1756-L1761
train
208,573
MAVENSDC/cdflib
cdflib/cdfwrite.py
CDF.close
def close(self): ''' Closes the CDF Class. 1. If compression was set, this is where the compressed file is written. 2. If a checksum is needed, this will place the checksum at the end of the file. ''' if self.compressed_file is None: with self.path.open('rb+') as f: f.seek(0, 2) eof = f.tell() self._update_offset_value(f, self.gdr_head+36, 8, eof) if self.checksum: f.write(self._md5_compute(f)) return # %% with self.path.open('rb+') as f: f.seek(0, 2) eof = f.tell() self._update_offset_value(f, self.gdr_head+36, 8, eof) with self.compressed_file.open('wb+') as g: g.write(bytearray.fromhex(CDF.V3magicNUMBER_1)) g.write(bytearray.fromhex(CDF.V3magicNUMBER_2c)) self._write_ccr(f, g, self.compression) if self.checksum: g.seek(0, 2) g.write(self._md5_compute(g)) self.path.unlink() # NOTE: for Windows this is necessary self.compressed_file.rename(self.path)
python
def close(self): ''' Closes the CDF Class. 1. If compression was set, this is where the compressed file is written. 2. If a checksum is needed, this will place the checksum at the end of the file. ''' if self.compressed_file is None: with self.path.open('rb+') as f: f.seek(0, 2) eof = f.tell() self._update_offset_value(f, self.gdr_head+36, 8, eof) if self.checksum: f.write(self._md5_compute(f)) return # %% with self.path.open('rb+') as f: f.seek(0, 2) eof = f.tell() self._update_offset_value(f, self.gdr_head+36, 8, eof) with self.compressed_file.open('wb+') as g: g.write(bytearray.fromhex(CDF.V3magicNUMBER_1)) g.write(bytearray.fromhex(CDF.V3magicNUMBER_2c)) self._write_ccr(f, g, self.compression) if self.checksum: g.seek(0, 2) g.write(self._md5_compute(g)) self.path.unlink() # NOTE: for Windows this is necessary self.compressed_file.rename(self.path)
[ "def", "close", "(", "self", ")", ":", "if", "self", ".", "compressed_file", "is", "None", ":", "with", "self", ".", "path", ".", "open", "(", "'rb+'", ")", "as", "f", ":", "f", ".", "seek", "(", "0", ",", "2", ")", "eof", "=", "f", ".", "tel...
Closes the CDF Class. 1. If compression was set, this is where the compressed file is written. 2. If a checksum is needed, this will place the checksum at the end of the file.
[ "Closes", "the", "CDF", "Class", "." ]
d237c60e5db67db0f92d96054209c25c4042465c
https://github.com/MAVENSDC/cdflib/blob/d237c60e5db67db0f92d96054209c25c4042465c/cdflib/cdfwrite.py#L253-L288
train
208,574
MAVENSDC/cdflib
cdflib/cdfwrite.py
CDF._write_var_data_sparse
def _write_var_data_sparse(self, f, zVar, var, dataType, numElems, recVary, oneblock): ''' Writes a VVR and a VXR for this block of sparse data Parameters: f : file The open CDF file zVar : bool True if this is for a z variable var : int The variable number dataType : int The CDF data type of this variable numElems : str The number of elements in each record recVary : bool True if the value varies across records oneblock: list A list of data in the form [startrec, endrec, [data]] Returns: recend : int Just the "endrec" value input by the user in "oneblock" ''' rec_start = oneblock[0] rec_end = oneblock[1] indata = oneblock[2] numValues = self._num_values(zVar, var) # Convert oneblock[2] into a byte stream _, data = self._convert_data(dataType, numElems, numValues, indata) # Gather dimension information if zVar: vdr_offset = self.zvarsinfo[var][1] else: vdr_offset = self.rvarsinfo[var][1] # Write one VVR offset = self._write_vvr(f, data) f.seek(vdr_offset+28, 0) # Get first VXR vxrOne = int.from_bytes(f.read(8), 'big', signed=True) foundSpot = 0 usedEntries = 0 currentVXR = 0 # Search through VXRs to find an open one while foundSpot == 0 and vxrOne > 0: # have a VXR f.seek(vxrOne, 0) currentVXR = f.tell() f.seek(vxrOne+12, 0) vxrNext = int.from_bytes(f.read(8), 'big', signed=True) nEntries = int.from_bytes(f.read(4), 'big', signed=True) usedEntries = int.from_bytes(f.read(4), 'big', signed=True) if (usedEntries == nEntries): # all entries are used -- check the next vxr in link vxrOne = vxrNext else: # found a vxr with an vailable entry spot foundSpot = 1 # vxrOne == 0 from vdr's vxrhead vxrOne == -1 from a vxr's vxrnext if (vxrOne == 0 or vxrOne == -1): # no available vxr... create a new one currentVXR = self._create_vxr(f, rec_start, rec_end, vdr_offset, currentVXR, offset) else: self._use_vxrentry(f, currentVXR, rec_start, rec_end, offset) # Modify the VDR's MaxRec if needed f.seek(vdr_offset+24, 0) recNumc = int.from_bytes(f.read(4), 'big', signed=True) if (rec_end > recNumc): self._update_offset_value(f, vdr_offset+24, 4, rec_end) return rec_end
python
def _write_var_data_sparse(self, f, zVar, var, dataType, numElems, recVary, oneblock): ''' Writes a VVR and a VXR for this block of sparse data Parameters: f : file The open CDF file zVar : bool True if this is for a z variable var : int The variable number dataType : int The CDF data type of this variable numElems : str The number of elements in each record recVary : bool True if the value varies across records oneblock: list A list of data in the form [startrec, endrec, [data]] Returns: recend : int Just the "endrec" value input by the user in "oneblock" ''' rec_start = oneblock[0] rec_end = oneblock[1] indata = oneblock[2] numValues = self._num_values(zVar, var) # Convert oneblock[2] into a byte stream _, data = self._convert_data(dataType, numElems, numValues, indata) # Gather dimension information if zVar: vdr_offset = self.zvarsinfo[var][1] else: vdr_offset = self.rvarsinfo[var][1] # Write one VVR offset = self._write_vvr(f, data) f.seek(vdr_offset+28, 0) # Get first VXR vxrOne = int.from_bytes(f.read(8), 'big', signed=True) foundSpot = 0 usedEntries = 0 currentVXR = 0 # Search through VXRs to find an open one while foundSpot == 0 and vxrOne > 0: # have a VXR f.seek(vxrOne, 0) currentVXR = f.tell() f.seek(vxrOne+12, 0) vxrNext = int.from_bytes(f.read(8), 'big', signed=True) nEntries = int.from_bytes(f.read(4), 'big', signed=True) usedEntries = int.from_bytes(f.read(4), 'big', signed=True) if (usedEntries == nEntries): # all entries are used -- check the next vxr in link vxrOne = vxrNext else: # found a vxr with an vailable entry spot foundSpot = 1 # vxrOne == 0 from vdr's vxrhead vxrOne == -1 from a vxr's vxrnext if (vxrOne == 0 or vxrOne == -1): # no available vxr... create a new one currentVXR = self._create_vxr(f, rec_start, rec_end, vdr_offset, currentVXR, offset) else: self._use_vxrentry(f, currentVXR, rec_start, rec_end, offset) # Modify the VDR's MaxRec if needed f.seek(vdr_offset+24, 0) recNumc = int.from_bytes(f.read(4), 'big', signed=True) if (rec_end > recNumc): self._update_offset_value(f, vdr_offset+24, 4, rec_end) return rec_end
[ "def", "_write_var_data_sparse", "(", "self", ",", "f", ",", "zVar", ",", "var", ",", "dataType", ",", "numElems", ",", "recVary", ",", "oneblock", ")", ":", "rec_start", "=", "oneblock", "[", "0", "]", "rec_end", "=", "oneblock", "[", "1", "]", "indat...
Writes a VVR and a VXR for this block of sparse data Parameters: f : file The open CDF file zVar : bool True if this is for a z variable var : int The variable number dataType : int The CDF data type of this variable numElems : str The number of elements in each record recVary : bool True if the value varies across records oneblock: list A list of data in the form [startrec, endrec, [data]] Returns: recend : int Just the "endrec" value input by the user in "oneblock"
[ "Writes", "a", "VVR", "and", "a", "VXR", "for", "this", "block", "of", "sparse", "data" ]
d237c60e5db67db0f92d96054209c25c4042465c
https://github.com/MAVENSDC/cdflib/blob/d237c60e5db67db0f92d96054209c25c4042465c/cdflib/cdfwrite.py#L1075-L1155
train
208,575
MAVENSDC/cdflib
cdflib/cdfwrite.py
CDF._create_vxr
def _create_vxr(self, f, recStart, recEnd, currentVDR, priorVXR, vvrOffset): ''' Create a VXR AND use a VXR Parameters: f : file The open CDF file recStart : int The start record of this block recEnd : int The ending record of this block currentVDR : int The byte location of the variables VDR priorVXR : int The byte location of the previous VXR vvrOffset : int The byte location of ther VVR Returns: vxroffset : int The byte location of the created vxr ''' # add a VXR, use an entry, and link it to the prior VXR if it exists vxroffset = self._write_vxr(f) self._use_vxrentry(f, vxroffset, recStart, recEnd, vvrOffset) if (priorVXR == 0): # VDR's VXRhead self._update_offset_value(f, currentVDR+28, 8, vxroffset) else: # VXR's next self._update_offset_value(f, priorVXR+12, 8, vxroffset) # VDR's VXRtail self._update_offset_value(f, currentVDR+36, 8, vxroffset) return vxroffset
python
def _create_vxr(self, f, recStart, recEnd, currentVDR, priorVXR, vvrOffset): ''' Create a VXR AND use a VXR Parameters: f : file The open CDF file recStart : int The start record of this block recEnd : int The ending record of this block currentVDR : int The byte location of the variables VDR priorVXR : int The byte location of the previous VXR vvrOffset : int The byte location of ther VVR Returns: vxroffset : int The byte location of the created vxr ''' # add a VXR, use an entry, and link it to the prior VXR if it exists vxroffset = self._write_vxr(f) self._use_vxrentry(f, vxroffset, recStart, recEnd, vvrOffset) if (priorVXR == 0): # VDR's VXRhead self._update_offset_value(f, currentVDR+28, 8, vxroffset) else: # VXR's next self._update_offset_value(f, priorVXR+12, 8, vxroffset) # VDR's VXRtail self._update_offset_value(f, currentVDR+36, 8, vxroffset) return vxroffset
[ "def", "_create_vxr", "(", "self", ",", "f", ",", "recStart", ",", "recEnd", ",", "currentVDR", ",", "priorVXR", ",", "vvrOffset", ")", ":", "# add a VXR, use an entry, and link it to the prior VXR if it exists", "vxroffset", "=", "self", ".", "_write_vxr", "(", "f"...
Create a VXR AND use a VXR Parameters: f : file The open CDF file recStart : int The start record of this block recEnd : int The ending record of this block currentVDR : int The byte location of the variables VDR priorVXR : int The byte location of the previous VXR vvrOffset : int The byte location of ther VVR Returns: vxroffset : int The byte location of the created vxr
[ "Create", "a", "VXR", "AND", "use", "a", "VXR" ]
d237c60e5db67db0f92d96054209c25c4042465c
https://github.com/MAVENSDC/cdflib/blob/d237c60e5db67db0f92d96054209c25c4042465c/cdflib/cdfwrite.py#L1157-L1191
train
208,576
MAVENSDC/cdflib
cdflib/cdfwrite.py
CDF._use_vxrentry
def _use_vxrentry(self, f, VXRoffset, recStart, recEnd, offset): ''' Adds a VVR pointer to a VXR ''' # Select the next unused entry in a VXR for a VVR/CVVR f.seek(VXRoffset+20) # num entries numEntries = int.from_bytes(f.read(4), 'big', signed=True) # used entries usedEntries = int.from_bytes(f.read(4), 'big', signed=True) # VXR's First self._update_offset_value(f, VXRoffset+28+4*usedEntries, 4, recStart) # VXR's Last self._update_offset_value(f, VXRoffset+28+4*numEntries+4*usedEntries, 4, recEnd) # VXR's Offset self._update_offset_value(f, VXRoffset+28+2*4*numEntries+8*usedEntries, 8, offset) # VXR's NusedEntries usedEntries += 1 self._update_offset_value(f, VXRoffset+24, 4, usedEntries) return usedEntries
python
def _use_vxrentry(self, f, VXRoffset, recStart, recEnd, offset): ''' Adds a VVR pointer to a VXR ''' # Select the next unused entry in a VXR for a VVR/CVVR f.seek(VXRoffset+20) # num entries numEntries = int.from_bytes(f.read(4), 'big', signed=True) # used entries usedEntries = int.from_bytes(f.read(4), 'big', signed=True) # VXR's First self._update_offset_value(f, VXRoffset+28+4*usedEntries, 4, recStart) # VXR's Last self._update_offset_value(f, VXRoffset+28+4*numEntries+4*usedEntries, 4, recEnd) # VXR's Offset self._update_offset_value(f, VXRoffset+28+2*4*numEntries+8*usedEntries, 8, offset) # VXR's NusedEntries usedEntries += 1 self._update_offset_value(f, VXRoffset+24, 4, usedEntries) return usedEntries
[ "def", "_use_vxrentry", "(", "self", ",", "f", ",", "VXRoffset", ",", "recStart", ",", "recEnd", ",", "offset", ")", ":", "# Select the next unused entry in a VXR for a VVR/CVVR", "f", ".", "seek", "(", "VXRoffset", "+", "20", ")", "# num entries", "numEntries", ...
Adds a VVR pointer to a VXR
[ "Adds", "a", "VVR", "pointer", "to", "a", "VXR" ]
d237c60e5db67db0f92d96054209c25c4042465c
https://github.com/MAVENSDC/cdflib/blob/d237c60e5db67db0f92d96054209c25c4042465c/cdflib/cdfwrite.py#L1193-L1214
train
208,577
MAVENSDC/cdflib
cdflib/cdfwrite.py
CDF._add_vxr_levels_r
def _add_vxr_levels_r(self, f, vxrhead, numVXRs): ''' Build a new level of VXRs... make VXRs more tree-like From: VXR1 -> VXR2 -> VXR3 -> VXR4 -> ... -> VXRn To: new VXR1 / | \ VXR2 VXR3 VXR4 / | \ ... VXR5 .......... VXRn Parameters: f : file The open CDF file vxrhead : int The byte location of the first VXR for a variable numVXRs : int The total number of VXRs Returns: newVXRhead : int The byte location of the newest VXR head newvxroff : int The byte location of the last VXR head ''' newNumVXRs = int(numVXRs / CDF.NUM_VXRlvl_ENTRIES) remaining = int(numVXRs % CDF.NUM_VXRlvl_ENTRIES) vxroff = vxrhead prevxroff = -1 if (remaining != 0): newNumVXRs += 1 CDF.level += 1 for x in range(0, newNumVXRs): newvxroff = self._write_vxr(f, numEntries=CDF.NUM_VXRlvl_ENTRIES) if (x > 0): self._update_offset_value(f, prevxroff+12, 8, newvxroff) else: newvxrhead = newvxroff prevxroff = newvxroff if (x == (newNumVXRs - 1)): if (remaining == 0): endEntry = CDF.NUM_VXRlvl_ENTRIES else: endEntry = remaining else: endEntry = CDF.NUM_VXRlvl_ENTRIES for _ in range(0, endEntry): recFirst, recLast = self._get_recrange(f, vxroff) self._use_vxrentry(f, newvxroff, recFirst, recLast, vxroff) vxroff = self._read_offset_value(f, vxroff+12, 8) vxroff = vxrhead # Break the horizontal links for x in range(0, numVXRs): nvxroff = self._read_offset_value(f, vxroff+12, 8) self._update_offset_value(f, vxroff+12, 8, 0) vxroff = nvxroff # Iterate this process if we're over NUM_VXRlvl_ENTRIES if (newNumVXRs > CDF.NUM_VXRlvl_ENTRIES): return self._add_vxr_levels_r(f, newvxrhead, newNumVXRs) else: return newvxrhead, newvxroff
python
def _add_vxr_levels_r(self, f, vxrhead, numVXRs): ''' Build a new level of VXRs... make VXRs more tree-like From: VXR1 -> VXR2 -> VXR3 -> VXR4 -> ... -> VXRn To: new VXR1 / | \ VXR2 VXR3 VXR4 / | \ ... VXR5 .......... VXRn Parameters: f : file The open CDF file vxrhead : int The byte location of the first VXR for a variable numVXRs : int The total number of VXRs Returns: newVXRhead : int The byte location of the newest VXR head newvxroff : int The byte location of the last VXR head ''' newNumVXRs = int(numVXRs / CDF.NUM_VXRlvl_ENTRIES) remaining = int(numVXRs % CDF.NUM_VXRlvl_ENTRIES) vxroff = vxrhead prevxroff = -1 if (remaining != 0): newNumVXRs += 1 CDF.level += 1 for x in range(0, newNumVXRs): newvxroff = self._write_vxr(f, numEntries=CDF.NUM_VXRlvl_ENTRIES) if (x > 0): self._update_offset_value(f, prevxroff+12, 8, newvxroff) else: newvxrhead = newvxroff prevxroff = newvxroff if (x == (newNumVXRs - 1)): if (remaining == 0): endEntry = CDF.NUM_VXRlvl_ENTRIES else: endEntry = remaining else: endEntry = CDF.NUM_VXRlvl_ENTRIES for _ in range(0, endEntry): recFirst, recLast = self._get_recrange(f, vxroff) self._use_vxrentry(f, newvxroff, recFirst, recLast, vxroff) vxroff = self._read_offset_value(f, vxroff+12, 8) vxroff = vxrhead # Break the horizontal links for x in range(0, numVXRs): nvxroff = self._read_offset_value(f, vxroff+12, 8) self._update_offset_value(f, vxroff+12, 8, 0) vxroff = nvxroff # Iterate this process if we're over NUM_VXRlvl_ENTRIES if (newNumVXRs > CDF.NUM_VXRlvl_ENTRIES): return self._add_vxr_levels_r(f, newvxrhead, newNumVXRs) else: return newvxrhead, newvxroff
[ "def", "_add_vxr_levels_r", "(", "self", ",", "f", ",", "vxrhead", ",", "numVXRs", ")", ":", "newNumVXRs", "=", "int", "(", "numVXRs", "/", "CDF", ".", "NUM_VXRlvl_ENTRIES", ")", "remaining", "=", "int", "(", "numVXRs", "%", "CDF", ".", "NUM_VXRlvl_ENTRIES...
Build a new level of VXRs... make VXRs more tree-like From: VXR1 -> VXR2 -> VXR3 -> VXR4 -> ... -> VXRn To: new VXR1 / | \ VXR2 VXR3 VXR4 / | \ ... VXR5 .......... VXRn Parameters: f : file The open CDF file vxrhead : int The byte location of the first VXR for a variable numVXRs : int The total number of VXRs Returns: newVXRhead : int The byte location of the newest VXR head newvxroff : int The byte location of the last VXR head
[ "Build", "a", "new", "level", "of", "VXRs", "...", "make", "VXRs", "more", "tree", "-", "like" ]
d237c60e5db67db0f92d96054209c25c4042465c
https://github.com/MAVENSDC/cdflib/blob/d237c60e5db67db0f92d96054209c25c4042465c/cdflib/cdfwrite.py#L1216-L1284
train
208,578
MAVENSDC/cdflib
cdflib/cdfwrite.py
CDF._update_vdr_vxrheadtail
def _update_vdr_vxrheadtail(self, f, vdr_offset, VXRoffset): ''' This sets a VXR to be the first and last VXR in the VDR ''' # VDR's VXRhead self._update_offset_value(f, vdr_offset+28, 8, VXRoffset) # VDR's VXRtail self._update_offset_value(f, vdr_offset+36, 8, VXRoffset)
python
def _update_vdr_vxrheadtail(self, f, vdr_offset, VXRoffset): ''' This sets a VXR to be the first and last VXR in the VDR ''' # VDR's VXRhead self._update_offset_value(f, vdr_offset+28, 8, VXRoffset) # VDR's VXRtail self._update_offset_value(f, vdr_offset+36, 8, VXRoffset)
[ "def", "_update_vdr_vxrheadtail", "(", "self", ",", "f", ",", "vdr_offset", ",", "VXRoffset", ")", ":", "# VDR's VXRhead", "self", ".", "_update_offset_value", "(", "f", ",", "vdr_offset", "+", "28", ",", "8", ",", "VXRoffset", ")", "# VDR's VXRtail", "self", ...
This sets a VXR to be the first and last VXR in the VDR
[ "This", "sets", "a", "VXR", "to", "be", "the", "first", "and", "last", "VXR", "in", "the", "VDR" ]
d237c60e5db67db0f92d96054209c25c4042465c
https://github.com/MAVENSDC/cdflib/blob/d237c60e5db67db0f92d96054209c25c4042465c/cdflib/cdfwrite.py#L1286-L1293
train
208,579
MAVENSDC/cdflib
cdflib/cdfwrite.py
CDF._get_recrange
def _get_recrange(self, f, VXRoffset): ''' Finds the first and last record numbers pointed by the VXR Assumes the VXRs are in order ''' f.seek(VXRoffset+20) # Num entries numEntries = int.from_bytes(f.read(4), 'big', signed=True) # used entries usedEntries = int.from_bytes(f.read(4), 'big', signed=True) # VXR's First record firstRec = int.from_bytes(f.read(4), 'big', signed=True) # VXR's Last record f.seek(VXRoffset+28+(4*numEntries+4*(usedEntries-1))) lastRec = int.from_bytes(f.read(4), 'big', signed=True) return firstRec, lastRec
python
def _get_recrange(self, f, VXRoffset): ''' Finds the first and last record numbers pointed by the VXR Assumes the VXRs are in order ''' f.seek(VXRoffset+20) # Num entries numEntries = int.from_bytes(f.read(4), 'big', signed=True) # used entries usedEntries = int.from_bytes(f.read(4), 'big', signed=True) # VXR's First record firstRec = int.from_bytes(f.read(4), 'big', signed=True) # VXR's Last record f.seek(VXRoffset+28+(4*numEntries+4*(usedEntries-1))) lastRec = int.from_bytes(f.read(4), 'big', signed=True) return firstRec, lastRec
[ "def", "_get_recrange", "(", "self", ",", "f", ",", "VXRoffset", ")", ":", "f", ".", "seek", "(", "VXRoffset", "+", "20", ")", "# Num entries", "numEntries", "=", "int", ".", "from_bytes", "(", "f", ".", "read", "(", "4", ")", ",", "'big'", ",", "s...
Finds the first and last record numbers pointed by the VXR Assumes the VXRs are in order
[ "Finds", "the", "first", "and", "last", "record", "numbers", "pointed", "by", "the", "VXR", "Assumes", "the", "VXRs", "are", "in", "order" ]
d237c60e5db67db0f92d96054209c25c4042465c
https://github.com/MAVENSDC/cdflib/blob/d237c60e5db67db0f92d96054209c25c4042465c/cdflib/cdfwrite.py#L1295-L1310
train
208,580
MAVENSDC/cdflib
cdflib/cdfwrite.py
CDF._datatype_size
def _datatype_size(datatype, numElms): # @NoSelf ''' Gets datatype size Parameters: datatype : int CDF variable data type numElms : int number of elements Returns: numBytes : int The number of bytes for the data ''' sizes = {1: 1, 2: 2, 4: 4, 8: 8, 11: 1, 12: 2, 14: 4, 21: 4, 22: 8, 31: 8, 32: 16, 33: 8, 41: 1, 44: 4, 45: 8, 51: 1, 52: 1} try: if (isinstance(datatype, int)): if (datatype == 51 or datatype == 52): return numElms else: return sizes[datatype] else: datatype = datatype.upper() if (datatype == 'CDF_INT1' or datatype == 'CDF_UINT1' or datatype == 'CDF_BYTE'): return 1 elif (datatype == 'CDF_INT2' or datatype == 'CDF_UINT2'): return 2 elif (datatype == 'CDF_INT4' or datatype == 'CDF_UINT4'): return 4 elif (datatype == 'CDF_INT8' or datatype == 'CDF_TIME_TT2000'): return 8 elif (datatype == 'CDF_REAL4' or datatype == 'CDF_FLOAT'): return 4 elif (datatype == 'CDF_REAL8' or datatype == 'CDF_DOUBLE' or datatype == 'CDF_EPOCH'): return 8 elif (datatype == 'CDF_EPOCH16'): return 16 elif (datatype == 'CDF_CHAR' or datatype == 'CDF_UCHAR'): return numElms else: return -1 except Exception: return -1
python
def _datatype_size(datatype, numElms): # @NoSelf ''' Gets datatype size Parameters: datatype : int CDF variable data type numElms : int number of elements Returns: numBytes : int The number of bytes for the data ''' sizes = {1: 1, 2: 2, 4: 4, 8: 8, 11: 1, 12: 2, 14: 4, 21: 4, 22: 8, 31: 8, 32: 16, 33: 8, 41: 1, 44: 4, 45: 8, 51: 1, 52: 1} try: if (isinstance(datatype, int)): if (datatype == 51 or datatype == 52): return numElms else: return sizes[datatype] else: datatype = datatype.upper() if (datatype == 'CDF_INT1' or datatype == 'CDF_UINT1' or datatype == 'CDF_BYTE'): return 1 elif (datatype == 'CDF_INT2' or datatype == 'CDF_UINT2'): return 2 elif (datatype == 'CDF_INT4' or datatype == 'CDF_UINT4'): return 4 elif (datatype == 'CDF_INT8' or datatype == 'CDF_TIME_TT2000'): return 8 elif (datatype == 'CDF_REAL4' or datatype == 'CDF_FLOAT'): return 4 elif (datatype == 'CDF_REAL8' or datatype == 'CDF_DOUBLE' or datatype == 'CDF_EPOCH'): return 8 elif (datatype == 'CDF_EPOCH16'): return 16 elif (datatype == 'CDF_CHAR' or datatype == 'CDF_UCHAR'): return numElms else: return -1 except Exception: return -1
[ "def", "_datatype_size", "(", "datatype", ",", "numElms", ")", ":", "# @NoSelf", "sizes", "=", "{", "1", ":", "1", ",", "2", ":", "2", ",", "4", ":", "4", ",", "8", ":", "8", ",", "11", ":", "1", ",", "12", ":", "2", ",", "14", ":", "4", ...
Gets datatype size Parameters: datatype : int CDF variable data type numElms : int number of elements Returns: numBytes : int The number of bytes for the data
[ "Gets", "datatype", "size" ]
d237c60e5db67db0f92d96054209c25c4042465c
https://github.com/MAVENSDC/cdflib/blob/d237c60e5db67db0f92d96054209c25c4042465c/cdflib/cdfwrite.py#L1392-L1452
train
208,581
MAVENSDC/cdflib
cdflib/cdfwrite.py
CDF._write_adr
def _write_adr(self, f, gORv, name): ''' Writes and ADR to the end of the file. Additionally, it will update the offset values to either the previous ADR or the ADRhead field in the GDR. Parameters: f : file The open CDF file gORv : bool True if a global attribute, False if variable attribute name : str name of the attribute Returns: num : int The attribute number byte_loc : int The current location in file f ''' f.seek(0, 2) byte_loc = f.tell() block_size = CDF.ADR_BASE_SIZE64 section_type = CDF.ADR_ nextADR = 0 headAgrEDR = 0 if (gORv == True): scope = 1 else: scope = 2 num = len(self.attrs) ngrEntries = 0 maxgrEntry = -1 rfuA = 0 headAzEDR = 0 nzEntries = 0 maxzEntry = -1 rfuE = -1 adr = bytearray(block_size) adr[0:8] = struct.pack('>q', block_size) adr[8:12] = struct.pack('>i', section_type) adr[12:20] = struct.pack('>q', nextADR) adr[20:28] = struct.pack('>q', headAgrEDR) adr[28:32] = struct.pack('>i', scope) adr[32:36] = struct.pack('>i', num) adr[36:40] = struct.pack('>i', ngrEntries) adr[40:44] = struct.pack('>i', maxgrEntry) adr[44:48] = struct.pack('>i', rfuA) adr[48:56] = struct.pack('>q', headAzEDR) adr[56:60] = struct.pack('>i', nzEntries) adr[60:64] = struct.pack('>i', maxzEntry) adr[64:68] = struct.pack('>i', rfuE) tofill = 256 - len(name) adr[68:324] = (name+'\0'*tofill).encode() f.write(adr) info = [] info.append(name) info.append(scope) info.append(byte_loc) self.attrsinfo[num] = info if (scope == 1): self.gattrs.append(name) else: self.vattrs.append(name) self.attrs.append(name) if (num > 0): # ADR's ADRnext self._update_offset_value(f, self.attrsinfo[num-1][2]+12, 8, byte_loc) else: # GDR's ADRhead self._update_offset_value(f, self.gdr_head+28, 8, byte_loc) # GDR's NumAttr self._update_offset_value(f, self.gdr_head+48, 4, num+1) return num, byte_loc
python
def _write_adr(self, f, gORv, name): ''' Writes and ADR to the end of the file. Additionally, it will update the offset values to either the previous ADR or the ADRhead field in the GDR. Parameters: f : file The open CDF file gORv : bool True if a global attribute, False if variable attribute name : str name of the attribute Returns: num : int The attribute number byte_loc : int The current location in file f ''' f.seek(0, 2) byte_loc = f.tell() block_size = CDF.ADR_BASE_SIZE64 section_type = CDF.ADR_ nextADR = 0 headAgrEDR = 0 if (gORv == True): scope = 1 else: scope = 2 num = len(self.attrs) ngrEntries = 0 maxgrEntry = -1 rfuA = 0 headAzEDR = 0 nzEntries = 0 maxzEntry = -1 rfuE = -1 adr = bytearray(block_size) adr[0:8] = struct.pack('>q', block_size) adr[8:12] = struct.pack('>i', section_type) adr[12:20] = struct.pack('>q', nextADR) adr[20:28] = struct.pack('>q', headAgrEDR) adr[28:32] = struct.pack('>i', scope) adr[32:36] = struct.pack('>i', num) adr[36:40] = struct.pack('>i', ngrEntries) adr[40:44] = struct.pack('>i', maxgrEntry) adr[44:48] = struct.pack('>i', rfuA) adr[48:56] = struct.pack('>q', headAzEDR) adr[56:60] = struct.pack('>i', nzEntries) adr[60:64] = struct.pack('>i', maxzEntry) adr[64:68] = struct.pack('>i', rfuE) tofill = 256 - len(name) adr[68:324] = (name+'\0'*tofill).encode() f.write(adr) info = [] info.append(name) info.append(scope) info.append(byte_loc) self.attrsinfo[num] = info if (scope == 1): self.gattrs.append(name) else: self.vattrs.append(name) self.attrs.append(name) if (num > 0): # ADR's ADRnext self._update_offset_value(f, self.attrsinfo[num-1][2]+12, 8, byte_loc) else: # GDR's ADRhead self._update_offset_value(f, self.gdr_head+28, 8, byte_loc) # GDR's NumAttr self._update_offset_value(f, self.gdr_head+48, 4, num+1) return num, byte_loc
[ "def", "_write_adr", "(", "self", ",", "f", ",", "gORv", ",", "name", ")", ":", "f", ".", "seek", "(", "0", ",", "2", ")", "byte_loc", "=", "f", ".", "tell", "(", ")", "block_size", "=", "CDF", ".", "ADR_BASE_SIZE64", "section_type", "=", "CDF", ...
Writes and ADR to the end of the file. Additionally, it will update the offset values to either the previous ADR or the ADRhead field in the GDR. Parameters: f : file The open CDF file gORv : bool True if a global attribute, False if variable attribute name : str name of the attribute Returns: num : int The attribute number byte_loc : int The current location in file f
[ "Writes", "and", "ADR", "to", "the", "end", "of", "the", "file", "." ]
d237c60e5db67db0f92d96054209c25c4042465c
https://github.com/MAVENSDC/cdflib/blob/d237c60e5db67db0f92d96054209c25c4042465c/cdflib/cdfwrite.py#L1554-L1633
train
208,582
MAVENSDC/cdflib
cdflib/cdfwrite.py
CDF._write_aedr
def _write_aedr(self, f, gORz, attrNum, entryNum, value, pdataType, pnumElems, zVar): ''' Writes an aedr into the end of the file. Parameters: f : file The current open CDF file gORz : bool True if this entry is for a global or z variable, False if r variable attrNum : int Number of the attribute this aedr belongs to. entryNum : int Number of the entry value : The value of this entry pdataType : int The CDF data type of the value pnumElems : int Number of elements in the value. zVar : bool True if this entry belongs to a z variable Returns: byte_loc : int This current location in the file after writing the aedr. ''' f.seek(0, 2) byte_loc = f.tell() if (gORz == True or zVar != True): section_type = CDF.AgrEDR_ else: section_type = CDF.AzEDR_ nextAEDR = 0 if pdataType is None: # Figure out Data Type if not supplied if isinstance(value, (list, tuple)): avalue = value[0] else: avalue = value if (isinstance(avalue, int)): pdataType = CDF.CDF_INT8 elif (isinstance(avalue, float)): pdataType = CDF.CDF_FLOAT elif (isinstance(avalue, complex)): pdataType = CDF.CDF_EPOCH16 else: # assume a boolean pdataType = CDF.CDF_INT1 if pnumElems is None: # Figure out number of elements if not supplied if isinstance(value, str): pdataType = CDF.CDF_CHAR pnumElems = len(value) else: if isinstance(value, (list, tuple)): pnumElems = len(value) else: pnumElems = 1 dataType = pdataType numElems = pnumElems rfuB = 0 rfuC = 0 rfuD = -1 rfuE = -1 if gORz: numStrings = 0 else: if (isinstance(value, str)): numStrings = value.count('\\N ') + 1 else: numStrings = 0 recs, cdata = self._convert_data(dataType, numElems, 1, value) if (dataType == 51): numElems = len(cdata) block_size = len(cdata) + 56 aedr = bytearray(block_size) aedr[0:8] = struct.pack('>q', block_size) aedr[8:12] = struct.pack('>i', section_type) aedr[12:20] = struct.pack('>q', nextAEDR) aedr[20:24] = struct.pack('>i', attrNum) aedr[24:28] = struct.pack('>i', dataType) aedr[28:32] = struct.pack('>i', entryNum) aedr[32:36] = struct.pack('>i', numElems) aedr[36:40] = struct.pack('>i', numStrings) aedr[40:44] = struct.pack('>i', rfuB) aedr[44:48] = struct.pack('>i', rfuC) aedr[48:52] = struct.pack('>i', rfuD) aedr[52:56] = struct.pack('>i', rfuE) aedr[56:block_size] = cdata f.write(aedr) return byte_loc
python
def _write_aedr(self, f, gORz, attrNum, entryNum, value, pdataType, pnumElems, zVar): ''' Writes an aedr into the end of the file. Parameters: f : file The current open CDF file gORz : bool True if this entry is for a global or z variable, False if r variable attrNum : int Number of the attribute this aedr belongs to. entryNum : int Number of the entry value : The value of this entry pdataType : int The CDF data type of the value pnumElems : int Number of elements in the value. zVar : bool True if this entry belongs to a z variable Returns: byte_loc : int This current location in the file after writing the aedr. ''' f.seek(0, 2) byte_loc = f.tell() if (gORz == True or zVar != True): section_type = CDF.AgrEDR_ else: section_type = CDF.AzEDR_ nextAEDR = 0 if pdataType is None: # Figure out Data Type if not supplied if isinstance(value, (list, tuple)): avalue = value[0] else: avalue = value if (isinstance(avalue, int)): pdataType = CDF.CDF_INT8 elif (isinstance(avalue, float)): pdataType = CDF.CDF_FLOAT elif (isinstance(avalue, complex)): pdataType = CDF.CDF_EPOCH16 else: # assume a boolean pdataType = CDF.CDF_INT1 if pnumElems is None: # Figure out number of elements if not supplied if isinstance(value, str): pdataType = CDF.CDF_CHAR pnumElems = len(value) else: if isinstance(value, (list, tuple)): pnumElems = len(value) else: pnumElems = 1 dataType = pdataType numElems = pnumElems rfuB = 0 rfuC = 0 rfuD = -1 rfuE = -1 if gORz: numStrings = 0 else: if (isinstance(value, str)): numStrings = value.count('\\N ') + 1 else: numStrings = 0 recs, cdata = self._convert_data(dataType, numElems, 1, value) if (dataType == 51): numElems = len(cdata) block_size = len(cdata) + 56 aedr = bytearray(block_size) aedr[0:8] = struct.pack('>q', block_size) aedr[8:12] = struct.pack('>i', section_type) aedr[12:20] = struct.pack('>q', nextAEDR) aedr[20:24] = struct.pack('>i', attrNum) aedr[24:28] = struct.pack('>i', dataType) aedr[28:32] = struct.pack('>i', entryNum) aedr[32:36] = struct.pack('>i', numElems) aedr[36:40] = struct.pack('>i', numStrings) aedr[40:44] = struct.pack('>i', rfuB) aedr[44:48] = struct.pack('>i', rfuC) aedr[48:52] = struct.pack('>i', rfuD) aedr[52:56] = struct.pack('>i', rfuE) aedr[56:block_size] = cdata f.write(aedr) return byte_loc
[ "def", "_write_aedr", "(", "self", ",", "f", ",", "gORz", ",", "attrNum", ",", "entryNum", ",", "value", ",", "pdataType", ",", "pnumElems", ",", "zVar", ")", ":", "f", ".", "seek", "(", "0", ",", "2", ")", "byte_loc", "=", "f", ".", "tell", "(",...
Writes an aedr into the end of the file. Parameters: f : file The current open CDF file gORz : bool True if this entry is for a global or z variable, False if r variable attrNum : int Number of the attribute this aedr belongs to. entryNum : int Number of the entry value : The value of this entry pdataType : int The CDF data type of the value pnumElems : int Number of elements in the value. zVar : bool True if this entry belongs to a z variable Returns: byte_loc : int This current location in the file after writing the aedr.
[ "Writes", "an", "aedr", "into", "the", "end", "of", "the", "file", "." ]
d237c60e5db67db0f92d96054209c25c4042465c
https://github.com/MAVENSDC/cdflib/blob/d237c60e5db67db0f92d96054209c25c4042465c/cdflib/cdfwrite.py#L1635-L1731
train
208,583
MAVENSDC/cdflib
cdflib/cdfwrite.py
CDF._write_vxr
def _write_vxr(self, f, numEntries=None): ''' Creates a VXR at the end of the file. Returns byte location of the VXR The First, Last, and Offset fields will need to be filled in later ''' f.seek(0, 2) byte_loc = f.tell() section_type = CDF.VXR_ nextVXR = 0 if (numEntries == None): nEntries = CDF.NUM_VXR_ENTRIES else: nEntries = int(numEntries) block_size = CDF.VXR_BASE_SIZE64 + (4 + 4 + 8) * nEntries nUsedEntries = 0 firsts = [-1] * nEntries lasts = [-1] * nEntries offsets = [-1] * nEntries vxr = bytearray(block_size) vxr[0:8] = struct.pack('>q', block_size) vxr[8:12] = struct.pack('>i', section_type) vxr[12:20] = struct.pack('>q', nextVXR) vxr[20:24] = struct.pack('>i', nEntries) vxr[24:28] = struct.pack('>i', nUsedEntries) estart = 28 + 4*nEntries vxr[28:estart] = struct.pack('>%si' % nEntries, *firsts) eend = estart + 4*nEntries vxr[estart:eend] = struct.pack('>%si' % nEntries, *lasts) vxr[eend:block_size] = struct.pack('>%sq' % nEntries, *offsets) f.write(vxr) return byte_loc
python
def _write_vxr(self, f, numEntries=None): ''' Creates a VXR at the end of the file. Returns byte location of the VXR The First, Last, and Offset fields will need to be filled in later ''' f.seek(0, 2) byte_loc = f.tell() section_type = CDF.VXR_ nextVXR = 0 if (numEntries == None): nEntries = CDF.NUM_VXR_ENTRIES else: nEntries = int(numEntries) block_size = CDF.VXR_BASE_SIZE64 + (4 + 4 + 8) * nEntries nUsedEntries = 0 firsts = [-1] * nEntries lasts = [-1] * nEntries offsets = [-1] * nEntries vxr = bytearray(block_size) vxr[0:8] = struct.pack('>q', block_size) vxr[8:12] = struct.pack('>i', section_type) vxr[12:20] = struct.pack('>q', nextVXR) vxr[20:24] = struct.pack('>i', nEntries) vxr[24:28] = struct.pack('>i', nUsedEntries) estart = 28 + 4*nEntries vxr[28:estart] = struct.pack('>%si' % nEntries, *firsts) eend = estart + 4*nEntries vxr[estart:eend] = struct.pack('>%si' % nEntries, *lasts) vxr[eend:block_size] = struct.pack('>%sq' % nEntries, *offsets) f.write(vxr) return byte_loc
[ "def", "_write_vxr", "(", "self", ",", "f", ",", "numEntries", "=", "None", ")", ":", "f", ".", "seek", "(", "0", ",", "2", ")", "byte_loc", "=", "f", ".", "tell", "(", ")", "section_type", "=", "CDF", ".", "VXR_", "nextVXR", "=", "0", "if", "(...
Creates a VXR at the end of the file. Returns byte location of the VXR The First, Last, and Offset fields will need to be filled in later
[ "Creates", "a", "VXR", "at", "the", "end", "of", "the", "file", ".", "Returns", "byte", "location", "of", "the", "VXR", "The", "First", "Last", "and", "Offset", "fields", "will", "need", "to", "be", "filled", "in", "later" ]
d237c60e5db67db0f92d96054209c25c4042465c
https://github.com/MAVENSDC/cdflib/blob/d237c60e5db67db0f92d96054209c25c4042465c/cdflib/cdfwrite.py#L1913-L1946
train
208,584
MAVENSDC/cdflib
cdflib/cdfwrite.py
CDF._write_vvr
def _write_vvr(self, f, data): ''' Writes a vvr to the end of file "f" with the byte stream "data". ''' f.seek(0, 2) byte_loc = f.tell() block_size = CDF.VVR_BASE_SIZE64 + len(data) section_type = CDF.VVR_ vvr1 = bytearray(12) vvr1[0:8] = struct.pack('>q', block_size) vvr1[8:12] = struct.pack('>i', section_type) f.write(vvr1) f.write(data) return byte_loc
python
def _write_vvr(self, f, data): ''' Writes a vvr to the end of file "f" with the byte stream "data". ''' f.seek(0, 2) byte_loc = f.tell() block_size = CDF.VVR_BASE_SIZE64 + len(data) section_type = CDF.VVR_ vvr1 = bytearray(12) vvr1[0:8] = struct.pack('>q', block_size) vvr1[8:12] = struct.pack('>i', section_type) f.write(vvr1) f.write(data) return byte_loc
[ "def", "_write_vvr", "(", "self", ",", "f", ",", "data", ")", ":", "f", ".", "seek", "(", "0", ",", "2", ")", "byte_loc", "=", "f", ".", "tell", "(", ")", "block_size", "=", "CDF", ".", "VVR_BASE_SIZE64", "+", "len", "(", "data", ")", "section_ty...
Writes a vvr to the end of file "f" with the byte stream "data".
[ "Writes", "a", "vvr", "to", "the", "end", "of", "file", "f", "with", "the", "byte", "stream", "data", "." ]
d237c60e5db67db0f92d96054209c25c4042465c
https://github.com/MAVENSDC/cdflib/blob/d237c60e5db67db0f92d96054209c25c4042465c/cdflib/cdfwrite.py#L1948-L1963
train
208,585
MAVENSDC/cdflib
cdflib/cdfwrite.py
CDF._write_cpr
def _write_cpr(self, f, cType, parameter) -> int: ''' Write compression info to the end of the file in a CPR. ''' f.seek(0, 2) byte_loc = f.tell() block_size = CDF.CPR_BASE_SIZE64 + 4 section_type = CDF.CPR_ rfuA = 0 pCount = 1 cpr = bytearray(block_size) cpr[0:8] = struct.pack('>q', block_size) cpr[8:12] = struct.pack('>i', section_type) cpr[12:16] = struct.pack('>i', cType) cpr[16:20] = struct.pack('>i', rfuA) cpr[20:24] = struct.pack('>i', pCount) cpr[24:28] = struct.pack('>i', parameter) f.write(cpr) return byte_loc
python
def _write_cpr(self, f, cType, parameter) -> int: ''' Write compression info to the end of the file in a CPR. ''' f.seek(0, 2) byte_loc = f.tell() block_size = CDF.CPR_BASE_SIZE64 + 4 section_type = CDF.CPR_ rfuA = 0 pCount = 1 cpr = bytearray(block_size) cpr[0:8] = struct.pack('>q', block_size) cpr[8:12] = struct.pack('>i', section_type) cpr[12:16] = struct.pack('>i', cType) cpr[16:20] = struct.pack('>i', rfuA) cpr[20:24] = struct.pack('>i', pCount) cpr[24:28] = struct.pack('>i', parameter) f.write(cpr) return byte_loc
[ "def", "_write_cpr", "(", "self", ",", "f", ",", "cType", ",", "parameter", ")", "->", "int", ":", "f", ".", "seek", "(", "0", ",", "2", ")", "byte_loc", "=", "f", ".", "tell", "(", ")", "block_size", "=", "CDF", ".", "CPR_BASE_SIZE64", "+", "4",...
Write compression info to the end of the file in a CPR.
[ "Write", "compression", "info", "to", "the", "end", "of", "the", "file", "in", "a", "CPR", "." ]
d237c60e5db67db0f92d96054209c25c4042465c
https://github.com/MAVENSDC/cdflib/blob/d237c60e5db67db0f92d96054209c25c4042465c/cdflib/cdfwrite.py#L1965-L1985
train
208,586
MAVENSDC/cdflib
cdflib/cdfwrite.py
CDF._write_cvvr
def _write_cvvr(self, f, data): ''' Write compressed "data" variable to the end of the file in a CVVR ''' f.seek(0, 2) byte_loc = f.tell() cSize = len(data) block_size = CDF.CVVR_BASE_SIZE64 + cSize section_type = CDF.CVVR_ rfuA = 0 cvvr1 = bytearray(24) cvvr1[0:8] = struct.pack('>q', block_size) cvvr1[8:12] = struct.pack('>i', section_type) cvvr1[12:16] = struct.pack('>i', rfuA) cvvr1[16:24] = struct.pack('>q', cSize) f.write(cvvr1) f.write(data) return byte_loc
python
def _write_cvvr(self, f, data): ''' Write compressed "data" variable to the end of the file in a CVVR ''' f.seek(0, 2) byte_loc = f.tell() cSize = len(data) block_size = CDF.CVVR_BASE_SIZE64 + cSize section_type = CDF.CVVR_ rfuA = 0 cvvr1 = bytearray(24) cvvr1[0:8] = struct.pack('>q', block_size) cvvr1[8:12] = struct.pack('>i', section_type) cvvr1[12:16] = struct.pack('>i', rfuA) cvvr1[16:24] = struct.pack('>q', cSize) f.write(cvvr1) f.write(data) return byte_loc
[ "def", "_write_cvvr", "(", "self", ",", "f", ",", "data", ")", ":", "f", ".", "seek", "(", "0", ",", "2", ")", "byte_loc", "=", "f", ".", "tell", "(", ")", "cSize", "=", "len", "(", "data", ")", "block_size", "=", "CDF", ".", "CVVR_BASE_SIZE64", ...
Write compressed "data" variable to the end of the file in a CVVR
[ "Write", "compressed", "data", "variable", "to", "the", "end", "of", "the", "file", "in", "a", "CVVR" ]
d237c60e5db67db0f92d96054209c25c4042465c
https://github.com/MAVENSDC/cdflib/blob/d237c60e5db67db0f92d96054209c25c4042465c/cdflib/cdfwrite.py#L1987-L2006
train
208,587
MAVENSDC/cdflib
cdflib/cdfwrite.py
CDF._write_ccr
def _write_ccr(self, f, g, level: int): ''' Write a CCR to file "g" from file "f" with level "level". Currently, only handles gzip compression. Parameters: f : file Uncompressed file to read from g : file File to read the compressed file into level : int The level of the compression from 0 to 9 Returns: None ''' f.seek(8) data = f.read() uSize = len(data) section_type = CDF.CCR_ rfuA = 0 cData = gzip.compress(data, level) block_size = CDF.CCR_BASE_SIZE64 + len(cData) cprOffset = 0 ccr1 = bytearray(32) #ccr1[0:4] = binascii.unhexlify(CDF.V3magicNUMBER_1) #ccr1[4:8] = binascii.unhexlify(CDF.V3magicNUMBER_2c) ccr1[0:8] = struct.pack('>q', block_size) ccr1[8:12] = struct.pack('>i', section_type) ccr1[12:20] = struct.pack('>q', cprOffset) ccr1[20:28] = struct.pack('>q', uSize) ccr1[28:32] = struct.pack('>i', rfuA) g.seek(0, 2) g.write(ccr1) g.write(cData) cprOffset = self._write_cpr(g, CDF.GZIP_COMPRESSION, level) self._update_offset_value(g, 20, 8, cprOffset)
python
def _write_ccr(self, f, g, level: int): ''' Write a CCR to file "g" from file "f" with level "level". Currently, only handles gzip compression. Parameters: f : file Uncompressed file to read from g : file File to read the compressed file into level : int The level of the compression from 0 to 9 Returns: None ''' f.seek(8) data = f.read() uSize = len(data) section_type = CDF.CCR_ rfuA = 0 cData = gzip.compress(data, level) block_size = CDF.CCR_BASE_SIZE64 + len(cData) cprOffset = 0 ccr1 = bytearray(32) #ccr1[0:4] = binascii.unhexlify(CDF.V3magicNUMBER_1) #ccr1[4:8] = binascii.unhexlify(CDF.V3magicNUMBER_2c) ccr1[0:8] = struct.pack('>q', block_size) ccr1[8:12] = struct.pack('>i', section_type) ccr1[12:20] = struct.pack('>q', cprOffset) ccr1[20:28] = struct.pack('>q', uSize) ccr1[28:32] = struct.pack('>i', rfuA) g.seek(0, 2) g.write(ccr1) g.write(cData) cprOffset = self._write_cpr(g, CDF.GZIP_COMPRESSION, level) self._update_offset_value(g, 20, 8, cprOffset)
[ "def", "_write_ccr", "(", "self", ",", "f", ",", "g", ",", "level", ":", "int", ")", ":", "f", ".", "seek", "(", "8", ")", "data", "=", "f", ".", "read", "(", ")", "uSize", "=", "len", "(", "data", ")", "section_type", "=", "CDF", ".", "CCR_"...
Write a CCR to file "g" from file "f" with level "level". Currently, only handles gzip compression. Parameters: f : file Uncompressed file to read from g : file File to read the compressed file into level : int The level of the compression from 0 to 9 Returns: None
[ "Write", "a", "CCR", "to", "file", "g", "from", "file", "f", "with", "level", "level", ".", "Currently", "only", "handles", "gzip", "compression", "." ]
d237c60e5db67db0f92d96054209c25c4042465c
https://github.com/MAVENSDC/cdflib/blob/d237c60e5db67db0f92d96054209c25c4042465c/cdflib/cdfwrite.py#L2008-L2044
train
208,588
MAVENSDC/cdflib
cdflib/cdfwrite.py
CDF._convert_type
def _convert_type(data_type): # @NoSelf ''' Converts CDF data types into python types ''' if data_type in (1, 41): dt_string = 'b' elif data_type == 2: dt_string = 'h' elif data_type == 4: dt_string = 'i' elif data_type in (8, 33): dt_string = 'q' elif data_type == 11: dt_string = 'B' elif data_type == 12: dt_string = 'H' elif data_type == 14: dt_string = 'I' elif data_type in (21, 44): dt_string = 'f' elif data_type in (22, 45, 31): dt_string = 'd' elif data_type == 32: dt_string = 'd' elif data_type in (51, 52): dt_string = 's' else: dt_string = '' return dt_string
python
def _convert_type(data_type): # @NoSelf ''' Converts CDF data types into python types ''' if data_type in (1, 41): dt_string = 'b' elif data_type == 2: dt_string = 'h' elif data_type == 4: dt_string = 'i' elif data_type in (8, 33): dt_string = 'q' elif data_type == 11: dt_string = 'B' elif data_type == 12: dt_string = 'H' elif data_type == 14: dt_string = 'I' elif data_type in (21, 44): dt_string = 'f' elif data_type in (22, 45, 31): dt_string = 'd' elif data_type == 32: dt_string = 'd' elif data_type in (51, 52): dt_string = 's' else: dt_string = '' return dt_string
[ "def", "_convert_type", "(", "data_type", ")", ":", "# @NoSelf", "if", "data_type", "in", "(", "1", ",", "41", ")", ":", "dt_string", "=", "'b'", "elif", "data_type", "==", "2", ":", "dt_string", "=", "'h'", "elif", "data_type", "==", "4", ":", "dt_str...
Converts CDF data types into python types
[ "Converts", "CDF", "data", "types", "into", "python", "types" ]
d237c60e5db67db0f92d96054209c25c4042465c
https://github.com/MAVENSDC/cdflib/blob/d237c60e5db67db0f92d96054209c25c4042465c/cdflib/cdfwrite.py#L2070-L2099
train
208,589
MAVENSDC/cdflib
cdflib/cdfwrite.py
CDF._convert_nptype
def _convert_nptype(data_type, data): # @NoSelf ''' Converts "data" of CDF type "data_type" into a numpy array ''' if data_type in (1, 41): return np.int8(data).tobytes() elif data_type == 2: return np.int16(data).tobytes() elif data_type == 4: return np.int32(data).tobytes() elif (data_type == 8) or (data_type == 33): return np.int64(data).tobytes() elif data_type == 11: return np.uint8(data).tobytes() elif data_type == 12: return np.uint16(data).tobytes() elif data_type == 14: return np.uint32(data).tobytes() elif (data_type == 21) or (data_type == 44): return np.float32(data).tobytes() elif (data_type == 22) or (data_type == 45) or (data_type == 31): return np.float64(data).tobytes() elif (data_type == 32): return np.complex128(data).tobytes() else: return data
python
def _convert_nptype(data_type, data): # @NoSelf ''' Converts "data" of CDF type "data_type" into a numpy array ''' if data_type in (1, 41): return np.int8(data).tobytes() elif data_type == 2: return np.int16(data).tobytes() elif data_type == 4: return np.int32(data).tobytes() elif (data_type == 8) or (data_type == 33): return np.int64(data).tobytes() elif data_type == 11: return np.uint8(data).tobytes() elif data_type == 12: return np.uint16(data).tobytes() elif data_type == 14: return np.uint32(data).tobytes() elif (data_type == 21) or (data_type == 44): return np.float32(data).tobytes() elif (data_type == 22) or (data_type == 45) or (data_type == 31): return np.float64(data).tobytes() elif (data_type == 32): return np.complex128(data).tobytes() else: return data
[ "def", "_convert_nptype", "(", "data_type", ",", "data", ")", ":", "# @NoSelf", "if", "data_type", "in", "(", "1", ",", "41", ")", ":", "return", "np", ".", "int8", "(", "data", ")", ".", "tobytes", "(", ")", "elif", "data_type", "==", "2", ":", "r...
Converts "data" of CDF type "data_type" into a numpy array
[ "Converts", "data", "of", "CDF", "type", "data_type", "into", "a", "numpy", "array" ]
d237c60e5db67db0f92d96054209c25c4042465c
https://github.com/MAVENSDC/cdflib/blob/d237c60e5db67db0f92d96054209c25c4042465c/cdflib/cdfwrite.py#L2101-L2126
train
208,590
MAVENSDC/cdflib
cdflib/cdfwrite.py
CDF._default_pad
def _default_pad(self, data_type, numElems): ''' Determines the default pad data for a "data_type" ''' order = self._convert_option() if (data_type == 1) or (data_type == 41): pad_value = struct.pack(order+'b', -127) elif data_type == 2: pad_value = struct.pack(order+'h', -32767) elif data_type == 4: pad_value = struct.pack(order+'i', -2147483647) elif (data_type == 8) or (data_type == 33): pad_value = struct.pack(order+'q', -9223372036854775807) elif data_type == 11: pad_value = struct.pack(order+'B', 254) elif data_type == 12: pad_value = struct.pack(order+'H', 65534) elif data_type == 14: pad_value = struct.pack(order+'I', 4294967294) elif (data_type == 21) or (data_type == 44): pad_value = struct.pack(order+'f', -1.0E30) elif (data_type == 22) or (data_type == 45): pad_value = struct.pack(order+'d', -1.0E30) elif (data_type == 31): pad_value = struct.pack(order+'d', 0.0) elif (data_type == 32): pad_value = struct.pack(order+'2d', *[0.0, 0.0]) elif (data_type == 51) or (data_type == 52): tmpPad = str(' '*numElems).encode() form = str(numElems) pad_value = struct.pack(form+'b', *tmpPad) return pad_value
python
def _default_pad(self, data_type, numElems): ''' Determines the default pad data for a "data_type" ''' order = self._convert_option() if (data_type == 1) or (data_type == 41): pad_value = struct.pack(order+'b', -127) elif data_type == 2: pad_value = struct.pack(order+'h', -32767) elif data_type == 4: pad_value = struct.pack(order+'i', -2147483647) elif (data_type == 8) or (data_type == 33): pad_value = struct.pack(order+'q', -9223372036854775807) elif data_type == 11: pad_value = struct.pack(order+'B', 254) elif data_type == 12: pad_value = struct.pack(order+'H', 65534) elif data_type == 14: pad_value = struct.pack(order+'I', 4294967294) elif (data_type == 21) or (data_type == 44): pad_value = struct.pack(order+'f', -1.0E30) elif (data_type == 22) or (data_type == 45): pad_value = struct.pack(order+'d', -1.0E30) elif (data_type == 31): pad_value = struct.pack(order+'d', 0.0) elif (data_type == 32): pad_value = struct.pack(order+'2d', *[0.0, 0.0]) elif (data_type == 51) or (data_type == 52): tmpPad = str(' '*numElems).encode() form = str(numElems) pad_value = struct.pack(form+'b', *tmpPad) return pad_value
[ "def", "_default_pad", "(", "self", ",", "data_type", ",", "numElems", ")", ":", "order", "=", "self", ".", "_convert_option", "(", ")", "if", "(", "data_type", "==", "1", ")", "or", "(", "data_type", "==", "41", ")", ":", "pad_value", "=", "struct", ...
Determines the default pad data for a "data_type"
[ "Determines", "the", "default", "pad", "data", "for", "a", "data_type" ]
d237c60e5db67db0f92d96054209c25c4042465c
https://github.com/MAVENSDC/cdflib/blob/d237c60e5db67db0f92d96054209c25c4042465c/cdflib/cdfwrite.py#L2128-L2159
train
208,591
MAVENSDC/cdflib
cdflib/cdfwrite.py
CDF._num_values
def _num_values(self, zVar, varNum): ''' Determines the number of values in a record. Set zVar=True if this is a zvariable. ''' values = 1 if (zVar == True): numDims = self.zvarsinfo[varNum][2] dimSizes = self.zvarsinfo[varNum][3] dimVary = self.zvarsinfo[varNum][4] else: numDims = self.rvarsinfo[varNum][2] dimSizes = self.rvarsinfo[varNum][3] dimVary = self.rvarsinfo[varNum][4] if (numDims < 1): return values else: for x in range(0, numDims): if (zVar == True): values = values * dimSizes[x] else: if (dimVary[x] != 0): values = values * dimSizes[x] return values
python
def _num_values(self, zVar, varNum): ''' Determines the number of values in a record. Set zVar=True if this is a zvariable. ''' values = 1 if (zVar == True): numDims = self.zvarsinfo[varNum][2] dimSizes = self.zvarsinfo[varNum][3] dimVary = self.zvarsinfo[varNum][4] else: numDims = self.rvarsinfo[varNum][2] dimSizes = self.rvarsinfo[varNum][3] dimVary = self.rvarsinfo[varNum][4] if (numDims < 1): return values else: for x in range(0, numDims): if (zVar == True): values = values * dimSizes[x] else: if (dimVary[x] != 0): values = values * dimSizes[x] return values
[ "def", "_num_values", "(", "self", ",", "zVar", ",", "varNum", ")", ":", "values", "=", "1", "if", "(", "zVar", "==", "True", ")", ":", "numDims", "=", "self", ".", "zvarsinfo", "[", "varNum", "]", "[", "2", "]", "dimSizes", "=", "self", ".", "zv...
Determines the number of values in a record. Set zVar=True if this is a zvariable.
[ "Determines", "the", "number", "of", "values", "in", "a", "record", ".", "Set", "zVar", "=", "True", "if", "this", "is", "a", "zvariable", "." ]
d237c60e5db67db0f92d96054209c25c4042465c
https://github.com/MAVENSDC/cdflib/blob/d237c60e5db67db0f92d96054209c25c4042465c/cdflib/cdfwrite.py#L2269-L2292
train
208,592
MAVENSDC/cdflib
cdflib/cdfwrite.py
CDF._read_offset_value
def _read_offset_value(self, f, offset, size): ''' Reads an integer value from file "f" at location "offset". ''' f.seek(offset, 0) if (size == 8): return int.from_bytes(f.read(8), 'big', signed=True) else: return int.from_bytes(f.read(4), 'big', signed=True)
python
def _read_offset_value(self, f, offset, size): ''' Reads an integer value from file "f" at location "offset". ''' f.seek(offset, 0) if (size == 8): return int.from_bytes(f.read(8), 'big', signed=True) else: return int.from_bytes(f.read(4), 'big', signed=True)
[ "def", "_read_offset_value", "(", "self", ",", "f", ",", "offset", ",", "size", ")", ":", "f", ".", "seek", "(", "offset", ",", "0", ")", "if", "(", "size", "==", "8", ")", ":", "return", "int", ".", "from_bytes", "(", "f", ".", "read", "(", "8...
Reads an integer value from file "f" at location "offset".
[ "Reads", "an", "integer", "value", "from", "file", "f", "at", "location", "offset", "." ]
d237c60e5db67db0f92d96054209c25c4042465c
https://github.com/MAVENSDC/cdflib/blob/d237c60e5db67db0f92d96054209c25c4042465c/cdflib/cdfwrite.py#L2294-L2302
train
208,593
MAVENSDC/cdflib
cdflib/cdfwrite.py
CDF._update_offset_value
def _update_offset_value(self, f, offset, size, value): ''' Writes "value" into location "offset" in file "f". ''' f.seek(offset, 0) if (size == 8): f.write(struct.pack('>q', value)) else: f.write(struct.pack('>i', value))
python
def _update_offset_value(self, f, offset, size, value): ''' Writes "value" into location "offset" in file "f". ''' f.seek(offset, 0) if (size == 8): f.write(struct.pack('>q', value)) else: f.write(struct.pack('>i', value))
[ "def", "_update_offset_value", "(", "self", ",", "f", ",", "offset", ",", "size", ",", "value", ")", ":", "f", ".", "seek", "(", "offset", ",", "0", ")", "if", "(", "size", "==", "8", ")", ":", "f", ".", "write", "(", "struct", ".", "pack", "("...
Writes "value" into location "offset" in file "f".
[ "Writes", "value", "into", "location", "offset", "in", "file", "f", "." ]
d237c60e5db67db0f92d96054209c25c4042465c
https://github.com/MAVENSDC/cdflib/blob/d237c60e5db67db0f92d96054209c25c4042465c/cdflib/cdfwrite.py#L2304-L2312
train
208,594
MAVENSDC/cdflib
cdflib/cdfwrite.py
CDF._update_aedr_link
def _update_aedr_link(self, f, attrNum, zVar, varNum, offset): ''' Updates variable aedr links Parameters: f : file The open CDF file attrNum : int The number of the attribute to change zVar : bool True if we are updating a z variable attribute varNum : int The variable number associated with this aedr offset : int The offset in the file to the AEDR Returns: None ''' # The offset to this AEDR's ADR adr_offset = self.attrsinfo[attrNum][2] # Get the number of entries if zVar: f.seek(adr_offset+56, 0) # ADR's NzEntries entries = int.from_bytes(f.read(4), 'big', signed=True) # ADR's MAXzEntry maxEntry = int.from_bytes(f.read(4), 'big', signed=True) else: f.seek(adr_offset+36, 0) # ADR's NgrEntries entries = int.from_bytes(f.read(4), 'big', signed=True) # ADR's MAXgrEntry maxEntry = int.from_bytes(f.read(4), 'big', signed=True) if (entries == 0): # If this is the first entry, update the ADR to reflect if zVar: # AzEDRhead self._update_offset_value(f, adr_offset+48, 8, offset) # NzEntries self._update_offset_value(f, adr_offset+56, 4, 1) # MaxzEntry self._update_offset_value(f, adr_offset+60, 4, varNum) else: # AgrEDRhead self._update_offset_value(f, adr_offset+20, 8, offset) # NgrEntries self._update_offset_value(f, adr_offset+36, 4, 1) # MaxgrEntry self._update_offset_value(f, adr_offset+40, 4, varNum) else: if zVar: f.seek(adr_offset+48, 0) head = int.from_bytes(f.read(8), 'big', signed=True) else: f.seek(adr_offset+20, 0) head = int.from_bytes(f.read(8), 'big', signed=True) aedr = head previous_aedr = head done = False # For each entry, re-adjust file offsets if needed for _ in range(0, entries): f.seek(aedr+28, 0) # Get variable number for entry num = int.from_bytes(f.read(4), 'big', signed=True) if (num > varNum): # insert an aedr to the chain # AEDRnext self._update_offset_value(f, previous_aedr+12, 8, offset) # AEDRnext self._update_offset_value(f, offset+12, 8, aedr) done = True break else: # move to the next aedr in chain f.seek(aedr+12, 0) previous_aedr = aedr aedr = int.from_bytes(f.read(8), 'big', signed=True) # If no link was made, update the last found aedr if not done: self._update_offset_value(f, previous_aedr+12, 8, offset) if zVar: self._update_offset_value(f, adr_offset+56, 4, entries+1) if (maxEntry < varNum): self._update_offset_value(f, adr_offset+60, 4, varNum) else: self._update_offset_value(f, adr_offset+36, 4, entries+1) if (maxEntry < varNum): self._update_offset_value(f, adr_offset+40, 4, varNum)
python
def _update_aedr_link(self, f, attrNum, zVar, varNum, offset): ''' Updates variable aedr links Parameters: f : file The open CDF file attrNum : int The number of the attribute to change zVar : bool True if we are updating a z variable attribute varNum : int The variable number associated with this aedr offset : int The offset in the file to the AEDR Returns: None ''' # The offset to this AEDR's ADR adr_offset = self.attrsinfo[attrNum][2] # Get the number of entries if zVar: f.seek(adr_offset+56, 0) # ADR's NzEntries entries = int.from_bytes(f.read(4), 'big', signed=True) # ADR's MAXzEntry maxEntry = int.from_bytes(f.read(4), 'big', signed=True) else: f.seek(adr_offset+36, 0) # ADR's NgrEntries entries = int.from_bytes(f.read(4), 'big', signed=True) # ADR's MAXgrEntry maxEntry = int.from_bytes(f.read(4), 'big', signed=True) if (entries == 0): # If this is the first entry, update the ADR to reflect if zVar: # AzEDRhead self._update_offset_value(f, adr_offset+48, 8, offset) # NzEntries self._update_offset_value(f, adr_offset+56, 4, 1) # MaxzEntry self._update_offset_value(f, adr_offset+60, 4, varNum) else: # AgrEDRhead self._update_offset_value(f, adr_offset+20, 8, offset) # NgrEntries self._update_offset_value(f, adr_offset+36, 4, 1) # MaxgrEntry self._update_offset_value(f, adr_offset+40, 4, varNum) else: if zVar: f.seek(adr_offset+48, 0) head = int.from_bytes(f.read(8), 'big', signed=True) else: f.seek(adr_offset+20, 0) head = int.from_bytes(f.read(8), 'big', signed=True) aedr = head previous_aedr = head done = False # For each entry, re-adjust file offsets if needed for _ in range(0, entries): f.seek(aedr+28, 0) # Get variable number for entry num = int.from_bytes(f.read(4), 'big', signed=True) if (num > varNum): # insert an aedr to the chain # AEDRnext self._update_offset_value(f, previous_aedr+12, 8, offset) # AEDRnext self._update_offset_value(f, offset+12, 8, aedr) done = True break else: # move to the next aedr in chain f.seek(aedr+12, 0) previous_aedr = aedr aedr = int.from_bytes(f.read(8), 'big', signed=True) # If no link was made, update the last found aedr if not done: self._update_offset_value(f, previous_aedr+12, 8, offset) if zVar: self._update_offset_value(f, adr_offset+56, 4, entries+1) if (maxEntry < varNum): self._update_offset_value(f, adr_offset+60, 4, varNum) else: self._update_offset_value(f, adr_offset+36, 4, entries+1) if (maxEntry < varNum): self._update_offset_value(f, adr_offset+40, 4, varNum)
[ "def", "_update_aedr_link", "(", "self", ",", "f", ",", "attrNum", ",", "zVar", ",", "varNum", ",", "offset", ")", ":", "# The offset to this AEDR's ADR", "adr_offset", "=", "self", ".", "attrsinfo", "[", "attrNum", "]", "[", "2", "]", "# Get the number of ent...
Updates variable aedr links Parameters: f : file The open CDF file attrNum : int The number of the attribute to change zVar : bool True if we are updating a z variable attribute varNum : int The variable number associated with this aedr offset : int The offset in the file to the AEDR Returns: None
[ "Updates", "variable", "aedr", "links" ]
d237c60e5db67db0f92d96054209c25c4042465c
https://github.com/MAVENSDC/cdflib/blob/d237c60e5db67db0f92d96054209c25c4042465c/cdflib/cdfwrite.py#L2314-L2406
train
208,595
MAVENSDC/cdflib
cdflib/cdfwrite.py
CDF._md5_compute
def _md5_compute(self, f): ''' Computes the checksum of the file ''' md5 = hashlib.md5() block_size = 16384 f.seek(0, 2) remaining = f.tell() f.seek(0) while (remaining > block_size): data = f.read(block_size) remaining = remaining - block_size md5.update(data) if remaining > 0: data = f.read(remaining) md5.update(data) return md5.digest()
python
def _md5_compute(self, f): ''' Computes the checksum of the file ''' md5 = hashlib.md5() block_size = 16384 f.seek(0, 2) remaining = f.tell() f.seek(0) while (remaining > block_size): data = f.read(block_size) remaining = remaining - block_size md5.update(data) if remaining > 0: data = f.read(remaining) md5.update(data) return md5.digest()
[ "def", "_md5_compute", "(", "self", ",", "f", ")", ":", "md5", "=", "hashlib", ".", "md5", "(", ")", "block_size", "=", "16384", "f", ".", "seek", "(", "0", ",", "2", ")", "remaining", "=", "f", ".", "tell", "(", ")", "f", ".", "seek", "(", "...
Computes the checksum of the file
[ "Computes", "the", "checksum", "of", "the", "file" ]
d237c60e5db67db0f92d96054209c25c4042465c
https://github.com/MAVENSDC/cdflib/blob/d237c60e5db67db0f92d96054209c25c4042465c/cdflib/cdfwrite.py#L2424-L2443
train
208,596
MAVENSDC/cdflib
cdflib/cdfread.py
CDF.cdf_info
def cdf_info(self): """ Returns a dictionary that shows the basic CDF information. This information includes +---------------+--------------------------------------------------------------------------------+ | ['CDF'] | the name of the CDF | +---------------+--------------------------------------------------------------------------------+ | ['Version'] | the version of the CDF | +---------------+--------------------------------------------------------------------------------+ | ['Encoding'] | the endianness of the CDF | +---------------+--------------------------------------------------------------------------------+ | ['Majority'] | the row/column majority | +---------------+--------------------------------------------------------------------------------+ | ['zVariables']| the dictionary for zVariable numbers and their corresponding names | +---------------+--------------------------------------------------------------------------------+ | ['rVariables']| the dictionary for rVariable numbers and their corresponding names | +---------------+--------------------------------------------------------------------------------+ | ['Attributes']| the dictionary for attribute numbers and their corresponding names and scopes | +---------------+--------------------------------------------------------------------------------+ | ['Checksum'] | the checksum indicator | +---------------+--------------------------------------------------------------------------------+ | ['Num_rdim'] | the number of dimensions, applicable only to rVariables | +---------------+--------------------------------------------------------------------------------+ | ['rDim_sizes'] | the dimensional sizes, applicable only to rVariables | +----------------+-------------------------------------------------------------------------------+ | ['Compressed']| CDF is compressed at the file-level | +---------------+--------------------------------------------------------------------------------+ | ['LeapSecondUpdated']| The last updated for the leap second table, if applicable | +---------------+--------------------------------------------------------------------------------+ """ mycdf_info = {} mycdf_info['CDF'] = self.file mycdf_info['Version'] = self._version mycdf_info['Encoding'] = self._encoding mycdf_info['Majority'] = self._majority mycdf_info['rVariables'], mycdf_info['zVariables'] = self._get_varnames() mycdf_info['Attributes'] = self._get_attnames() mycdf_info['Copyright'] = self._copyright mycdf_info['Checksum'] = self._md5 mycdf_info['Num_rdim'] = self._num_rdim mycdf_info['rDim_sizes'] = self._rdim_sizes mycdf_info['Compressed'] = self._compressed if (self.cdfversion > 2): mycdf_info['LeapSecondUpdated'] = self._leap_second_updated return mycdf_info
python
def cdf_info(self): """ Returns a dictionary that shows the basic CDF information. This information includes +---------------+--------------------------------------------------------------------------------+ | ['CDF'] | the name of the CDF | +---------------+--------------------------------------------------------------------------------+ | ['Version'] | the version of the CDF | +---------------+--------------------------------------------------------------------------------+ | ['Encoding'] | the endianness of the CDF | +---------------+--------------------------------------------------------------------------------+ | ['Majority'] | the row/column majority | +---------------+--------------------------------------------------------------------------------+ | ['zVariables']| the dictionary for zVariable numbers and their corresponding names | +---------------+--------------------------------------------------------------------------------+ | ['rVariables']| the dictionary for rVariable numbers and their corresponding names | +---------------+--------------------------------------------------------------------------------+ | ['Attributes']| the dictionary for attribute numbers and their corresponding names and scopes | +---------------+--------------------------------------------------------------------------------+ | ['Checksum'] | the checksum indicator | +---------------+--------------------------------------------------------------------------------+ | ['Num_rdim'] | the number of dimensions, applicable only to rVariables | +---------------+--------------------------------------------------------------------------------+ | ['rDim_sizes'] | the dimensional sizes, applicable only to rVariables | +----------------+-------------------------------------------------------------------------------+ | ['Compressed']| CDF is compressed at the file-level | +---------------+--------------------------------------------------------------------------------+ | ['LeapSecondUpdated']| The last updated for the leap second table, if applicable | +---------------+--------------------------------------------------------------------------------+ """ mycdf_info = {} mycdf_info['CDF'] = self.file mycdf_info['Version'] = self._version mycdf_info['Encoding'] = self._encoding mycdf_info['Majority'] = self._majority mycdf_info['rVariables'], mycdf_info['zVariables'] = self._get_varnames() mycdf_info['Attributes'] = self._get_attnames() mycdf_info['Copyright'] = self._copyright mycdf_info['Checksum'] = self._md5 mycdf_info['Num_rdim'] = self._num_rdim mycdf_info['rDim_sizes'] = self._rdim_sizes mycdf_info['Compressed'] = self._compressed if (self.cdfversion > 2): mycdf_info['LeapSecondUpdated'] = self._leap_second_updated return mycdf_info
[ "def", "cdf_info", "(", "self", ")", ":", "mycdf_info", "=", "{", "}", "mycdf_info", "[", "'CDF'", "]", "=", "self", ".", "file", "mycdf_info", "[", "'Version'", "]", "=", "self", ".", "_version", "mycdf_info", "[", "'Encoding'", "]", "=", "self", ".",...
Returns a dictionary that shows the basic CDF information. This information includes +---------------+--------------------------------------------------------------------------------+ | ['CDF'] | the name of the CDF | +---------------+--------------------------------------------------------------------------------+ | ['Version'] | the version of the CDF | +---------------+--------------------------------------------------------------------------------+ | ['Encoding'] | the endianness of the CDF | +---------------+--------------------------------------------------------------------------------+ | ['Majority'] | the row/column majority | +---------------+--------------------------------------------------------------------------------+ | ['zVariables']| the dictionary for zVariable numbers and their corresponding names | +---------------+--------------------------------------------------------------------------------+ | ['rVariables']| the dictionary for rVariable numbers and their corresponding names | +---------------+--------------------------------------------------------------------------------+ | ['Attributes']| the dictionary for attribute numbers and their corresponding names and scopes | +---------------+--------------------------------------------------------------------------------+ | ['Checksum'] | the checksum indicator | +---------------+--------------------------------------------------------------------------------+ | ['Num_rdim'] | the number of dimensions, applicable only to rVariables | +---------------+--------------------------------------------------------------------------------+ | ['rDim_sizes'] | the dimensional sizes, applicable only to rVariables | +----------------+-------------------------------------------------------------------------------+ | ['Compressed']| CDF is compressed at the file-level | +---------------+--------------------------------------------------------------------------------+ | ['LeapSecondUpdated']| The last updated for the leap second table, if applicable | +---------------+--------------------------------------------------------------------------------+
[ "Returns", "a", "dictionary", "that", "shows", "the", "basic", "CDF", "information", "." ]
d237c60e5db67db0f92d96054209c25c4042465c
https://github.com/MAVENSDC/cdflib/blob/d237c60e5db67db0f92d96054209c25c4042465c/cdflib/cdfread.py#L128-L175
train
208,597
MAVENSDC/cdflib
cdflib/cdfread.py
CDF.varinq
def varinq(self, variable): """ Returns a dictionary that shows the basic variable information. This information includes +-----------------+--------------------------------------------------------------------------------+ | ['Variable'] | the name of the variable | +-----------------+--------------------------------------------------------------------------------+ | ['Num'] | the variable number | +-----------------+--------------------------------------------------------------------------------+ | ['Var_Type'] | the variable type: zVariable or rVariable | +-----------------+--------------------------------------------------------------------------------+ | ['Data_Type'] | the variable's CDF data type | +-----------------+--------------------------------------------------------------------------------+ | ['Num_Elements']| the number of elements of the variable | +-----------------+--------------------------------------------------------------------------------+ | ['Num_Dims'] | the dimensionality of the variable record | +-----------------+--------------------------------------------------------------------------------+ | ['Dim_Sizes'] | the shape of the variable record | +-----------------+--------------------------------------------------------------------------------+ | ['Sparse'] | the variable's record sparseness | +-----------------+--------------------------------------------------------------------------------+ | ['Last_Rec'] | the maximum written record number (0-based) | +-----------------+--------------------------------------------------------------------------------+ | ['Dim_Vary'] | the dimensional variance(s) | +-----------------+--------------------------------------------------------------------------------+ | ['Rec_Vary'] | the record variance | +-----------------+--------------------------------------------------------------------------------+ | ['Pad'] | the padded value if set | +-----------------+--------------------------------------------------------------------------------+ | ['Compress'] | the GZIP compression level, 0 to 9. 0 if not compressed | +-----------------+--------------------------------------------------------------------------------+ | ['Block_Factor']| the blocking factor if the variable is compressed | +-----------------+--------------------------------------------------------------------------------+ Parameters ---------- variable : """ vdr_info = self.varget(variable=variable, inq=True) if vdr_info is None: raise KeyError("Variable {} not found.".format(variable)) var = {} var['Variable'] = vdr_info['name'] var['Num'] = vdr_info['variable_number'] var['Var_Type'] = CDF._variable_token(vdr_info['section_type']) var['Data_Type'] = vdr_info['data_type'] var['Data_Type_Description'] = CDF._datatype_token(vdr_info['data_type']) var['Num_Elements'] = vdr_info['num_elements'] var['Num_Dims'] = vdr_info['num_dims'] var['Dim_Sizes'] = vdr_info['dim_sizes'] var['Sparse'] = CDF._sparse_token(vdr_info['sparse']) var['Last_Rec'] = vdr_info['max_records'] var['Rec_Vary'] = vdr_info['record_vary'] var['Dim_Vary'] = vdr_info['dim_vary'] if ('pad' in vdr_info): var['Pad'] = vdr_info['pad'] var['Compress'] = vdr_info['compression_level'] if ('blocking_factor' in vdr_info): var['Block_Factor'] = vdr_info['blocking_factor'] return var
python
def varinq(self, variable): """ Returns a dictionary that shows the basic variable information. This information includes +-----------------+--------------------------------------------------------------------------------+ | ['Variable'] | the name of the variable | +-----------------+--------------------------------------------------------------------------------+ | ['Num'] | the variable number | +-----------------+--------------------------------------------------------------------------------+ | ['Var_Type'] | the variable type: zVariable or rVariable | +-----------------+--------------------------------------------------------------------------------+ | ['Data_Type'] | the variable's CDF data type | +-----------------+--------------------------------------------------------------------------------+ | ['Num_Elements']| the number of elements of the variable | +-----------------+--------------------------------------------------------------------------------+ | ['Num_Dims'] | the dimensionality of the variable record | +-----------------+--------------------------------------------------------------------------------+ | ['Dim_Sizes'] | the shape of the variable record | +-----------------+--------------------------------------------------------------------------------+ | ['Sparse'] | the variable's record sparseness | +-----------------+--------------------------------------------------------------------------------+ | ['Last_Rec'] | the maximum written record number (0-based) | +-----------------+--------------------------------------------------------------------------------+ | ['Dim_Vary'] | the dimensional variance(s) | +-----------------+--------------------------------------------------------------------------------+ | ['Rec_Vary'] | the record variance | +-----------------+--------------------------------------------------------------------------------+ | ['Pad'] | the padded value if set | +-----------------+--------------------------------------------------------------------------------+ | ['Compress'] | the GZIP compression level, 0 to 9. 0 if not compressed | +-----------------+--------------------------------------------------------------------------------+ | ['Block_Factor']| the blocking factor if the variable is compressed | +-----------------+--------------------------------------------------------------------------------+ Parameters ---------- variable : """ vdr_info = self.varget(variable=variable, inq=True) if vdr_info is None: raise KeyError("Variable {} not found.".format(variable)) var = {} var['Variable'] = vdr_info['name'] var['Num'] = vdr_info['variable_number'] var['Var_Type'] = CDF._variable_token(vdr_info['section_type']) var['Data_Type'] = vdr_info['data_type'] var['Data_Type_Description'] = CDF._datatype_token(vdr_info['data_type']) var['Num_Elements'] = vdr_info['num_elements'] var['Num_Dims'] = vdr_info['num_dims'] var['Dim_Sizes'] = vdr_info['dim_sizes'] var['Sparse'] = CDF._sparse_token(vdr_info['sparse']) var['Last_Rec'] = vdr_info['max_records'] var['Rec_Vary'] = vdr_info['record_vary'] var['Dim_Vary'] = vdr_info['dim_vary'] if ('pad' in vdr_info): var['Pad'] = vdr_info['pad'] var['Compress'] = vdr_info['compression_level'] if ('blocking_factor' in vdr_info): var['Block_Factor'] = vdr_info['blocking_factor'] return var
[ "def", "varinq", "(", "self", ",", "variable", ")", ":", "vdr_info", "=", "self", ".", "varget", "(", "variable", "=", "variable", ",", "inq", "=", "True", ")", "if", "vdr_info", "is", "None", ":", "raise", "KeyError", "(", "\"Variable {} not found.\"", ...
Returns a dictionary that shows the basic variable information. This information includes +-----------------+--------------------------------------------------------------------------------+ | ['Variable'] | the name of the variable | +-----------------+--------------------------------------------------------------------------------+ | ['Num'] | the variable number | +-----------------+--------------------------------------------------------------------------------+ | ['Var_Type'] | the variable type: zVariable or rVariable | +-----------------+--------------------------------------------------------------------------------+ | ['Data_Type'] | the variable's CDF data type | +-----------------+--------------------------------------------------------------------------------+ | ['Num_Elements']| the number of elements of the variable | +-----------------+--------------------------------------------------------------------------------+ | ['Num_Dims'] | the dimensionality of the variable record | +-----------------+--------------------------------------------------------------------------------+ | ['Dim_Sizes'] | the shape of the variable record | +-----------------+--------------------------------------------------------------------------------+ | ['Sparse'] | the variable's record sparseness | +-----------------+--------------------------------------------------------------------------------+ | ['Last_Rec'] | the maximum written record number (0-based) | +-----------------+--------------------------------------------------------------------------------+ | ['Dim_Vary'] | the dimensional variance(s) | +-----------------+--------------------------------------------------------------------------------+ | ['Rec_Vary'] | the record variance | +-----------------+--------------------------------------------------------------------------------+ | ['Pad'] | the padded value if set | +-----------------+--------------------------------------------------------------------------------+ | ['Compress'] | the GZIP compression level, 0 to 9. 0 if not compressed | +-----------------+--------------------------------------------------------------------------------+ | ['Block_Factor']| the blocking factor if the variable is compressed | +-----------------+--------------------------------------------------------------------------------+ Parameters ---------- variable :
[ "Returns", "a", "dictionary", "that", "shows", "the", "basic", "variable", "information", "." ]
d237c60e5db67db0f92d96054209c25c4042465c
https://github.com/MAVENSDC/cdflib/blob/d237c60e5db67db0f92d96054209c25c4042465c/cdflib/cdfread.py#L177-L239
train
208,598
MAVENSDC/cdflib
cdflib/cdfread.py
CDF.attinq
def attinq(self, attribute=None): """ Get attribute information. Returns ------- dict Dictionary of attribution infromation. """ position = self._first_adr if isinstance(attribute, str): for _ in range(0, self._num_att): name, next_adr = self._read_adr_fast(position) if name.strip().lower() == attribute.strip().lower(): return self._read_adr(position) position = next_adr raise KeyError('No attribute {}'.format(attribute)) elif isinstance(attribute, int): if (attribute < 0 or attribute > self._num_zvariable): raise KeyError('No attribute {}'.format(attribute)) for _ in range(0, attribute): name, next_adr = self._read_adr_fast(position) position = next_adr return self._read_adr(position) else: print('Please set attribute keyword equal to the name or ', 'number of an attribute') attrs = self._get_attnames() print(attrs) for x in range(0, self._num_att): name = list(attrs[x].keys())[0] print('NAME: ' + name + ', NUMBER: ' + str(x) + ', SCOPE: ' + attrs[x][name]) return attrs
python
def attinq(self, attribute=None): """ Get attribute information. Returns ------- dict Dictionary of attribution infromation. """ position = self._first_adr if isinstance(attribute, str): for _ in range(0, self._num_att): name, next_adr = self._read_adr_fast(position) if name.strip().lower() == attribute.strip().lower(): return self._read_adr(position) position = next_adr raise KeyError('No attribute {}'.format(attribute)) elif isinstance(attribute, int): if (attribute < 0 or attribute > self._num_zvariable): raise KeyError('No attribute {}'.format(attribute)) for _ in range(0, attribute): name, next_adr = self._read_adr_fast(position) position = next_adr return self._read_adr(position) else: print('Please set attribute keyword equal to the name or ', 'number of an attribute') attrs = self._get_attnames() print(attrs) for x in range(0, self._num_att): name = list(attrs[x].keys())[0] print('NAME: ' + name + ', NUMBER: ' + str(x) + ', SCOPE: ' + attrs[x][name]) return attrs
[ "def", "attinq", "(", "self", ",", "attribute", "=", "None", ")", ":", "position", "=", "self", ".", "_first_adr", "if", "isinstance", "(", "attribute", ",", "str", ")", ":", "for", "_", "in", "range", "(", "0", ",", "self", ".", "_num_att", ")", "...
Get attribute information. Returns ------- dict Dictionary of attribution infromation.
[ "Get", "attribute", "information", "." ]
d237c60e5db67db0f92d96054209c25c4042465c
https://github.com/MAVENSDC/cdflib/blob/d237c60e5db67db0f92d96054209c25c4042465c/cdflib/cdfread.py#L247-L283
train
208,599