repository_name
stringlengths
5
67
func_path_in_repository
stringlengths
4
234
func_name
stringlengths
0
314
whole_func_string
stringlengths
52
3.87M
language
stringclasses
6 values
func_code_string
stringlengths
52
3.87M
func_code_tokens
listlengths
15
672k
func_documentation_string
stringlengths
1
47.2k
func_documentation_tokens
listlengths
1
3.92k
split_name
stringclasses
1 value
func_code_url
stringlengths
85
339
CivicSpleen/ambry
ambry/__init__.py
config
def config(path=None, root=None, db=None):
    """Return the default run_config object for this installation."""
    import ambry.run

    # Thin convenience wrapper: everything is delegated to the loader.
    return ambry.run.load(path=path, root=root, db=db)
python
def config(path=None, root=None, db=None): """Return the default run_config object for this installation.""" import ambry.run return ambry.run.load(path=path, root=root, db=db)
[ "def", "config", "(", "path", "=", "None", ",", "root", "=", "None", ",", "db", "=", "None", ")", ":", "import", "ambry", ".", "run", "return", "ambry", ".", "run", ".", "load", "(", "path", "=", "path", ",", "root", "=", "root", ",", "db", "="...
Return the default run_config object for this installation.
[ "Return", "the", "default", "run_config", "object", "for", "this", "installation", "." ]
train
https://github.com/CivicSpleen/ambry/blob/d7f2be4bf1f7ffd086f3fadd4fcae60c32473e42/ambry/__init__.py#L27-L30
CivicSpleen/ambry
ambry/__init__.py
get_library
def get_library(path=None, root=None, db=None):
    """Return the default library for this installation.

    The run configuration is resolved via :func:`config` and handed to
    ``ambry.library.new_library``.
    """
    # FIX: the original placed the docstring AFTER the import statement,
    # so it was a bare string expression, invisible to help()/inspect.
    import ambry.library as _l

    rc = config(path=path, root=root, db=db)
    return _l.new_library(rc)
python
def get_library(path=None, root=None, db=None): import ambry.library as _l """Return the default library for this installation.""" rc = config(path=path, root=root, db=db ) return _l.new_library(rc)
[ "def", "get_library", "(", "path", "=", "None", ",", "root", "=", "None", ",", "db", "=", "None", ")", ":", "import", "ambry", ".", "library", "as", "_l", "rc", "=", "config", "(", "path", "=", "path", ",", "root", "=", "root", ",", "db", "=", ...
Return the default library for this installation.
[ "Return", "the", "default", "library", "for", "this", "installation", "." ]
train
https://github.com/CivicSpleen/ambry/blob/d7f2be4bf1f7ffd086f3fadd4fcae60c32473e42/ambry/__init__.py#L34-L40
CivicSpleen/ambry
ambry/cli/__init__.py
doc_parser
def doc_parser():
    """Utility function to allow getting the arguments for a single command,
    for Sphinx documentation"""
    description = ('Ambry {}. Management interface for ambry, libraries '
                   'and repositories. ').format(ambry._meta.__version__)
    return argparse.ArgumentParser(prog='ambry', description=description)
python
def doc_parser(): """Utility function to allow getting the arguments for a single command, for Sphinx documentation""" parser = argparse.ArgumentParser( prog='ambry', description='Ambry {}. Management interface for ambry, libraries ' 'and repositories. '.format(ambry._meta.__version__)) return parser
[ "def", "doc_parser", "(", ")", ":", "parser", "=", "argparse", ".", "ArgumentParser", "(", "prog", "=", "'ambry'", ",", "description", "=", "'Ambry {}. Management interface for ambry, libraries '", "'and repositories. '", ".", "format", "(", "ambry", ".", "_meta", "...
Utility function to allow getting the arguments for a single command, for Sphinx documentation
[ "Utility", "function", "to", "allow", "getting", "the", "arguments", "for", "a", "single", "command", "for", "Sphinx", "documentation" ]
train
https://github.com/CivicSpleen/ambry/blob/d7f2be4bf1f7ffd086f3fadd4fcae60c32473e42/ambry/cli/__init__.py#L95-L103
CivicSpleen/ambry
ambry/cli/__init__.py
get_extra_commands
def get_extra_commands():
    """Use the configuration to discover additional CLI packages to load"""
    from ambry.run import find_config_file
    from ambry.dbexceptions import ConfigurationError
    from ambry.util import yaml

    # No cli.yaml configuration file means there are no extra commands.
    try:
        plugins_dir = find_config_file('cli.yaml')
    except ConfigurationError:
        return []

    # NOTE(review): yaml.load without an explicit Loader is unsafe with
    # stock PyYAML on untrusted input -- confirm ambry.util.yaml is a
    # safe wrapper before relying on this.
    with open(plugins_dir) as f:
        return yaml.load(f)
python
def get_extra_commands(): """Use the configuration to discover additional CLI packages to load""" from ambry.run import find_config_file from ambry.dbexceptions import ConfigurationError from ambry.util import yaml try: plugins_dir = find_config_file('cli.yaml') except ConfigurationError: return [] with open(plugins_dir) as f: cli_modules = yaml.load(f) return cli_modules
[ "def", "get_extra_commands", "(", ")", ":", "from", "ambry", ".", "run", "import", "find_config_file", "from", "ambry", ".", "dbexceptions", "import", "ConfigurationError", "from", "ambry", ".", "util", "import", "yaml", "try", ":", "plugins_dir", "=", "find_con...
Use the configuration to discover additional CLI packages to load
[ "Use", "the", "configuration", "to", "discover", "additional", "CLI", "packages", "to", "load" ]
train
https://github.com/CivicSpleen/ambry/blob/d7f2be4bf1f7ffd086f3fadd4fcae60c32473e42/ambry/cli/__init__.py#L109-L123
cosven/feeluown-core
fuocore/netease/models.py
NSongModel._refresh_url
def _refresh_url(self):
    """Refresh the song url; on failure set it to '' rather than None."""
    songs = self._api.weapi_songs_url([int(self.identifier)])
    # Fall back to the empty string when the lookup fails or the
    # returned url is empty/falsy.
    fresh_url = ''
    if songs:
        fresh_url = songs[0]['url'] or ''
    self.url = fresh_url
python
def _refresh_url(self): """刷新获取 url,失败的时候返回空而不是 None""" songs = self._api.weapi_songs_url([int(self.identifier)]) if songs and songs[0]['url']: self.url = songs[0]['url'] else: self.url = ''
[ "def", "_refresh_url", "(", "self", ")", ":", "songs", "=", "self", ".", "_api", ".", "weapi_songs_url", "(", "[", "int", "(", "self", ".", "identifier", ")", "]", ")", "if", "songs", "and", "songs", "[", "0", "]", "[", "'url'", "]", ":", "self", ...
刷新获取 url,失败的时候返回空而不是 None
[ "刷新获取", "url,失败的时候返回空而不是", "None" ]
train
https://github.com/cosven/feeluown-core/blob/62dc64638f62971b16be0a75c0b8c7ae2999869e/fuocore/netease/models.py#L48-L54
cosven/feeluown-core
fuocore/netease/models.py
NSongModel.url
def url(self):
    """
    We will always check if this song file exists in local library,
    if true, we return the url of the local file.

    .. note::

        As netease song url will be expired after a period of time,
        we can not use static url here. Currently, we assume that the
        expiration time is 20 minutes, after the url expires, it
        will be automaticly refreshed.
    """
    # A local copy always wins over a remote url.
    local_path = self._find_in_local()
    if local_path:
        return local_path

    if self._url:
        # Remote url exists but may have expired; refresh if needed.
        if time.time() > self._expired_at:
            logger.info('song({}) url is expired, refresh...'.format(self))
            self._refresh_url()
    else:
        self._refresh_url()
    return self._url
python
def url(self): """ We will always check if this song file exists in local library, if true, we return the url of the local file. .. note:: As netease song url will be expired after a period of time, we can not use static url here. Currently, we assume that the expiration time is 20 minutes, after the url expires, it will be automaticly refreshed. """ local_path = self._find_in_local() if local_path: return local_path if not self._url: self._refresh_url() elif time.time() > self._expired_at: logger.info('song({}) url is expired, refresh...'.format(self)) self._refresh_url() return self._url
[ "def", "url", "(", "self", ")", ":", "local_path", "=", "self", ".", "_find_in_local", "(", ")", "if", "local_path", ":", "return", "local_path", "if", "not", "self", ".", "_url", ":", "self", ".", "_refresh_url", "(", ")", "elif", "time", ".", "time",...
We will always check if this song file exists in local library, if true, we return the url of the local file. .. note:: As netease song url will be expired after a period of time, we can not use static url here. Currently, we assume that the expiration time is 20 minutes, after the url expires, it will be automaticly refreshed.
[ "We", "will", "always", "check", "if", "this", "song", "file", "exists", "in", "local", "library", "if", "true", "we", "return", "the", "url", "of", "the", "local", "file", "." ]
train
https://github.com/cosven/feeluown-core/blob/62dc64638f62971b16be0a75c0b8c7ae2999869e/fuocore/netease/models.py#L67-L88
spotify/ulogger
ulogger/stackdriver.py
get_handler
def get_handler(progname, fmt=None, datefmt=None, project_id=None,
                credentials=None, debug_thread_worker=False, **_):
    """Helper function to create a Stackdriver handler.

    See `ulogger.stackdriver.CloudLoggingHandlerBuilder` for arguments
    and supported keyword arguments.

    Returns:
        (obj): Instance of `google.cloud.logging.handlers.
            CloudLoggingHandler`
    """
    # Build and immediately consume the builder; extra kwargs (**_) are
    # accepted and ignored for interface compatibility.
    return CloudLoggingHandlerBuilder(
        progname,
        fmt=fmt,
        datefmt=datefmt,
        project_id=project_id,
        credentials=credentials,
        debug_thread_worker=debug_thread_worker,
    ).get_handler()
python
def get_handler(progname, fmt=None, datefmt=None, project_id=None, credentials=None, debug_thread_worker=False, **_): """Helper function to create a Stackdriver handler. See `ulogger.stackdriver.CloudLoggingHandlerBuilder` for arguments and supported keyword arguments. Returns: (obj): Instance of `google.cloud.logging.handlers. CloudLoggingHandler` """ builder = CloudLoggingHandlerBuilder( progname, fmt=fmt, datefmt=datefmt, project_id=project_id, credentials=credentials, debug_thread_worker=debug_thread_worker) return builder.get_handler()
[ "def", "get_handler", "(", "progname", ",", "fmt", "=", "None", ",", "datefmt", "=", "None", ",", "project_id", "=", "None", ",", "credentials", "=", "None", ",", "debug_thread_worker", "=", "False", ",", "*", "*", "_", ")", ":", "builder", "=", "Cloud...
Helper function to create a Stackdriver handler. See `ulogger.stackdriver.CloudLoggingHandlerBuilder` for arguments and supported keyword arguments. Returns: (obj): Instance of `google.cloud.logging.handlers. CloudLoggingHandler`
[ "Helper", "function", "to", "create", "a", "Stackdriver", "handler", "." ]
train
https://github.com/spotify/ulogger/blob/c59ced69e55b400e9c7a3688145fe3e8cb89db13/ulogger/stackdriver.py#L197-L211
spotify/ulogger
ulogger/stackdriver.py
CloudLoggingHandlerBuilder._get_metadata
def _get_metadata(self, data_type, key, timeout=5):
    """Get host instance metadata (only works on GCP hosts).

    More details about instance metadata:
    https://cloud.google.com/compute/docs/storing-retrieving-metadata

    Args:
        data_type (str): Type of metadata to fetch. Eg. project, instance
        key (str): Key of metadata to fetch
        timeout (int, optional): HTTP request timeout in seconds.
            Default is 5 seconds.
    Returns:
        (str): Plain text value of metadata entry
    Raises:
        GoogleCloudError: when request to metadata endpoint fails
    """
    endpoint_url = self.METADATA_ENDPOINT.format(
        data_type=data_type, key=key)
    try:
        rsp = requests.get(
            endpoint_url,
            headers={'Metadata-Flavor': 'Google'},
            timeout=timeout)
        rsp.raise_for_status()
    except requests.exceptions.RequestException as e:
        # FIX: the original message concatenated '..."."Error:' with no
        # separating space; also chain the cause so tracebacks keep the
        # underlying network error.
        raise exceptions.GoogleCloudError(
            'Could not fetch "{key}" from "{type}" metadata using "{url}". '
            'Error: {e}'.format(
                key=key, type=data_type, url=endpoint_url, e=e)) from e

    metadata_value = rsp.text
    # The metadata server may answer 200 with an empty body; treat that
    # as a failure too.
    if metadata_value.strip() == '':
        raise exceptions.GoogleCloudError(
            'Error when fetching metadata from "{url}": server returned '
            'an empty value.'.format(url=endpoint_url))

    return metadata_value
python
def _get_metadata(self, data_type, key, timeout=5): """Get host instance metadata (only works on GCP hosts). More details about instance metadata: https://cloud.google.com/compute/docs/storing-retrieving-metadata Args: data_type (str): Type of metadata to fetch. Eg. project, instance key (str): Key of metadata to fetch timeout (int, optional): HTTP request timeout in seconds. Default is 5 seconds. Returns: (str): Plain text value of metadata entry Raises: GoogleCloudError: when request to metadata endpoint fails """ endpoint_url = self.METADATA_ENDPOINT.format( data_type=data_type, key=key) try: rsp = requests.get( endpoint_url, headers={'Metadata-Flavor': 'Google'}, timeout=timeout) rsp.raise_for_status() except requests.exceptions.RequestException as e: raise exceptions.GoogleCloudError( 'Could not fetch "{key}" from "{type}" metadata using "{url}".' 'Error: {e}'.format( key=key, type=data_type, url=endpoint_url, e=e)) metadata_value = rsp.text if metadata_value.strip() == '': raise exceptions.GoogleCloudError( 'Error when fetching metadata from "{url}": server returned ' 'an empty value.'.format(url=endpoint_url)) return metadata_value
[ "def", "_get_metadata", "(", "self", ",", "data_type", ",", "key", ",", "timeout", "=", "5", ")", ":", "endpoint_url", "=", "self", ".", "METADATA_ENDPOINT", ".", "format", "(", "data_type", "=", "data_type", ",", "key", "=", "key", ")", "try", ":", "r...
Get host instance metadata (only works on GCP hosts). More details about instance metadata: https://cloud.google.com/compute/docs/storing-retrieving-metadata Args: data_type (str): Type of metadata to fetch. Eg. project, instance key (str): Key of metadata to fetch timeout (int, optional): HTTP request timeout in seconds. Default is 5 seconds. Returns: (str): Plain text value of metadata entry Raises: GoogleCloudError: when request to metadata endpoint fails
[ "Get", "host", "instance", "metadata", "(", "only", "works", "on", "GCP", "hosts", ")", "." ]
train
https://github.com/spotify/ulogger/blob/c59ced69e55b400e9c7a3688145fe3e8cb89db13/ulogger/stackdriver.py#L92-L127
spotify/ulogger
ulogger/stackdriver.py
CloudLoggingHandlerBuilder._create_gcl_resource
def _create_gcl_resource(self):
    """Create a configured Resource object.

    The logging.resource.Resource object enables GCL to filter and
    bucket incoming logs according to which resource (host) they're
    coming from.

    Returns:
        (obj): Instance of `google.cloud.logging.resource.Resource`
    """
    resource_labels = {
        'project_id': self.project_id,
        'instance_id': self.instance_id,
        'zone': self.zone,
    }
    return gcl_resource.Resource('gce_instance', resource_labels)
python
def _create_gcl_resource(self): """Create a configured Resource object. The logging.resource.Resource object enables GCL to filter and bucket incoming logs according to which resource (host) they're coming from. Returns: (obj): Instance of `google.cloud.logging.resource.Resource` """ return gcl_resource.Resource('gce_instance', { 'project_id': self.project_id, 'instance_id': self.instance_id, 'zone': self.zone })
[ "def", "_create_gcl_resource", "(", "self", ")", ":", "return", "gcl_resource", ".", "Resource", "(", "'gce_instance'", ",", "{", "'project_id'", ":", "self", ".", "project_id", ",", "'instance_id'", ":", "self", ".", "instance_id", ",", "'zone'", ":", "self",...
Create a configured Resource object. The logging.resource.Resource object enables GCL to filter and bucket incoming logs according to which resource (host) they're coming from. Returns: (obj): Instance of `google.cloud.logging.resource.Resource`
[ "Create", "a", "configured", "Resource", "object", "." ]
train
https://github.com/spotify/ulogger/blob/c59ced69e55b400e9c7a3688145fe3e8cb89db13/ulogger/stackdriver.py#L129-L144
spotify/ulogger
ulogger/stackdriver.py
CloudLoggingHandlerBuilder.get_formatter
def get_formatter(self): """Create a fully configured `logging.Formatter` Example of formatted log message: 2017-08-27T20:19:24.424 cpm-example-gew1 progname (23123): hello Returns: (obj): Instance of `logging.Formatter` """ if not self.fmt: self.fmt = ('%(asctime)s.%(msecs)03d {host} {progname} ' '(%(process)d): %(message)s').format( host=self.hostname, progname=self.progname) if not self.datefmt: self.datefmt = '%Y-%m-%dT%H:%M:%S' return logging.Formatter(fmt=self.fmt, datefmt=self.datefmt)
python
def get_formatter(self): """Create a fully configured `logging.Formatter` Example of formatted log message: 2017-08-27T20:19:24.424 cpm-example-gew1 progname (23123): hello Returns: (obj): Instance of `logging.Formatter` """ if not self.fmt: self.fmt = ('%(asctime)s.%(msecs)03d {host} {progname} ' '(%(process)d): %(message)s').format( host=self.hostname, progname=self.progname) if not self.datefmt: self.datefmt = '%Y-%m-%dT%H:%M:%S' return logging.Formatter(fmt=self.fmt, datefmt=self.datefmt)
[ "def", "get_formatter", "(", "self", ")", ":", "if", "not", "self", ".", "fmt", ":", "self", ".", "fmt", "=", "(", "'%(asctime)s.%(msecs)03d {host} {progname} '", "'(%(process)d): %(message)s'", ")", ".", "format", "(", "host", "=", "self", ".", "hostname", ",...
Create a fully configured `logging.Formatter` Example of formatted log message: 2017-08-27T20:19:24.424 cpm-example-gew1 progname (23123): hello Returns: (obj): Instance of `logging.Formatter`
[ "Create", "a", "fully", "configured", "logging", ".", "Formatter" ]
train
https://github.com/spotify/ulogger/blob/c59ced69e55b400e9c7a3688145fe3e8cb89db13/ulogger/stackdriver.py#L146-L161
spotify/ulogger
ulogger/stackdriver.py
CloudLoggingHandlerBuilder._set_worker_thread_level
def _set_worker_thread_level(self):
    """Sets logging level of the background logging thread to DEBUG or INFO"""
    level = logging.DEBUG if self.debug_thread_worker else logging.INFO
    logging.getLogger(
        'google.cloud.logging.handlers.transports.background_thread'
    ).setLevel(level)
python
def _set_worker_thread_level(self): """Sets logging level of the background logging thread to DEBUG or INFO """ bthread_logger = logging.getLogger( 'google.cloud.logging.handlers.transports.background_thread') if self.debug_thread_worker: bthread_logger.setLevel(logging.DEBUG) else: bthread_logger.setLevel(logging.INFO)
[ "def", "_set_worker_thread_level", "(", "self", ")", ":", "bthread_logger", "=", "logging", ".", "getLogger", "(", "'google.cloud.logging.handlers.transports.background_thread'", ")", "if", "self", ".", "debug_thread_worker", ":", "bthread_logger", ".", "setLevel", "(", ...
Sets logging level of the background logging thread to DEBUG or INFO
[ "Sets", "logging", "level", "of", "the", "background", "logging", "thread", "to", "DEBUG", "or", "INFO" ]
train
https://github.com/spotify/ulogger/blob/c59ced69e55b400e9c7a3688145fe3e8cb89db13/ulogger/stackdriver.py#L163-L171
spotify/ulogger
ulogger/stackdriver.py
CloudLoggingHandlerBuilder.get_handler
def get_handler(self):
    """Create a fully configured CloudLoggingHandler.

    Returns:
        (obj): Instance of `google.cloud.logging.handlers.
            CloudLoggingHandler`
    """
    client = gcl_logging.Client(
        project=self.project_id, credentials=self.credentials)
    labels = {
        'resource_id': self.instance_id,
        'resource_project': self.project_id,
        'resource_zone': self.zone,
        'resource_host': self.hostname,
    }
    handler = gcl_handlers.CloudLoggingHandler(
        client, resource=self.resource, labels=labels)
    handler.setFormatter(self.get_formatter())
    # Quiet (or enable) the background transport thread's own logging.
    self._set_worker_thread_level()
    return handler
python
def get_handler(self): """Create a fully configured CloudLoggingHandler. Returns: (obj): Instance of `google.cloud.logging.handlers. CloudLoggingHandler` """ gcl_client = gcl_logging.Client( project=self.project_id, credentials=self.credentials) handler = gcl_handlers.CloudLoggingHandler( gcl_client, resource=self.resource, labels={ 'resource_id': self.instance_id, 'resource_project': self.project_id, 'resource_zone': self.zone, 'resource_host': self.hostname }) handler.setFormatter(self.get_formatter()) self._set_worker_thread_level() return handler
[ "def", "get_handler", "(", "self", ")", ":", "gcl_client", "=", "gcl_logging", ".", "Client", "(", "project", "=", "self", ".", "project_id", ",", "credentials", "=", "self", ".", "credentials", ")", "handler", "=", "gcl_handlers", ".", "CloudLoggingHandler", ...
Create a fully configured CloudLoggingHandler. Returns: (obj): Instance of `google.cloud.logging.handlers. CloudLoggingHandler`
[ "Create", "a", "fully", "configured", "CloudLoggingHandler", "." ]
train
https://github.com/spotify/ulogger/blob/c59ced69e55b400e9c7a3688145fe3e8cb89db13/ulogger/stackdriver.py#L173-L194
CivicSpleen/ambry
ambry/library/search.py
Search.index_dataset
def index_dataset(self, dataset, force=False):
    """Add the given dataset to the search index.

    :param dataset: dataset to index
    :param force: if True, index even if the document already exists
    """
    index = self.backend.dataset_index
    index.index_one(dataset, force=force)
python
def index_dataset(self, dataset, force=False): """ Adds given dataset to the index. """ self.backend.dataset_index.index_one(dataset, force=force)
[ "def", "index_dataset", "(", "self", ",", "dataset", ",", "force", "=", "False", ")", ":", "self", ".", "backend", ".", "dataset_index", ".", "index_one", "(", "dataset", ",", "force", "=", "force", ")" ]
Adds given dataset to the index.
[ "Adds", "given", "dataset", "to", "the", "index", "." ]
train
https://github.com/CivicSpleen/ambry/blob/d7f2be4bf1f7ffd086f3fadd4fcae60c32473e42/ambry/library/search.py#L55-L57
CivicSpleen/ambry
ambry/library/search.py
Search.index_partition
def index_partition(self, partition, force=False):
    """Add the given partition to the search index.

    :param partition: partition to index
    :param force: if True, index even if the document already exists
    """
    index = self.backend.partition_index
    index.index_one(partition, force=force)
python
def index_partition(self, partition, force=False): """ Adds given partition to the index. """ self.backend.partition_index.index_one(partition, force=force)
[ "def", "index_partition", "(", "self", ",", "partition", ",", "force", "=", "False", ")", ":", "self", ".", "backend", ".", "partition_index", ".", "index_one", "(", "partition", ",", "force", "=", "force", ")" ]
Adds given partition to the index.
[ "Adds", "given", "partition", "to", "the", "index", "." ]
train
https://github.com/CivicSpleen/ambry/blob/d7f2be4bf1f7ffd086f3fadd4fcae60c32473e42/ambry/library/search.py#L59-L61
CivicSpleen/ambry
ambry/library/search.py
Search.index_bundle
def index_bundle(self, bundle, force=False):
    """Indexes a bundle/dataset and all of its partitions

    :param bundle: A bundle or dataset object
    :param force: If true, index the document even if it already exists
    :return:
    """
    from ambry.orm.dataset import Dataset

    # Accept either a Dataset directly or a bundle wrapping one.
    if isinstance(bundle, Dataset):
        dataset = bundle
    else:
        dataset = bundle.dataset

    self.index_dataset(dataset, force)
    for partition in dataset.partitions:
        self.index_partition(partition, force)
python
def index_bundle(self, bundle, force=False): """ Indexes a bundle/dataset and all of its partitions :param bundle: A bundle or dataset object :param force: If true, index the document even if it already exists :return: """ from ambry.orm.dataset import Dataset dataset = bundle if isinstance(bundle, Dataset) else bundle.dataset self.index_dataset(dataset, force) for partition in dataset.partitions: self.index_partition(partition, force)
[ "def", "index_bundle", "(", "self", ",", "bundle", ",", "force", "=", "False", ")", ":", "from", "ambry", ".", "orm", ".", "dataset", "import", "Dataset", "dataset", "=", "bundle", "if", "isinstance", "(", "bundle", ",", "Dataset", ")", "else", "bundle",...
Indexes a bundle/dataset and all of its partitions :param bundle: A bundle or dataset object :param force: If true, index the document even if it already exists :return:
[ "Indexes", "a", "bundle", "/", "dataset", "and", "all", "of", "its", "partitions", ":", "param", "bundle", ":", "A", "bundle", "or", "dataset", "object", ":", "param", "force", ":", "If", "true", "index", "the", "document", "even", "if", "it", "already",...
train
https://github.com/CivicSpleen/ambry/blob/d7f2be4bf1f7ffd086f3fadd4fcae60c32473e42/ambry/library/search.py#L63-L77
CivicSpleen/ambry
ambry/library/search.py
Search.index_library_datasets
def index_library_datasets(self, tick_f=None):
    """Indexes all datasets of the library.

    Args:
        tick_f (callable, optional): callable of one argument. Gets
            string with index state.
    """
    dataset_n = 0
    partition_n = 0

    def report():
        # Progress callback is optional; format counters for the caller.
        if tick_f:
            tick_f('datasets: {} partitions: {}'.format(
                dataset_n, partition_n))

    for dataset in self.library.datasets:
        if not self.backend.dataset_index.index_one(dataset):
            # Dataset already indexed; nothing to do.
            continue
        dataset_n += 1
        report()
        for partition in dataset.partitions:
            self.backend.partition_index.index_one(partition)
            partition_n += 1
            report()
python
def index_library_datasets(self, tick_f=None): """ Indexes all datasets of the library. Args: tick_f (callable, optional): callable of one argument. Gets string with index state. """ dataset_n = 0 partition_n = 0 def tick(d, p): if tick_f: tick_f('datasets: {} partitions: {}'.format(d, p)) for dataset in self.library.datasets: if self.backend.dataset_index.index_one(dataset): # dataset added to index dataset_n += 1 tick(dataset_n, partition_n) for partition in dataset.partitions: self.backend.partition_index.index_one(partition) partition_n += 1 tick(dataset_n, partition_n) else: # dataset already indexed pass
[ "def", "index_library_datasets", "(", "self", ",", "tick_f", "=", "None", ")", ":", "dataset_n", "=", "0", "partition_n", "=", "0", "def", "tick", "(", "d", ",", "p", ")", ":", "if", "tick_f", ":", "tick_f", "(", "'datasets: {} partitions: {}'", ".", "fo...
Indexes all datasets of the library. Args: tick_f (callable, optional): callable of one argument. Gets string with index state.
[ "Indexes", "all", "datasets", "of", "the", "library", "." ]
train
https://github.com/CivicSpleen/ambry/blob/d7f2be4bf1f7ffd086f3fadd4fcae60c32473e42/ambry/library/search.py#L79-L106
CivicSpleen/ambry
ambry/library/search.py
Search.search_datasets
def search_datasets(self, search_phrase, limit=None):
    """Search the dataset index for the given phrase."""
    index = self.backend.dataset_index
    return index.search(search_phrase, limit=limit)
python
def search_datasets(self, search_phrase, limit=None): """ Search for datasets. """ return self.backend.dataset_index.search(search_phrase, limit=limit)
[ "def", "search_datasets", "(", "self", ",", "search_phrase", ",", "limit", "=", "None", ")", ":", "return", "self", ".", "backend", ".", "dataset_index", ".", "search", "(", "search_phrase", ",", "limit", "=", "limit", ")" ]
Search for datasets.
[ "Search", "for", "datasets", "." ]
train
https://github.com/CivicSpleen/ambry/blob/d7f2be4bf1f7ffd086f3fadd4fcae60c32473e42/ambry/library/search.py#L108-L110
CivicSpleen/ambry
ambry/library/search.py
Search.search
def search(self, search_phrase, limit=None):
    """Search for datasets, and expand to database records"""
    from ambry.identity import ObjectNumber
    from ambry.orm.exc import NotFoundError
    from ambry.library.search_backends.base import SearchTermParser

    # Because of the split between searching for partitions and bundles,
    # the 'source' term is not part of the partition doc, so it is
    # applied here as a post-filter on everything.
    parsed_terms = SearchTermParser().parse(search_phrase)

    hits = []
    for hit in self.search_datasets(search_phrase, limit):
        # Derive the dataset vid from a partition when it is missing.
        if not hit.vid:
            hit.vid = ObjectNumber.parse(
                next(iter(hit.partitions))).as_dataset
        try:
            hit.bundle = self.library.bundle(hit.vid)
            if ('source' not in parsed_terms
                    or parsed_terms['source'] in hit.bundle.dataset.source):
                hits.append(hit)
        except NotFoundError:
            # Bundle is gone from the library; drop this result.
            pass

    return sorted(hits, key=lambda hit: hit.score, reverse=True)
python
def search(self, search_phrase, limit=None): """Search for datasets, and expand to database records""" from ambry.identity import ObjectNumber from ambry.orm.exc import NotFoundError from ambry.library.search_backends.base import SearchTermParser results = [] stp = SearchTermParser() # Because of the split between searching for partitions and bundles, some terms don't behave right. # The source term should be a limit on everything, but it isn't part of the partition doc, # so we check for it here. parsed_terms = stp.parse(search_phrase) for r in self.search_datasets(search_phrase, limit): vid = r.vid or ObjectNumber.parse(next(iter(r.partitions))).as_dataset r.vid = vid try: r.bundle = self.library.bundle(r.vid) if 'source' not in parsed_terms or parsed_terms['source'] in r.bundle.dataset.source: results.append(r) except NotFoundError: pass return sorted(results, key=lambda r : r.score, reverse=True)
[ "def", "search", "(", "self", ",", "search_phrase", ",", "limit", "=", "None", ")", ":", "from", "ambry", ".", "identity", "import", "ObjectNumber", "from", "ambry", ".", "orm", ".", "exc", "import", "NotFoundError", "from", "ambry", ".", "library", ".", ...
Search for datasets, and expand to database records
[ "Search", "for", "datasets", "and", "expand", "to", "database", "records" ]
train
https://github.com/CivicSpleen/ambry/blob/d7f2be4bf1f7ffd086f3fadd4fcae60c32473e42/ambry/library/search.py#L112-L140
CivicSpleen/ambry
ambry/library/search.py
Search.list_documents
def list_documents(self, limit=None):
    """Return an iterator over documents from all three indexes.

    :param limit: per-index limit on the number of documents
    :return: chained iterator of dataset, partition and identifier docs
    """
    from itertools import chain

    indexes = (self.backend.dataset_index,
               self.backend.partition_index,
               self.backend.identifier_index)
    # Call list_documents eagerly for each index, then chain the results.
    docs = [index.list_documents(limit=limit) for index in indexes]
    return chain(*docs)
python
def list_documents(self, limit=None): """ Return a list of the documents :param limit: :return: """ from itertools import chain return chain(self.backend.dataset_index.list_documents(limit=limit), self.backend.partition_index.list_documents(limit=limit), self.backend.identifier_index.list_documents(limit=limit))
[ "def", "list_documents", "(", "self", ",", "limit", "=", "None", ")", ":", "from", "itertools", "import", "chain", "return", "chain", "(", "self", ".", "backend", ".", "dataset_index", ".", "list_documents", "(", "limit", "=", "limit", ")", ",", "self", ...
Return a list of the documents :param limit: :return:
[ "Return", "a", "list", "of", "the", "documents", ":", "param", "limit", ":", ":", "return", ":" ]
train
https://github.com/CivicSpleen/ambry/blob/d7f2be4bf1f7ffd086f3fadd4fcae60c32473e42/ambry/library/search.py#L142-L152
CivicSpleen/ambry
ambry/library/search.py
Search.get_parsed_query
def get_parsed_query(self):
    """Returns string with last query parsed. Assuming called after search_datasets."""
    dataset_q = self.backend.dataset_index.get_parsed_query()[0]
    partition_q = self.backend.partition_index.get_parsed_query()[0]
    return '{} OR {}'.format(dataset_q, partition_q)
python
def get_parsed_query(self): """ Returns string with last query parsed. Assuming called after search_datasets.""" return '{} OR {}'.format( self.backend.dataset_index.get_parsed_query()[0], self.backend.partition_index.get_parsed_query()[0])
[ "def", "get_parsed_query", "(", "self", ")", ":", "return", "'{} OR {}'", ".", "format", "(", "self", ".", "backend", ".", "dataset_index", ".", "get_parsed_query", "(", ")", "[", "0", "]", ",", "self", ".", "backend", ".", "partition_index", ".", "get_par...
Returns string with last query parsed. Assuming called after search_datasets.
[ "Returns", "string", "with", "last", "query", "parsed", ".", "Assuming", "called", "after", "search_datasets", "." ]
train
https://github.com/CivicSpleen/ambry/blob/d7f2be4bf1f7ffd086f3fadd4fcae60c32473e42/ambry/library/search.py#L154-L158
project-ncl/pnc-cli
pnc_cli/buildconfigsetrecords.py
list_build_configuration_set_records
def list_build_configuration_set_records(page_size=200, page_index=0,
                                         sort="", q=""):
    """List all build configuration set records."""
    data = list_build_configuration_set_records_raw(
        page_size, page_index, sort, q)
    if not data:
        # Mirrors the raw call: no data means no result.
        return None
    return utils.format_json_list(data)
python
def list_build_configuration_set_records(page_size=200, page_index=0, sort="", q=""): """ List all build configuration set records. """ data = list_build_configuration_set_records_raw(page_size, page_index, sort, q) if data: return utils.format_json_list(data)
[ "def", "list_build_configuration_set_records", "(", "page_size", "=", "200", ",", "page_index", "=", "0", ",", "sort", "=", "\"\"", ",", "q", "=", "\"\"", ")", ":", "data", "=", "list_build_configuration_set_records_raw", "(", "page_size", ",", "page_index", ","...
List all build configuration set records.
[ "List", "all", "build", "configuration", "set", "records", "." ]
train
https://github.com/project-ncl/pnc-cli/blob/3dc149bf84928f60a8044ac50b58bbaddd451902/pnc_cli/buildconfigsetrecords.py#L22-L28
project-ncl/pnc-cli
pnc_cli/buildconfigsetrecords.py
list_records_for_build_config_set
def list_records_for_build_config_set(id, page_size=200, page_index=0,
                                      sort="", q=""):
    """Get a list of BuildRecords for the given BuildConfigSetRecord

    NOTE: the ``id`` parameter shadows the builtin, but it is part of
    the public CLI interface and must keep its name.
    """
    data = list_records_for_build_config_set_raw(
        id, page_size, page_index, sort, q)
    if not data:
        return None
    return utils.format_json_list(data)
python
def list_records_for_build_config_set(id, page_size=200, page_index=0, sort="", q=""): """ Get a list of BuildRecords for the given BuildConfigSetRecord """ data = list_records_for_build_config_set_raw(id, page_size, page_index, sort, q) if data: return utils.format_json_list(data)
[ "def", "list_records_for_build_config_set", "(", "id", ",", "page_size", "=", "200", ",", "page_index", "=", "0", ",", "sort", "=", "\"\"", ",", "q", "=", "\"\"", ")", ":", "data", "=", "list_records_for_build_config_set_raw", "(", "id", ",", "page_size", ",...
Get a list of BuildRecords for the given BuildConfigSetRecord
[ "Get", "a", "list", "of", "BuildRecords", "for", "the", "given", "BuildConfigSetRecord" ]
train
https://github.com/project-ncl/pnc-cli/blob/3dc149bf84928f60a8044ac50b58bbaddd451902/pnc_cli/buildconfigsetrecords.py#L62-L68
cosven/feeluown-core
fuocore/provider.py
AbstractProvider.auth_as
def auth_as(self, user): """auth as a user temporarily""" old_user = self._user self.auth(user) try: yield finally: self.auth(old_user)
python
def auth_as(self, user): """auth as a user temporarily""" old_user = self._user self.auth(user) try: yield finally: self.auth(old_user)
[ "def", "auth_as", "(", "self", ",", "user", ")", ":", "old_user", "=", "self", ".", "_user", "self", ".", "auth", "(", "user", ")", "try", ":", "yield", "finally", ":", "self", ".", "auth", "(", "old_user", ")" ]
auth as a user temporarily
[ "auth", "as", "a", "user", "temporarily" ]
train
https://github.com/cosven/feeluown-core/blob/62dc64638f62971b16be0a75c0b8c7ae2999869e/fuocore/provider.py#L70-L77
CivicSpleen/ambry
ambry/library/config.py
LibraryConfigSyncProxy.sync_accounts
def sync_accounts(self, accounts_data, clear = False, password=None, cb = None): """ Load all of the accounts from the account section of the config into the database. :param accounts_data: :param password: :return: """ # Map common values into the accounts records all_accounts = self.accounts kmap = Account.prop_map() for account_id, values in accounts_data.items(): if not isinstance(values, dict): continue d = {} a = self.library.find_or_new_account(account_id) a.secret_password = password or self.password for k, v in values.items(): if k in ('id',): continue try: if kmap[k] == 'secret' and v: a.encrypt_secret(v) else: setattr(a, kmap[k], v) except KeyError: d[k] = v a.data = d if values.get('service') == 's3': a.url = 's3://{}'.format(a.account_id) if cb: cb('Loaded account: {}'.format(a.account_id)) self.database.session.commit()
python
def sync_accounts(self, accounts_data, clear = False, password=None, cb = None): """ Load all of the accounts from the account section of the config into the database. :param accounts_data: :param password: :return: """ # Map common values into the accounts records all_accounts = self.accounts kmap = Account.prop_map() for account_id, values in accounts_data.items(): if not isinstance(values, dict): continue d = {} a = self.library.find_or_new_account(account_id) a.secret_password = password or self.password for k, v in values.items(): if k in ('id',): continue try: if kmap[k] == 'secret' and v: a.encrypt_secret(v) else: setattr(a, kmap[k], v) except KeyError: d[k] = v a.data = d if values.get('service') == 's3': a.url = 's3://{}'.format(a.account_id) if cb: cb('Loaded account: {}'.format(a.account_id)) self.database.session.commit()
[ "def", "sync_accounts", "(", "self", ",", "accounts_data", ",", "clear", "=", "False", ",", "password", "=", "None", ",", "cb", "=", "None", ")", ":", "# Map common values into the accounts records", "all_accounts", "=", "self", ".", "accounts", "kmap", "=", "...
Load all of the accounts from the account section of the config into the database. :param accounts_data: :param password: :return:
[ "Load", "all", "of", "the", "accounts", "from", "the", "account", "section", "of", "the", "config", "into", "the", "database", "." ]
train
https://github.com/CivicSpleen/ambry/blob/d7f2be4bf1f7ffd086f3fadd4fcae60c32473e42/ambry/library/config.py#L98-L143
CivicSpleen/ambry
ambry/valuetype/core.py
robust_int
def robust_int(v): """Parse an int robustly, ignoring commas and other cruft. """ if isinstance(v, int): return v if isinstance(v, float): return int(v) v = str(v).replace(',', '') if not v: return None return int(v)
python
def robust_int(v): """Parse an int robustly, ignoring commas and other cruft. """ if isinstance(v, int): return v if isinstance(v, float): return int(v) v = str(v).replace(',', '') if not v: return None return int(v)
[ "def", "robust_int", "(", "v", ")", ":", "if", "isinstance", "(", "v", ",", "int", ")", ":", "return", "v", "if", "isinstance", "(", "v", ",", "float", ")", ":", "return", "int", "(", "v", ")", "v", "=", "str", "(", "v", ")", ".", "replace", ...
Parse an int robustly, ignoring commas and other cruft.
[ "Parse", "an", "int", "robustly", "ignoring", "commas", "and", "other", "cruft", "." ]
train
https://github.com/CivicSpleen/ambry/blob/d7f2be4bf1f7ffd086f3fadd4fcae60c32473e42/ambry/valuetype/core.py#L508-L522
CivicSpleen/ambry
ambry/valuetype/core.py
ValueType.subclass
def subclass(cls, vt_code, vt_args): """ Return a dynamic subclass that has the extra parameters built in :param vt_code: The full VT code, privided to resolve_type :param vt_args: The portion of the VT code to the right of the part that matched a ValueType :return: """ return type(vt_code.replace('/', '_'), (cls,), {'vt_code': vt_code, 'vt_args': vt_args})
python
def subclass(cls, vt_code, vt_args): """ Return a dynamic subclass that has the extra parameters built in :param vt_code: The full VT code, privided to resolve_type :param vt_args: The portion of the VT code to the right of the part that matched a ValueType :return: """ return type(vt_code.replace('/', '_'), (cls,), {'vt_code': vt_code, 'vt_args': vt_args})
[ "def", "subclass", "(", "cls", ",", "vt_code", ",", "vt_args", ")", ":", "return", "type", "(", "vt_code", ".", "replace", "(", "'/'", ",", "'_'", ")", ",", "(", "cls", ",", ")", ",", "{", "'vt_code'", ":", "vt_code", ",", "'vt_args'", ":", "vt_arg...
Return a dynamic subclass that has the extra parameters built in :param vt_code: The full VT code, privided to resolve_type :param vt_args: The portion of the VT code to the right of the part that matched a ValueType :return:
[ "Return", "a", "dynamic", "subclass", "that", "has", "the", "extra", "parameters", "built", "in", ":", "param", "vt_code", ":", "The", "full", "VT", "code", "privided", "to", "resolve_type", ":", "param", "vt_args", ":", "The", "portion", "of", "the", "VT"...
train
https://github.com/CivicSpleen/ambry/blob/d7f2be4bf1f7ffd086f3fadd4fcae60c32473e42/ambry/valuetype/core.py#L103-L110
SmartTeleMax/iktomi
iktomi/templates/__init__.py
Template.render
def render(self, template_name, **kw): ''' Given a template name and template vars. Searches a template file based on engine set, and renders it with corresponding engine. Returns a string. ''' logger.debug('Rendering template "%s"', template_name) vars = self.globs.copy() vars.update(kw) resolved_name, engine = self.resolve(template_name) return engine.render(resolved_name, **vars)
python
def render(self, template_name, **kw): ''' Given a template name and template vars. Searches a template file based on engine set, and renders it with corresponding engine. Returns a string. ''' logger.debug('Rendering template "%s"', template_name) vars = self.globs.copy() vars.update(kw) resolved_name, engine = self.resolve(template_name) return engine.render(resolved_name, **vars)
[ "def", "render", "(", "self", ",", "template_name", ",", "*", "*", "kw", ")", ":", "logger", ".", "debug", "(", "'Rendering template \"%s\"'", ",", "template_name", ")", "vars", "=", "self", ".", "globs", ".", "copy", "(", ")", "vars", ".", "update", "...
Given a template name and template vars. Searches a template file based on engine set, and renders it with corresponding engine. Returns a string.
[ "Given", "a", "template", "name", "and", "template", "vars", ".", "Searches", "a", "template", "file", "based", "on", "engine", "set", "and", "renders", "it", "with", "corresponding", "engine", ".", "Returns", "a", "string", "." ]
train
https://github.com/SmartTeleMax/iktomi/blob/80bc0f1408d63efe7f5844367d1f6efba44b35f2/iktomi/templates/__init__.py#L31-L42
SmartTeleMax/iktomi
iktomi/templates/__init__.py
BoundTemplate.render
def render(self, template_name, __data=None, **kw): '''Given a template name and template data. Renders a template and returns as string''' return self.template.render(template_name, **self._vars(__data, **kw))
python
def render(self, template_name, __data=None, **kw): '''Given a template name and template data. Renders a template and returns as string''' return self.template.render(template_name, **self._vars(__data, **kw))
[ "def", "render", "(", "self", ",", "template_name", ",", "__data", "=", "None", ",", "*", "*", "kw", ")", ":", "return", "self", ".", "template", ".", "render", "(", "template_name", ",", "*", "*", "self", ".", "_vars", "(", "__data", ",", "*", "*"...
Given a template name and template data. Renders a template and returns as string
[ "Given", "a", "template", "name", "and", "template", "data", ".", "Renders", "a", "template", "and", "returns", "as", "string" ]
train
https://github.com/SmartTeleMax/iktomi/blob/80bc0f1408d63efe7f5844367d1f6efba44b35f2/iktomi/templates/__init__.py#L100-L104
SmartTeleMax/iktomi
iktomi/templates/__init__.py
BoundTemplate.render_to_response
def render_to_response(self, template_name, __data, content_type="text/html"): '''Given a template name and template data. Renders a template and returns `webob.Response` object''' resp = self.render(template_name, __data) return Response(resp, content_type=content_type)
python
def render_to_response(self, template_name, __data, content_type="text/html"): '''Given a template name and template data. Renders a template and returns `webob.Response` object''' resp = self.render(template_name, __data) return Response(resp, content_type=content_type)
[ "def", "render_to_response", "(", "self", ",", "template_name", ",", "__data", ",", "content_type", "=", "\"text/html\"", ")", ":", "resp", "=", "self", ".", "render", "(", "template_name", ",", "__data", ")", "return", "Response", "(", "resp", ",", "content...
Given a template name and template data. Renders a template and returns `webob.Response` object
[ "Given", "a", "template", "name", "and", "template", "data", ".", "Renders", "a", "template", "and", "returns", "webob", ".", "Response", "object" ]
train
https://github.com/SmartTeleMax/iktomi/blob/80bc0f1408d63efe7f5844367d1f6efba44b35f2/iktomi/templates/__init__.py#L106-L112
alvations/lazyme
lazyme/iterate.py
per_section
def per_section(it, is_delimiter=lambda x: x.isspace()): """ From http://stackoverflow.com/a/25226944/610569 """ ret = [] for line in it: if is_delimiter(line): if ret: yield ret # OR ''.join(ret) ret = [] else: ret.append(line.rstrip()) # OR ret.append(line) if ret: yield ret
python
def per_section(it, is_delimiter=lambda x: x.isspace()): """ From http://stackoverflow.com/a/25226944/610569 """ ret = [] for line in it: if is_delimiter(line): if ret: yield ret # OR ''.join(ret) ret = [] else: ret.append(line.rstrip()) # OR ret.append(line) if ret: yield ret
[ "def", "per_section", "(", "it", ",", "is_delimiter", "=", "lambda", "x", ":", "x", ".", "isspace", "(", ")", ")", ":", "ret", "=", "[", "]", "for", "line", "in", "it", ":", "if", "is_delimiter", "(", "line", ")", ":", "if", "ret", ":", "yield", ...
From http://stackoverflow.com/a/25226944/610569
[ "From", "http", ":", "//", "stackoverflow", ".", "com", "/", "a", "/", "25226944", "/", "610569" ]
train
https://github.com/alvations/lazyme/blob/961a8282198588ff72e15643f725ce895e51d06d/lazyme/iterate.py#L11-L24
alvations/lazyme
lazyme/iterate.py
per_chunk
def per_chunk(iterable, n=1, fillvalue=None): """ From http://stackoverflow.com/a/8991553/610569 >>> list(per_chunk('abcdefghi', n=2)) [('a', 'b'), ('c', 'd'), ('e', 'f'), ('g', 'h'), ('i', None)] >>> list(per_chunk('abcdefghi', n=3)) [('a', 'b', 'c'), ('d', 'e', 'f'), ('g', 'h', 'i')] """ args = [iter(iterable)] * n return zip_longest(*args, fillvalue=fillvalue)
python
def per_chunk(iterable, n=1, fillvalue=None): """ From http://stackoverflow.com/a/8991553/610569 >>> list(per_chunk('abcdefghi', n=2)) [('a', 'b'), ('c', 'd'), ('e', 'f'), ('g', 'h'), ('i', None)] >>> list(per_chunk('abcdefghi', n=3)) [('a', 'b', 'c'), ('d', 'e', 'f'), ('g', 'h', 'i')] """ args = [iter(iterable)] * n return zip_longest(*args, fillvalue=fillvalue)
[ "def", "per_chunk", "(", "iterable", ",", "n", "=", "1", ",", "fillvalue", "=", "None", ")", ":", "args", "=", "[", "iter", "(", "iterable", ")", "]", "*", "n", "return", "zip_longest", "(", "*", "args", ",", "fillvalue", "=", "fillvalue", ")" ]
From http://stackoverflow.com/a/8991553/610569 >>> list(per_chunk('abcdefghi', n=2)) [('a', 'b'), ('c', 'd'), ('e', 'f'), ('g', 'h'), ('i', None)] >>> list(per_chunk('abcdefghi', n=3)) [('a', 'b', 'c'), ('d', 'e', 'f'), ('g', 'h', 'i')]
[ "From", "http", ":", "//", "stackoverflow", ".", "com", "/", "a", "/", "8991553", "/", "610569" ]
train
https://github.com/alvations/lazyme/blob/961a8282198588ff72e15643f725ce895e51d06d/lazyme/iterate.py#L26-L36
alvations/lazyme
lazyme/iterate.py
per_window
def per_window(sequence, n=1): """ From http://stackoverflow.com/q/42220614/610569 >>> list(per_window([1,2,3,4], n=2)) [(1, 2), (2, 3), (3, 4)] >>> list(per_window([1,2,3,4], n=3)) [(1, 2, 3), (2, 3, 4)] """ start, stop = 0, n seq = list(sequence) while stop <= len(seq): yield tuple(seq[start:stop]) start += 1 stop += 1
python
def per_window(sequence, n=1): """ From http://stackoverflow.com/q/42220614/610569 >>> list(per_window([1,2,3,4], n=2)) [(1, 2), (2, 3), (3, 4)] >>> list(per_window([1,2,3,4], n=3)) [(1, 2, 3), (2, 3, 4)] """ start, stop = 0, n seq = list(sequence) while stop <= len(seq): yield tuple(seq[start:stop]) start += 1 stop += 1
[ "def", "per_window", "(", "sequence", ",", "n", "=", "1", ")", ":", "start", ",", "stop", "=", "0", ",", "n", "seq", "=", "list", "(", "sequence", ")", "while", "stop", "<=", "len", "(", "seq", ")", ":", "yield", "tuple", "(", "seq", "[", "star...
From http://stackoverflow.com/q/42220614/610569 >>> list(per_window([1,2,3,4], n=2)) [(1, 2), (2, 3), (3, 4)] >>> list(per_window([1,2,3,4], n=3)) [(1, 2, 3), (2, 3, 4)]
[ "From", "http", ":", "//", "stackoverflow", ".", "com", "/", "q", "/", "42220614", "/", "610569" ]
train
https://github.com/alvations/lazyme/blob/961a8282198588ff72e15643f725ce895e51d06d/lazyme/iterate.py#L38-L52
alvations/lazyme
lazyme/iterate.py
skipping_window
def skipping_window(sequence, target, n=3): """ Return a sliding window with a constraint to check that target is inside the window. From http://stackoverflow.com/q/43626525/610569 >>> list(skipping_window([1,2,3,4,5], 2, 3)) [(1, 2, 3), (2, 3, 4)] """ start, stop = 0, n seq = list(sequence) while stop <= len(seq): subseq = seq[start:stop] if target in subseq: yield tuple(seq[start:stop]) start += 1 stop += 1 # Fast forwarding the start. # Find the next window which contains the target. try: # `seq.index(target, start) - (n-1)` would be the next # window where the constraint is met. start = max(seq.index(target, start) - (n-1), start) stop = start + n except ValueError: break
python
def skipping_window(sequence, target, n=3): """ Return a sliding window with a constraint to check that target is inside the window. From http://stackoverflow.com/q/43626525/610569 >>> list(skipping_window([1,2,3,4,5], 2, 3)) [(1, 2, 3), (2, 3, 4)] """ start, stop = 0, n seq = list(sequence) while stop <= len(seq): subseq = seq[start:stop] if target in subseq: yield tuple(seq[start:stop]) start += 1 stop += 1 # Fast forwarding the start. # Find the next window which contains the target. try: # `seq.index(target, start) - (n-1)` would be the next # window where the constraint is met. start = max(seq.index(target, start) - (n-1), start) stop = start + n except ValueError: break
[ "def", "skipping_window", "(", "sequence", ",", "target", ",", "n", "=", "3", ")", ":", "start", ",", "stop", "=", "0", ",", "n", "seq", "=", "list", "(", "sequence", ")", "while", "stop", "<=", "len", "(", "seq", ")", ":", "subseq", "=", "seq", ...
Return a sliding window with a constraint to check that target is inside the window. From http://stackoverflow.com/q/43626525/610569 >>> list(skipping_window([1,2,3,4,5], 2, 3)) [(1, 2, 3), (2, 3, 4)]
[ "Return", "a", "sliding", "window", "with", "a", "constraint", "to", "check", "that", "target", "is", "inside", "the", "window", ".", "From", "http", ":", "//", "stackoverflow", ".", "com", "/", "q", "/", "43626525", "/", "610569" ]
train
https://github.com/alvations/lazyme/blob/961a8282198588ff72e15643f725ce895e51d06d/lazyme/iterate.py#L63-L88
alvations/lazyme
lazyme/iterate.py
camel_shuffle
def camel_shuffle(sequence): """ Inspired by https://stackoverflow.com/q/42549212/610569 >>> list(range(12)) # Linear. [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11] >>> camel_shuffle(list(range(12))) # M-shape. [0, 4, 8, 9, 5, 1, 2, 6, 10, 11, 7, 3] >>> camel_shuffle(list(reversed(range(12)))) #W-shape. [11, 7, 3, 2, 6, 10, 9, 5, 1, 0, 4, 8] """ one_three, two_four = zigzag(sequence) one, three = zigzag(one_three) two, four = zigzag(two_four) return one + list(reversed(two)) + three + list(reversed(four))
python
def camel_shuffle(sequence): """ Inspired by https://stackoverflow.com/q/42549212/610569 >>> list(range(12)) # Linear. [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11] >>> camel_shuffle(list(range(12))) # M-shape. [0, 4, 8, 9, 5, 1, 2, 6, 10, 11, 7, 3] >>> camel_shuffle(list(reversed(range(12)))) #W-shape. [11, 7, 3, 2, 6, 10, 9, 5, 1, 0, 4, 8] """ one_three, two_four = zigzag(sequence) one, three = zigzag(one_three) two, four = zigzag(two_four) return one + list(reversed(two)) + three + list(reversed(four))
[ "def", "camel_shuffle", "(", "sequence", ")", ":", "one_three", ",", "two_four", "=", "zigzag", "(", "sequence", ")", "one", ",", "three", "=", "zigzag", "(", "one_three", ")", "two", ",", "four", "=", "zigzag", "(", "two_four", ")", "return", "one", "...
Inspired by https://stackoverflow.com/q/42549212/610569 >>> list(range(12)) # Linear. [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11] >>> camel_shuffle(list(range(12))) # M-shape. [0, 4, 8, 9, 5, 1, 2, 6, 10, 11, 7, 3] >>> camel_shuffle(list(reversed(range(12)))) #W-shape. [11, 7, 3, 2, 6, 10, 9, 5, 1, 0, 4, 8]
[ "Inspired", "by", "https", ":", "//", "stackoverflow", ".", "com", "/", "q", "/", "42549212", "/", "610569" ]
train
https://github.com/alvations/lazyme/blob/961a8282198588ff72e15643f725ce895e51d06d/lazyme/iterate.py#L90-L106
SmartTeleMax/iktomi
iktomi/utils/paginator.py
Paginator.page
def page(self): '''Current page.''' page = self.request.GET.get(self.page_param) if not page: return 1 try: page = int(page) except ValueError: self.invalid_page() return 1 if page<1: self.invalid_page() return 1 return page
python
def page(self): '''Current page.''' page = self.request.GET.get(self.page_param) if not page: return 1 try: page = int(page) except ValueError: self.invalid_page() return 1 if page<1: self.invalid_page() return 1 return page
[ "def", "page", "(", "self", ")", ":", "page", "=", "self", ".", "request", ".", "GET", ".", "get", "(", "self", ".", "page_param", ")", "if", "not", "page", ":", "return", "1", "try", ":", "page", "=", "int", "(", "page", ")", "except", "ValueErr...
Current page.
[ "Current", "page", "." ]
train
https://github.com/SmartTeleMax/iktomi/blob/80bc0f1408d63efe7f5844367d1f6efba44b35f2/iktomi/utils/paginator.py#L160-L173
SmartTeleMax/iktomi
iktomi/utils/paginator.py
Paginator.url
def url(self): '''Current or base URL. Can be redefined via keyword argument on initialization. Returns `iktomi.web.URL object. `''' return URL.from_url(self.request.url, show_host=self.show_host)
python
def url(self): '''Current or base URL. Can be redefined via keyword argument on initialization. Returns `iktomi.web.URL object. `''' return URL.from_url(self.request.url, show_host=self.show_host)
[ "def", "url", "(", "self", ")", ":", "return", "URL", ".", "from_url", "(", "self", ".", "request", ".", "url", ",", "show_host", "=", "self", ".", "show_host", ")" ]
Current or base URL. Can be redefined via keyword argument on initialization. Returns `iktomi.web.URL object. `
[ "Current", "or", "base", "URL", ".", "Can", "be", "redefined", "via", "keyword", "argument", "on", "initialization", "." ]
train
https://github.com/SmartTeleMax/iktomi/blob/80bc0f1408d63efe7f5844367d1f6efba44b35f2/iktomi/utils/paginator.py#L176-L182
SmartTeleMax/iktomi
iktomi/utils/paginator.py
Paginator.page_url
def page_url(self, page): ''' Returns URL for page, page is included as query parameter. Can be redefined by keyword argument ''' if page is not None and page != 1: return self.url.qs_set(**{self.page_param: page}) elif page is not None: return self.url.qs_delete('page')
python
def page_url(self, page): ''' Returns URL for page, page is included as query parameter. Can be redefined by keyword argument ''' if page is not None and page != 1: return self.url.qs_set(**{self.page_param: page}) elif page is not None: return self.url.qs_delete('page')
[ "def", "page_url", "(", "self", ",", "page", ")", ":", "if", "page", "is", "not", "None", "and", "page", "!=", "1", ":", "return", "self", ".", "url", ".", "qs_set", "(", "*", "*", "{", "self", ".", "page_param", ":", "page", "}", ")", "elif", ...
Returns URL for page, page is included as query parameter. Can be redefined by keyword argument
[ "Returns", "URL", "for", "page", "page", "is", "included", "as", "query", "parameter", "." ]
train
https://github.com/SmartTeleMax/iktomi/blob/80bc0f1408d63efe7f5844367d1f6efba44b35f2/iktomi/utils/paginator.py#L184-L193
SmartTeleMax/iktomi
iktomi/utils/paginator.py
Paginator.pages_count
def pages_count(self): '''Number of pages.''' if not self.limit or self.count<self.limit: return 1 if self.count % self.limit <= self.orphans: return self.count // self.limit return int(math.ceil(float(self.count)/self.limit))
python
def pages_count(self): '''Number of pages.''' if not self.limit or self.count<self.limit: return 1 if self.count % self.limit <= self.orphans: return self.count // self.limit return int(math.ceil(float(self.count)/self.limit))
[ "def", "pages_count", "(", "self", ")", ":", "if", "not", "self", ".", "limit", "or", "self", ".", "count", "<", "self", ".", "limit", ":", "return", "1", "if", "self", ".", "count", "%", "self", ".", "limit", "<=", "self", ".", "orphans", ":", "...
Number of pages.
[ "Number", "of", "pages", "." ]
train
https://github.com/SmartTeleMax/iktomi/blob/80bc0f1408d63efe7f5844367d1f6efba44b35f2/iktomi/utils/paginator.py#L199-L205
SmartTeleMax/iktomi
iktomi/utils/paginator.py
Paginator.slice
def slice(self, items): '''Slice the sequence of all items to obtain them for current page.''' if self.limit: if self.page>self.pages_count: return [] if self.page == self.pages_count: return items[self.limit*(self.page-1):] return items[self.limit*(self.page-1):self.limit*self.page] else: return items[:]
python
def slice(self, items): '''Slice the sequence of all items to obtain them for current page.''' if self.limit: if self.page>self.pages_count: return [] if self.page == self.pages_count: return items[self.limit*(self.page-1):] return items[self.limit*(self.page-1):self.limit*self.page] else: return items[:]
[ "def", "slice", "(", "self", ",", "items", ")", ":", "if", "self", ".", "limit", ":", "if", "self", ".", "page", ">", "self", ".", "pages_count", ":", "return", "[", "]", "if", "self", ".", "page", "==", "self", ".", "pages_count", ":", "return", ...
Slice the sequence of all items to obtain them for current page.
[ "Slice", "the", "sequence", "of", "all", "items", "to", "obtain", "them", "for", "current", "page", "." ]
train
https://github.com/SmartTeleMax/iktomi/blob/80bc0f1408d63efe7f5844367d1f6efba44b35f2/iktomi/utils/paginator.py#L207-L216
project-ncl/pnc-cli
pnc_cli/swagger_client/apis/projects_api.py
ProjectsApi.get_build_configurations
def get_build_configurations(self, id, **kwargs): """ Gets all BuildConfigurations associated with the specified Project Id This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.get_build_configurations(id, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param int id: Project Id (required) :param int page_index: Page Index :param int page_size: Pagination size :param str sort: Sorting RSQL :param str q: RSQL Query :return: BuildConfigurationPage If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('callback'): return self.get_build_configurations_with_http_info(id, **kwargs) else: (data) = self.get_build_configurations_with_http_info(id, **kwargs) return data
python
def get_build_configurations(self, id, **kwargs): """ Gets all BuildConfigurations associated with the specified Project Id This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.get_build_configurations(id, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param int id: Project Id (required) :param int page_index: Page Index :param int page_size: Pagination size :param str sort: Sorting RSQL :param str q: RSQL Query :return: BuildConfigurationPage If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('callback'): return self.get_build_configurations_with_http_info(id, **kwargs) else: (data) = self.get_build_configurations_with_http_info(id, **kwargs) return data
[ "def", "get_build_configurations", "(", "self", ",", "id", ",", "*", "*", "kwargs", ")", ":", "kwargs", "[", "'_return_http_data_only'", "]", "=", "True", "if", "kwargs", ".", "get", "(", "'callback'", ")", ":", "return", "self", ".", "get_build_configuratio...
Gets all BuildConfigurations associated with the specified Project Id This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.get_build_configurations(id, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param int id: Project Id (required) :param int page_index: Page Index :param int page_size: Pagination size :param str sort: Sorting RSQL :param str q: RSQL Query :return: BuildConfigurationPage If the method is called asynchronously, returns the request thread.
[ "Gets", "all", "BuildConfigurations", "associated", "with", "the", "specified", "Project", "Id", "This", "method", "makes", "a", "synchronous", "HTTP", "request", "by", "default", ".", "To", "make", "an", "asynchronous", "HTTP", "request", "please", "define", "a...
train
https://github.com/project-ncl/pnc-cli/blob/3dc149bf84928f60a8044ac50b58bbaddd451902/pnc_cli/swagger_client/apis/projects_api.py#L366-L394
CivicSpleen/ambry
ambry/orm/account.py
Account.dict
def dict(self): """A dict that holds key/values for all of the properties in the object. :return: """ d = {p.key: getattr(self, p.key) for p in self.__mapper__.attrs if p.key not in ('data')} d['secret'] = 'not available' if self.secret_password: try: d['secret'] = self.decrypt_secret() except AccountDecryptionError: pass if self.data: for k, v in self.data.items(): d[k] = v return {k: v for k, v in d.items()}
python
def dict(self): """A dict that holds key/values for all of the properties in the object. :return: """ d = {p.key: getattr(self, p.key) for p in self.__mapper__.attrs if p.key not in ('data')} d['secret'] = 'not available' if self.secret_password: try: d['secret'] = self.decrypt_secret() except AccountDecryptionError: pass if self.data: for k, v in self.data.items(): d[k] = v return {k: v for k, v in d.items()}
[ "def", "dict", "(", "self", ")", ":", "d", "=", "{", "p", ".", "key", ":", "getattr", "(", "self", ",", "p", ".", "key", ")", "for", "p", "in", "self", ".", "__mapper__", ".", "attrs", "if", "p", ".", "key", "not", "in", "(", "'data'", ")", ...
A dict that holds key/values for all of the properties in the object. :return:
[ "A", "dict", "that", "holds", "key", "/", "values", "for", "all", "of", "the", "properties", "in", "the", "object", "." ]
train
https://github.com/CivicSpleen/ambry/blob/d7f2be4bf1f7ffd086f3fadd4fcae60c32473e42/ambry/orm/account.py#L187-L209
project-ncl/pnc-cli
pnc_cli/buildrecords.py
list_build_records
def list_build_records(page_size=200, page_index=0, sort="", q=""): """ List all BuildRecords """ data = list_build_records_raw(page_size, page_index, sort, q) if data: return utils.format_json_list(data)
python
def list_build_records(page_size=200, page_index=0, sort="", q=""): """ List all BuildRecords """ data = list_build_records_raw(page_size, page_index, sort, q) if data: return utils.format_json_list(data)
[ "def", "list_build_records", "(", "page_size", "=", "200", ",", "page_index", "=", "0", ",", "sort", "=", "\"\"", ",", "q", "=", "\"\"", ")", ":", "data", "=", "list_build_records_raw", "(", "page_size", ",", "page_index", ",", "sort", ",", "q", ")", "...
List all BuildRecords
[ "List", "all", "BuildRecords" ]
train
https://github.com/project-ncl/pnc-cli/blob/3dc149bf84928f60a8044ac50b58bbaddd451902/pnc_cli/buildrecords.py#L13-L19
project-ncl/pnc-cli
pnc_cli/buildrecords.py
list_records_for_build_configuration
def list_records_for_build_configuration(id=None, name=None, page_size=200, page_index=0, sort="", q=""): """ List all BuildRecords for a given BuildConfiguration """ data = list_records_for_build_configuration_raw(id, name, page_size, page_index, sort, q) if data: return utils.format_json_list(data)
python
def list_records_for_build_configuration(id=None, name=None, page_size=200, page_index=0, sort="", q=""): """ List all BuildRecords for a given BuildConfiguration """ data = list_records_for_build_configuration_raw(id, name, page_size, page_index, sort, q) if data: return utils.format_json_list(data)
[ "def", "list_records_for_build_configuration", "(", "id", "=", "None", ",", "name", "=", "None", ",", "page_size", "=", "200", ",", "page_index", "=", "0", ",", "sort", "=", "\"\"", ",", "q", "=", "\"\"", ")", ":", "data", "=", "list_records_for_build_conf...
List all BuildRecords for a given BuildConfiguration
[ "List", "all", "BuildRecords", "for", "a", "given", "BuildConfiguration" ]
train
https://github.com/project-ncl/pnc-cli/blob/3dc149bf84928f60a8044ac50b58bbaddd451902/pnc_cli/buildrecords.py#L33-L39
project-ncl/pnc-cli
pnc_cli/buildrecords.py
list_records_for_project
def list_records_for_project(id=None, name=None, page_size=200, page_index=0, sort="", q=""): """ List all BuildRecords for a given Project """ data = list_records_for_project_raw(id, name, page_size, page_index, sort, q) if data: return utils.format_json_list(data)
python
def list_records_for_project(id=None, name=None, page_size=200, page_index=0, sort="", q=""): """ List all BuildRecords for a given Project """ data = list_records_for_project_raw(id, name, page_size, page_index, sort, q) if data: return utils.format_json_list(data)
[ "def", "list_records_for_project", "(", "id", "=", "None", ",", "name", "=", "None", ",", "page_size", "=", "200", ",", "page_index", "=", "0", ",", "sort", "=", "\"\"", ",", "q", "=", "\"\"", ")", ":", "data", "=", "list_records_for_project_raw", "(", ...
List all BuildRecords for a given Project
[ "List", "all", "BuildRecords", "for", "a", "given", "Project" ]
train
https://github.com/project-ncl/pnc-cli/blob/3dc149bf84928f60a8044ac50b58bbaddd451902/pnc_cli/buildrecords.py#L55-L61
project-ncl/pnc-cli
pnc_cli/buildrecords.py
list_built_artifacts
def list_built_artifacts(id, page_size=200, page_index=0, sort="", q=""): """ List Artifacts associated with a BuildRecord """ data = list_built_artifacts_raw(id, page_size, page_index, sort, q) if data: return utils.format_json_list(data)
python
def list_built_artifacts(id, page_size=200, page_index=0, sort="", q=""): """ List Artifacts associated with a BuildRecord """ data = list_built_artifacts_raw(id, page_size, page_index, sort, q) if data: return utils.format_json_list(data)
[ "def", "list_built_artifacts", "(", "id", ",", "page_size", "=", "200", ",", "page_index", "=", "0", ",", "sort", "=", "\"\"", ",", "q", "=", "\"\"", ")", ":", "data", "=", "list_built_artifacts_raw", "(", "id", ",", "page_size", ",", "page_index", ",", ...
List Artifacts associated with a BuildRecord
[ "List", "Artifacts", "associated", "with", "a", "BuildRecord" ]
train
https://github.com/project-ncl/pnc-cli/blob/3dc149bf84928f60a8044ac50b58bbaddd451902/pnc_cli/buildrecords.py#L91-L97
project-ncl/pnc-cli
pnc_cli/buildrecords.py
list_dependency_artifacts
def list_dependency_artifacts(id, page_size=200, page_index=0, sort="", q=""): """ List dependency artifacts associated with a BuildRecord """ data = list_dependency_artifacts_raw(id, page_size, page_index, sort, q) if data: return utils.format_json_list(data)
python
def list_dependency_artifacts(id, page_size=200, page_index=0, sort="", q=""): """ List dependency artifacts associated with a BuildRecord """ data = list_dependency_artifacts_raw(id, page_size, page_index, sort, q) if data: return utils.format_json_list(data)
[ "def", "list_dependency_artifacts", "(", "id", ",", "page_size", "=", "200", ",", "page_index", "=", "0", ",", "sort", "=", "\"\"", ",", "q", "=", "\"\"", ")", ":", "data", "=", "list_dependency_artifacts_raw", "(", "id", ",", "page_size", ",", "page_index...
List dependency artifacts associated with a BuildRecord
[ "List", "dependency", "artifacts", "associated", "with", "a", "BuildRecord" ]
train
https://github.com/project-ncl/pnc-cli/blob/3dc149bf84928f60a8044ac50b58bbaddd451902/pnc_cli/buildrecords.py#L111-L117
paddycarey/mopidy-webhooks
mopidy_webhooks/reporters/status.py
StatusReporter.on_start
def on_start(self): """Runs when the actor is started and schedules a status update """ logger.info('StatusReporter started.') # if configured not to report status then return immediately if self.config['status_update_interval'] == 0: logger.info('StatusReporter disabled by configuration.') return self.in_future.report_status()
python
def on_start(self): """Runs when the actor is started and schedules a status update """ logger.info('StatusReporter started.') # if configured not to report status then return immediately if self.config['status_update_interval'] == 0: logger.info('StatusReporter disabled by configuration.') return self.in_future.report_status()
[ "def", "on_start", "(", "self", ")", ":", "logger", ".", "info", "(", "'StatusReporter started.'", ")", "# if configured not to report status then return immediately", "if", "self", ".", "config", "[", "'status_update_interval'", "]", "==", "0", ":", "logger", ".", ...
Runs when the actor is started and schedules a status update
[ "Runs", "when", "the", "actor", "is", "started", "and", "schedules", "a", "status", "update" ]
train
https://github.com/paddycarey/mopidy-webhooks/blob/813b57333d96837a5ed91b00e389383815686908/mopidy_webhooks/reporters/status.py#L30-L38
paddycarey/mopidy-webhooks
mopidy_webhooks/reporters/status.py
StatusReporter.report_again
def report_again(self, current_status): """Computes a sleep interval, sleeps for the specified amount of time then kicks off another status report. """ # calculate sleep interval based on current status and configured interval _m = {'playing': 1, 'paused': 2, 'stopped': 5}[current_status['state']] interval = (self.config['status_update_interval'] * _m) / 1000.0 # sleep for computed interval and kickoff another webhook time.sleep(interval) self.in_future.report_status()
python
def report_again(self, current_status): """Computes a sleep interval, sleeps for the specified amount of time then kicks off another status report. """ # calculate sleep interval based on current status and configured interval _m = {'playing': 1, 'paused': 2, 'stopped': 5}[current_status['state']] interval = (self.config['status_update_interval'] * _m) / 1000.0 # sleep for computed interval and kickoff another webhook time.sleep(interval) self.in_future.report_status()
[ "def", "report_again", "(", "self", ",", "current_status", ")", ":", "# calculate sleep interval based on current status and configured interval", "_m", "=", "{", "'playing'", ":", "1", ",", "'paused'", ":", "2", ",", "'stopped'", ":", "5", "}", "[", "current_status...
Computes a sleep interval, sleeps for the specified amount of time then kicks off another status report.
[ "Computes", "a", "sleep", "interval", "sleeps", "for", "the", "specified", "amount", "of", "time", "then", "kicks", "off", "another", "status", "report", "." ]
train
https://github.com/paddycarey/mopidy-webhooks/blob/813b57333d96837a5ed91b00e389383815686908/mopidy_webhooks/reporters/status.py#L40-L49
paddycarey/mopidy-webhooks
mopidy_webhooks/reporters/status.py
StatusReporter.report_status
def report_status(self): """Get status of player from mopidy core and send webhook. """ current_status = { 'current_track': self.core.playback.current_track.get(), 'state': self.core.playback.state.get(), 'time_position': self.core.playback.time_position.get(), } send_webhook(self.config, {'status_report': current_status}) self.report_again(current_status)
python
def report_status(self): """Get status of player from mopidy core and send webhook. """ current_status = { 'current_track': self.core.playback.current_track.get(), 'state': self.core.playback.state.get(), 'time_position': self.core.playback.time_position.get(), } send_webhook(self.config, {'status_report': current_status}) self.report_again(current_status)
[ "def", "report_status", "(", "self", ")", ":", "current_status", "=", "{", "'current_track'", ":", "self", ".", "core", ".", "playback", ".", "current_track", ".", "get", "(", ")", ",", "'state'", ":", "self", ".", "core", ".", "playback", ".", "state", ...
Get status of player from mopidy core and send webhook.
[ "Get", "status", "of", "player", "from", "mopidy", "core", "and", "send", "webhook", "." ]
train
https://github.com/paddycarey/mopidy-webhooks/blob/813b57333d96837a5ed91b00e389383815686908/mopidy_webhooks/reporters/status.py#L51-L60
edoburu/django-any-urlfield
any_urlfield/cache.py
get_urlfield_cache_key
def get_urlfield_cache_key(model, pk, language_code=None): """ The low-level function to get the cache key for a model. """ return 'anyurlfield.{0}.{1}.{2}.{3}'.format(model._meta.app_label, model.__name__, pk, language_code or get_language())
python
def get_urlfield_cache_key(model, pk, language_code=None): """ The low-level function to get the cache key for a model. """ return 'anyurlfield.{0}.{1}.{2}.{3}'.format(model._meta.app_label, model.__name__, pk, language_code or get_language())
[ "def", "get_urlfield_cache_key", "(", "model", ",", "pk", ",", "language_code", "=", "None", ")", ":", "return", "'anyurlfield.{0}.{1}.{2}.{3}'", ".", "format", "(", "model", ".", "_meta", ".", "app_label", ",", "model", ".", "__name__", ",", "pk", ",", "lan...
The low-level function to get the cache key for a model.
[ "The", "low", "-", "level", "function", "to", "get", "the", "cache", "key", "for", "a", "model", "." ]
train
https://github.com/edoburu/django-any-urlfield/blob/8d7d36c8a1fc251930f6dbdcc8b5b5151d20e3ab/any_urlfield/cache.py#L7-L11
edoburu/django-any-urlfield
any_urlfield/cache.py
get_object_cache_keys
def get_object_cache_keys(instance): """ Return the cache keys associated with an object. """ if not instance.pk or instance._state.adding: return [] keys = [] for language in _get_available_languages(instance): keys.append(get_urlfield_cache_key(instance.__class__, instance.pk, language)) return keys
python
def get_object_cache_keys(instance): """ Return the cache keys associated with an object. """ if not instance.pk or instance._state.adding: return [] keys = [] for language in _get_available_languages(instance): keys.append(get_urlfield_cache_key(instance.__class__, instance.pk, language)) return keys
[ "def", "get_object_cache_keys", "(", "instance", ")", ":", "if", "not", "instance", ".", "pk", "or", "instance", ".", "_state", ".", "adding", ":", "return", "[", "]", "keys", "=", "[", "]", "for", "language", "in", "_get_available_languages", "(", "instan...
Return the cache keys associated with an object.
[ "Return", "the", "cache", "keys", "associated", "with", "an", "object", "." ]
train
https://github.com/edoburu/django-any-urlfield/blob/8d7d36c8a1fc251930f6dbdcc8b5b5151d20e3ab/any_urlfield/cache.py#L14-L25
flaviogrossi/sockjs-cyclone
sockjs/cyclone/stats.py
MovingAverage.flush
def flush(self): """ Add accumulator to the moving average queue and reset it. For example, called by the StatsCollector once per second to calculate per-second average. """ n = self.accumulator self.accumulator = 0 stream = self.stream stream.append(n) self.sum += n streamlen = len(stream) if streamlen > self.period: self.sum -= stream.popleft() streamlen -= 1 if streamlen == 0: self.last_average = 0 else: self.last_average = self.sum / streamlen
python
def flush(self): """ Add accumulator to the moving average queue and reset it. For example, called by the StatsCollector once per second to calculate per-second average. """ n = self.accumulator self.accumulator = 0 stream = self.stream stream.append(n) self.sum += n streamlen = len(stream) if streamlen > self.period: self.sum -= stream.popleft() streamlen -= 1 if streamlen == 0: self.last_average = 0 else: self.last_average = self.sum / streamlen
[ "def", "flush", "(", "self", ")", ":", "n", "=", "self", ".", "accumulator", "self", ".", "accumulator", "=", "0", "stream", "=", "self", ".", "stream", "stream", ".", "append", "(", "n", ")", "self", ".", "sum", "+=", "n", "streamlen", "=", "len",...
Add accumulator to the moving average queue and reset it. For example, called by the StatsCollector once per second to calculate per-second average.
[ "Add", "accumulator", "to", "the", "moving", "average", "queue", "and", "reset", "it", ".", "For", "example", "called", "by", "the", "StatsCollector", "once", "per", "second", "to", "calculate", "per", "-", "second", "average", "." ]
train
https://github.com/flaviogrossi/sockjs-cyclone/blob/d3ca053ec1aa1e85f652347bff562c2319be37a2/sockjs/cyclone/stats.py#L29-L50
cosven/feeluown-core
fuocore/qqmusic/api.py
API.artist_detail
def artist_detail(self, artist_id): """获取歌手详情""" path = '/v8/fcg-bin/fcg_v8_singer_track_cp.fcg' url = api_base_url + path params = { 'singerid': artist_id, 'songstatus': 1, 'order': 'listen', 'begin': 0, 'num': 50, 'from': 'h5', 'platform': 'h5page', } resp = requests.get(url, params=params, timeout=self._timeout) rv = resp.json() return rv['data']
python
def artist_detail(self, artist_id): """获取歌手详情""" path = '/v8/fcg-bin/fcg_v8_singer_track_cp.fcg' url = api_base_url + path params = { 'singerid': artist_id, 'songstatus': 1, 'order': 'listen', 'begin': 0, 'num': 50, 'from': 'h5', 'platform': 'h5page', } resp = requests.get(url, params=params, timeout=self._timeout) rv = resp.json() return rv['data']
[ "def", "artist_detail", "(", "self", ",", "artist_id", ")", ":", "path", "=", "'/v8/fcg-bin/fcg_v8_singer_track_cp.fcg'", "url", "=", "api_base_url", "+", "path", "params", "=", "{", "'singerid'", ":", "artist_id", ",", "'songstatus'", ":", "1", ",", "'order'", ...
获取歌手详情
[ "获取歌手详情" ]
train
https://github.com/cosven/feeluown-core/blob/62dc64638f62971b16be0a75c0b8c7ae2999869e/fuocore/qqmusic/api.py#L105-L120
paddycarey/mopidy-webhooks
mopidy_webhooks/utils.py
send_webhook
def send_webhook(config, payload): """Sends a HTTP request to the configured server. All exceptions are suppressed but emit a warning message in the log. """ try: response = requests.post( config['webhook_url'], data=json.dumps(payload, cls=ModelJSONEncoder), headers={config['api_key_header_name']: config['api_key']}, ) except Exception as e: logger.warning('Unable to send webhook: ({1}) {2}'.format( e.__class__.__name__, e.message, )) else: logger.debug('Webhook response: ({0}) {1}'.format( response.status_code, response.text, ))
python
def send_webhook(config, payload): """Sends a HTTP request to the configured server. All exceptions are suppressed but emit a warning message in the log. """ try: response = requests.post( config['webhook_url'], data=json.dumps(payload, cls=ModelJSONEncoder), headers={config['api_key_header_name']: config['api_key']}, ) except Exception as e: logger.warning('Unable to send webhook: ({1}) {2}'.format( e.__class__.__name__, e.message, )) else: logger.debug('Webhook response: ({0}) {1}'.format( response.status_code, response.text, ))
[ "def", "send_webhook", "(", "config", ",", "payload", ")", ":", "try", ":", "response", "=", "requests", ".", "post", "(", "config", "[", "'webhook_url'", "]", ",", "data", "=", "json", ".", "dumps", "(", "payload", ",", "cls", "=", "ModelJSONEncoder", ...
Sends a HTTP request to the configured server. All exceptions are suppressed but emit a warning message in the log.
[ "Sends", "a", "HTTP", "request", "to", "the", "configured", "server", "." ]
train
https://github.com/paddycarey/mopidy-webhooks/blob/813b57333d96837a5ed91b00e389383815686908/mopidy_webhooks/utils.py#L16-L36
CivicSpleen/ambry
ambry/library/warehouse.py
Warehouse.clean
def clean(self): """Remove all of the tables and data from the warehouse""" connection = self._backend._get_connection() self._backend.clean(connection)
python
def clean(self): """Remove all of the tables and data from the warehouse""" connection = self._backend._get_connection() self._backend.clean(connection)
[ "def", "clean", "(", "self", ")", ":", "connection", "=", "self", ".", "_backend", ".", "_get_connection", "(", ")", "self", ".", "_backend", ".", "clean", "(", "connection", ")" ]
Remove all of the tables and data from the warehouse
[ "Remove", "all", "of", "the", "tables", "and", "data", "from", "the", "warehouse" ]
train
https://github.com/CivicSpleen/ambry/blob/d7f2be4bf1f7ffd086f3fadd4fcae60c32473e42/ambry/library/warehouse.py#L79-L82
CivicSpleen/ambry
ambry/library/warehouse.py
Warehouse.list
def list(self): """List the tables in the database""" connection = self._backend._get_connection() return list(self._backend.list(connection))
python
def list(self): """List the tables in the database""" connection = self._backend._get_connection() return list(self._backend.list(connection))
[ "def", "list", "(", "self", ")", ":", "connection", "=", "self", ".", "_backend", ".", "_get_connection", "(", ")", "return", "list", "(", "self", ".", "_backend", ".", "list", "(", "connection", ")", ")" ]
List the tables in the database
[ "List", "the", "tables", "in", "the", "database" ]
train
https://github.com/CivicSpleen/ambry/blob/d7f2be4bf1f7ffd086f3fadd4fcae60c32473e42/ambry/library/warehouse.py#L84-L87
CivicSpleen/ambry
ambry/library/warehouse.py
Warehouse.install
def install(self, ref, table_name=None, index_columns=None,logger=None): """ Finds partition by reference and installs it to warehouse db. Args: ref (str): id, vid (versioned id), name or vname (versioned name) of the partition. """ try: obj_number = ObjectNumber.parse(ref) if isinstance(obj_number, TableNumber): table = self._library.table(ref) connection = self._backend._get_connection() return self._backend.install_table(connection, table, logger=logger) else: # assume partition raise NotObjectNumberError except NotObjectNumberError: # assume partition. partition = self._library.partition(ref) connection = self._backend._get_connection() return self._backend.install( connection, partition, table_name=table_name, index_columns=index_columns, logger=logger)
python
def install(self, ref, table_name=None, index_columns=None,logger=None): """ Finds partition by reference and installs it to warehouse db. Args: ref (str): id, vid (versioned id), name or vname (versioned name) of the partition. """ try: obj_number = ObjectNumber.parse(ref) if isinstance(obj_number, TableNumber): table = self._library.table(ref) connection = self._backend._get_connection() return self._backend.install_table(connection, table, logger=logger) else: # assume partition raise NotObjectNumberError except NotObjectNumberError: # assume partition. partition = self._library.partition(ref) connection = self._backend._get_connection() return self._backend.install( connection, partition, table_name=table_name, index_columns=index_columns, logger=logger)
[ "def", "install", "(", "self", ",", "ref", ",", "table_name", "=", "None", ",", "index_columns", "=", "None", ",", "logger", "=", "None", ")", ":", "try", ":", "obj_number", "=", "ObjectNumber", ".", "parse", "(", "ref", ")", "if", "isinstance", "(", ...
Finds partition by reference and installs it to warehouse db. Args: ref (str): id, vid (versioned id), name or vname (versioned name) of the partition.
[ "Finds", "partition", "by", "reference", "and", "installs", "it", "to", "warehouse", "db", "." ]
train
https://github.com/CivicSpleen/ambry/blob/d7f2be4bf1f7ffd086f3fadd4fcae60c32473e42/ambry/library/warehouse.py#L94-L120
CivicSpleen/ambry
ambry/library/warehouse.py
Warehouse.materialize
def materialize(self, ref, table_name=None, index_columns=None, logger=None): """ Creates materialized table for given partition reference. Args: ref (str): id, vid, name or vname of the partition. Returns: str: name of the partition table in the database. """ from ambry.library import Library assert isinstance(self._library, Library) logger.debug('Materializing warehouse partition.\n partition: {}'.format(ref)) partition = self._library.partition(ref) connection = self._backend._get_connection() return self._backend.install(connection, partition, table_name=table_name, index_columns=index_columns, materialize=True, logger=logger)
python
def materialize(self, ref, table_name=None, index_columns=None, logger=None): """ Creates materialized table for given partition reference. Args: ref (str): id, vid, name or vname of the partition. Returns: str: name of the partition table in the database. """ from ambry.library import Library assert isinstance(self._library, Library) logger.debug('Materializing warehouse partition.\n partition: {}'.format(ref)) partition = self._library.partition(ref) connection = self._backend._get_connection() return self._backend.install(connection, partition, table_name=table_name, index_columns=index_columns, materialize=True, logger=logger)
[ "def", "materialize", "(", "self", ",", "ref", ",", "table_name", "=", "None", ",", "index_columns", "=", "None", ",", "logger", "=", "None", ")", ":", "from", "ambry", ".", "library", "import", "Library", "assert", "isinstance", "(", "self", ".", "_libr...
Creates materialized table for given partition reference. Args: ref (str): id, vid, name or vname of the partition. Returns: str: name of the partition table in the database.
[ "Creates", "materialized", "table", "for", "given", "partition", "reference", "." ]
train
https://github.com/CivicSpleen/ambry/blob/d7f2be4bf1f7ffd086f3fadd4fcae60c32473e42/ambry/library/warehouse.py#L122-L141
CivicSpleen/ambry
ambry/library/warehouse.py
Warehouse.index
def index(self, ref, columns): """ Create an index on the columns. Args: ref (str): id, vid, name or versioned name of the partition. columns (list of str): names of the columns needed indexes. """ from ambry.orm.exc import NotFoundError logger.debug('Creating index for partition.\n ref: {}, columns: {}'.format(ref, columns)) connection = self._backend._get_connection() try: table_or_partition = self._library.partition(ref) except NotFoundError: table_or_partition = ref self._backend.index(connection, table_or_partition, columns)
python
def index(self, ref, columns): """ Create an index on the columns. Args: ref (str): id, vid, name or versioned name of the partition. columns (list of str): names of the columns needed indexes. """ from ambry.orm.exc import NotFoundError logger.debug('Creating index for partition.\n ref: {}, columns: {}'.format(ref, columns)) connection = self._backend._get_connection() try: table_or_partition = self._library.partition(ref) except NotFoundError: table_or_partition = ref self._backend.index(connection, table_or_partition, columns)
[ "def", "index", "(", "self", ",", "ref", ",", "columns", ")", ":", "from", "ambry", ".", "orm", ".", "exc", "import", "NotFoundError", "logger", ".", "debug", "(", "'Creating index for partition.\\n ref: {}, columns: {}'", ".", "format", "(", "ref", ",", "c...
Create an index on the columns. Args: ref (str): id, vid, name or versioned name of the partition. columns (list of str): names of the columns needed indexes.
[ "Create", "an", "index", "on", "the", "columns", "." ]
train
https://github.com/CivicSpleen/ambry/blob/d7f2be4bf1f7ffd086f3fadd4fcae60c32473e42/ambry/library/warehouse.py#L143-L163
CivicSpleen/ambry
ambry/library/warehouse.py
Warehouse.parse_sql
def parse_sql(self, asql): """ Executes all sql statements from asql. Args: library (library.Library): asql (str): ambry sql query - see https://github.com/CivicKnowledge/ambry/issues/140 for details. """ import sqlparse statements = sqlparse.parse(sqlparse.format(asql, strip_comments=True)) parsed_statements = [] for statement in statements: statement_str = statement.to_unicode().strip() for preprocessor in self._backend.sql_processors(): statement_str = preprocessor(statement_str, self._library, self._backend, self.connection) parsed_statements.append(statement_str) return parsed_statements
python
def parse_sql(self, asql): """ Executes all sql statements from asql. Args: library (library.Library): asql (str): ambry sql query - see https://github.com/CivicKnowledge/ambry/issues/140 for details. """ import sqlparse statements = sqlparse.parse(sqlparse.format(asql, strip_comments=True)) parsed_statements = [] for statement in statements: statement_str = statement.to_unicode().strip() for preprocessor in self._backend.sql_processors(): statement_str = preprocessor(statement_str, self._library, self._backend, self.connection) parsed_statements.append(statement_str) return parsed_statements
[ "def", "parse_sql", "(", "self", ",", "asql", ")", ":", "import", "sqlparse", "statements", "=", "sqlparse", ".", "parse", "(", "sqlparse", ".", "format", "(", "asql", ",", "strip_comments", "=", "True", ")", ")", "parsed_statements", "=", "[", "]", "for...
Executes all sql statements from asql. Args: library (library.Library): asql (str): ambry sql query - see https://github.com/CivicKnowledge/ambry/issues/140 for details.
[ "Executes", "all", "sql", "statements", "from", "asql", "." ]
train
https://github.com/CivicSpleen/ambry/blob/d7f2be4bf1f7ffd086f3fadd4fcae60c32473e42/ambry/library/warehouse.py#L165-L185
CivicSpleen/ambry
ambry/library/warehouse.py
Warehouse.query
def query(self, asql, logger=None): """ Execute an ASQL file and return the result of the first SELECT statement. :param asql: :param logger: :return: """ import sqlparse from ambry.mprlib.exceptions import BadSQLError from ambry.bundle.asql_parser import process_sql from ambry.orm.exc import NotFoundError if not logger: logger = self._library.logger rec = process_sql(asql, self._library) for drop in reversed(rec.drop): if drop: connection = self._backend._get_connection() cursor = self._backend.query(connection, drop, fetch=False) cursor.close() for vid in rec.materialize: logger.debug('Materialize {}'.format(vid)) self.materialize(vid, logger=logger) for vid in rec.install: logger.debug('Install {}'.format(vid)) self.install(vid, logger=logger) for statement in rec.statements: statement = statement.strip() logger.debug("Process statement: {}".format(statement[:60])) if statement.lower().startswith('create'): logger.debug(' Create {}'.format(statement)) connection = self._backend._get_connection() cursor = self._backend.query(connection, statement, fetch=False) cursor.close() elif statement.lower().startswith('select'): logger.debug('Run query {}'.format(statement)) connection = self._backend._get_connection() return self._backend.query(connection, statement, fetch=False) for table_or_vid, columns in rec.indexes: logger.debug('Index {}'.format(table_or_vid)) try: self.index(table_or_vid, columns) except NotFoundError as e: # Comon when the index table in's a VID, so no partition can be found. logger.debug('Failed to index {}; {}'.format(vid, e)) except Exception as e: logger.error('Failed to index {}; {}'.format(vid, e)) # A fake cursor that can be closed and iterated class closable_iterable(object): def close(self): pass def __iter__(self): pass return closable_iterable()
python
def query(self, asql, logger=None): """ Execute an ASQL file and return the result of the first SELECT statement. :param asql: :param logger: :return: """ import sqlparse from ambry.mprlib.exceptions import BadSQLError from ambry.bundle.asql_parser import process_sql from ambry.orm.exc import NotFoundError if not logger: logger = self._library.logger rec = process_sql(asql, self._library) for drop in reversed(rec.drop): if drop: connection = self._backend._get_connection() cursor = self._backend.query(connection, drop, fetch=False) cursor.close() for vid in rec.materialize: logger.debug('Materialize {}'.format(vid)) self.materialize(vid, logger=logger) for vid in rec.install: logger.debug('Install {}'.format(vid)) self.install(vid, logger=logger) for statement in rec.statements: statement = statement.strip() logger.debug("Process statement: {}".format(statement[:60])) if statement.lower().startswith('create'): logger.debug(' Create {}'.format(statement)) connection = self._backend._get_connection() cursor = self._backend.query(connection, statement, fetch=False) cursor.close() elif statement.lower().startswith('select'): logger.debug('Run query {}'.format(statement)) connection = self._backend._get_connection() return self._backend.query(connection, statement, fetch=False) for table_or_vid, columns in rec.indexes: logger.debug('Index {}'.format(table_or_vid)) try: self.index(table_or_vid, columns) except NotFoundError as e: # Comon when the index table in's a VID, so no partition can be found. logger.debug('Failed to index {}; {}'.format(vid, e)) except Exception as e: logger.error('Failed to index {}; {}'.format(vid, e)) # A fake cursor that can be closed and iterated class closable_iterable(object): def close(self): pass def __iter__(self): pass return closable_iterable()
[ "def", "query", "(", "self", ",", "asql", ",", "logger", "=", "None", ")", ":", "import", "sqlparse", "from", "ambry", ".", "mprlib", ".", "exceptions", "import", "BadSQLError", "from", "ambry", ".", "bundle", ".", "asql_parser", "import", "process_sql", "...
Execute an ASQL file and return the result of the first SELECT statement. :param asql: :param logger: :return:
[ "Execute", "an", "ASQL", "file", "and", "return", "the", "result", "of", "the", "first", "SELECT", "statement", "." ]
train
https://github.com/CivicSpleen/ambry/blob/d7f2be4bf1f7ffd086f3fadd4fcae60c32473e42/ambry/library/warehouse.py#L187-L261
CivicSpleen/ambry
ambry/library/warehouse.py
Warehouse.dataframe
def dataframe(self,asql, logger = None): """Like query(), but returns a Pandas dataframe""" import pandas as pd from ambry.mprlib.exceptions import BadSQLError try: def yielder(cursor): for i, row in enumerate(cursor): if i == 0: yield [ e[0] for e in cursor.getdescription()] yield row cursor = self.query(asql, logger) yld = yielder(cursor) header = next(yld) return pd.DataFrame(yld, columns=header) except BadSQLError as e: import traceback self._logger.error("SQL Error: {}".format( e)) self._logger.debug(traceback.format_exc())
python
def dataframe(self,asql, logger = None): """Like query(), but returns a Pandas dataframe""" import pandas as pd from ambry.mprlib.exceptions import BadSQLError try: def yielder(cursor): for i, row in enumerate(cursor): if i == 0: yield [ e[0] for e in cursor.getdescription()] yield row cursor = self.query(asql, logger) yld = yielder(cursor) header = next(yld) return pd.DataFrame(yld, columns=header) except BadSQLError as e: import traceback self._logger.error("SQL Error: {}".format( e)) self._logger.debug(traceback.format_exc())
[ "def", "dataframe", "(", "self", ",", "asql", ",", "logger", "=", "None", ")", ":", "import", "pandas", "as", "pd", "from", "ambry", ".", "mprlib", ".", "exceptions", "import", "BadSQLError", "try", ":", "def", "yielder", "(", "cursor", ")", ":", "for"...
Like query(), but returns a Pandas dataframe
[ "Like", "query", "()", "but", "returns", "a", "Pandas", "dataframe" ]
train
https://github.com/CivicSpleen/ambry/blob/d7f2be4bf1f7ffd086f3fadd4fcae60c32473e42/ambry/library/warehouse.py#L263-L287
CivicSpleen/ambry
ambry/library/warehouse.py
Warehouse.geoframe
def geoframe(self, sql, simplify=None, crs=None, epsg=4326): """ Return geopandas dataframe :param simplify: Integer or None. Simplify the geometry to a tolerance, in the units of the geometry. :param crs: Coordinate reference system information :param epsg: Specifiy the CRS as an EPGS number. :return: A Geopandas GeoDataFrame """ import geopandas from shapely.wkt import loads from fiona.crs import from_epsg if crs is None: try: crs = from_epsg(epsg) except TypeError: raise TypeError('Must set either crs or epsg for output.') df = self.dataframe(sql) geometry = df['geometry'] if simplify: s = geometry.apply(lambda x: loads(x).simplify(simplify)) else: s = geometry.apply(lambda x: loads(x)) df['geometry'] = geopandas.GeoSeries(s) return geopandas.GeoDataFrame(df, crs=crs, geometry='geometry')
python
def geoframe(self, sql, simplify=None, crs=None, epsg=4326): """ Return geopandas dataframe :param simplify: Integer or None. Simplify the geometry to a tolerance, in the units of the geometry. :param crs: Coordinate reference system information :param epsg: Specifiy the CRS as an EPGS number. :return: A Geopandas GeoDataFrame """ import geopandas from shapely.wkt import loads from fiona.crs import from_epsg if crs is None: try: crs = from_epsg(epsg) except TypeError: raise TypeError('Must set either crs or epsg for output.') df = self.dataframe(sql) geometry = df['geometry'] if simplify: s = geometry.apply(lambda x: loads(x).simplify(simplify)) else: s = geometry.apply(lambda x: loads(x)) df['geometry'] = geopandas.GeoSeries(s) return geopandas.GeoDataFrame(df, crs=crs, geometry='geometry')
[ "def", "geoframe", "(", "self", ",", "sql", ",", "simplify", "=", "None", ",", "crs", "=", "None", ",", "epsg", "=", "4326", ")", ":", "import", "geopandas", "from", "shapely", ".", "wkt", "import", "loads", "from", "fiona", ".", "crs", "import", "fr...
Return geopandas dataframe :param simplify: Integer or None. Simplify the geometry to a tolerance, in the units of the geometry. :param crs: Coordinate reference system information :param epsg: Specifiy the CRS as an EPGS number. :return: A Geopandas GeoDataFrame
[ "Return", "geopandas", "dataframe" ]
train
https://github.com/CivicSpleen/ambry/blob/d7f2be4bf1f7ffd086f3fadd4fcae60c32473e42/ambry/library/warehouse.py#L289-L318
CivicSpleen/ambry
ambry/library/warehouse.py
Warehouse.shapes
def shapes(self, simplify=None): """ Return geodata as a list of Shapely shapes :param simplify: Integer or None. Simplify the geometry to a tolerance, in the units of the geometry. :param predicate: A single-argument function to select which records to include in the output. :return: A list of Shapely objects """ from shapely.wkt import loads if simplify: return [loads(row.geometry).simplify(simplify) for row in self] else: return [loads(row.geometry) for row in self]
python
def shapes(self, simplify=None): """ Return geodata as a list of Shapely shapes :param simplify: Integer or None. Simplify the geometry to a tolerance, in the units of the geometry. :param predicate: A single-argument function to select which records to include in the output. :return: A list of Shapely objects """ from shapely.wkt import loads if simplify: return [loads(row.geometry).simplify(simplify) for row in self] else: return [loads(row.geometry) for row in self]
[ "def", "shapes", "(", "self", ",", "simplify", "=", "None", ")", ":", "from", "shapely", ".", "wkt", "import", "loads", "if", "simplify", ":", "return", "[", "loads", "(", "row", ".", "geometry", ")", ".", "simplify", "(", "simplify", ")", "for", "ro...
Return geodata as a list of Shapely shapes :param simplify: Integer or None. Simplify the geometry to a tolerance, in the units of the geometry. :param predicate: A single-argument function to select which records to include in the output. :return: A list of Shapely objects
[ "Return", "geodata", "as", "a", "list", "of", "Shapely", "shapes" ]
train
https://github.com/CivicSpleen/ambry/blob/d7f2be4bf1f7ffd086f3fadd4fcae60c32473e42/ambry/library/warehouse.py#L320-L335
PythonRails/rails
rails/request.py
Request.get_url_param
def get_url_param(self, index, default=None): """ Return url parameter with given index. Args: - index: starts from zero, and come after controller and action names in url. """ params = self.get_url_params() return params[index] if index < len(params) else default
python
def get_url_param(self, index, default=None): """ Return url parameter with given index. Args: - index: starts from zero, and come after controller and action names in url. """ params = self.get_url_params() return params[index] if index < len(params) else default
[ "def", "get_url_param", "(", "self", ",", "index", ",", "default", "=", "None", ")", ":", "params", "=", "self", ".", "get_url_params", "(", ")", "return", "params", "[", "index", "]", "if", "index", "<", "len", "(", "params", ")", "else", "default" ]
Return url parameter with given index. Args: - index: starts from zero, and come after controller and action names in url.
[ "Return", "url", "parameter", "with", "given", "index", "." ]
train
https://github.com/PythonRails/rails/blob/1e199b9da4da5b24fef39fc6212d71fc9fbb18a5/rails/request.py#L32-L41
edoburu/django-any-urlfield
any_urlfield/registry.py
UrlType.get_widget
def get_widget(self): """ Create the widget for the URL type. """ form_field = self.get_form_field() widget = form_field.widget if isinstance(widget, type): widget = widget() # Widget instantiation needs to happen manually. # Auto skip if choices is not an existing attribute. form_field_choices = getattr(form_field, 'choices', None) if form_field_choices is not None: if hasattr(widget, 'choices'): widget.choices = form_field_choices return widget
python
def get_widget(self): """ Create the widget for the URL type. """ form_field = self.get_form_field() widget = form_field.widget if isinstance(widget, type): widget = widget() # Widget instantiation needs to happen manually. # Auto skip if choices is not an existing attribute. form_field_choices = getattr(form_field, 'choices', None) if form_field_choices is not None: if hasattr(widget, 'choices'): widget.choices = form_field_choices return widget
[ "def", "get_widget", "(", "self", ")", ":", "form_field", "=", "self", ".", "get_form_field", "(", ")", "widget", "=", "form_field", ".", "widget", "if", "isinstance", "(", "widget", ",", "type", ")", ":", "widget", "=", "widget", "(", ")", "# Widget ins...
Create the widget for the URL type.
[ "Create", "the", "widget", "for", "the", "URL", "type", "." ]
train
https://github.com/edoburu/django-any-urlfield/blob/8d7d36c8a1fc251930f6dbdcc8b5b5151d20e3ab/any_urlfield/registry.py#L57-L72
edoburu/django-any-urlfield
any_urlfield/registry.py
UrlTypeRegistry.register
def register(self, ModelClass, form_field=None, widget=None, title=None, prefix=None, has_id_value=True): """ Register a custom model with the ``AnyUrlField``. """ if any(urltype.model == ModelClass for urltype in self._url_types): raise ValueError("Model is already registered: '{0}'".format(ModelClass)) opts = ModelClass._meta opts = opts.concrete_model._meta if not prefix: # Store something descriptive, easier to lookup from raw database content. prefix = '{0}.{1}'.format(opts.app_label, opts.object_name.lower()) if not title: title = ModelClass._meta.verbose_name if self.is_external_url_prefix(prefix): raise ValueError("Invalid prefix value: '{0}'.".format(prefix)) if self[prefix] is not None: raise ValueError("Prefix is already registered: '{0}'".format(prefix)) if form_field is not None and widget is not None: raise ValueError("Provide either a form_field or widget; use the widget parameter of the form field instead.") urltype = UrlType(ModelClass, form_field, widget, title, prefix, has_id_value) signals.post_save.connect(_on_model_save, sender=ModelClass) self._url_types.append(urltype) return urltype
python
def register(self, ModelClass, form_field=None, widget=None, title=None, prefix=None, has_id_value=True): """ Register a custom model with the ``AnyUrlField``. """ if any(urltype.model == ModelClass for urltype in self._url_types): raise ValueError("Model is already registered: '{0}'".format(ModelClass)) opts = ModelClass._meta opts = opts.concrete_model._meta if not prefix: # Store something descriptive, easier to lookup from raw database content. prefix = '{0}.{1}'.format(opts.app_label, opts.object_name.lower()) if not title: title = ModelClass._meta.verbose_name if self.is_external_url_prefix(prefix): raise ValueError("Invalid prefix value: '{0}'.".format(prefix)) if self[prefix] is not None: raise ValueError("Prefix is already registered: '{0}'".format(prefix)) if form_field is not None and widget is not None: raise ValueError("Provide either a form_field or widget; use the widget parameter of the form field instead.") urltype = UrlType(ModelClass, form_field, widget, title, prefix, has_id_value) signals.post_save.connect(_on_model_save, sender=ModelClass) self._url_types.append(urltype) return urltype
[ "def", "register", "(", "self", ",", "ModelClass", ",", "form_field", "=", "None", ",", "widget", "=", "None", ",", "title", "=", "None", ",", "prefix", "=", "None", ",", "has_id_value", "=", "True", ")", ":", "if", "any", "(", "urltype", ".", "model...
Register a custom model with the ``AnyUrlField``.
[ "Register", "a", "custom", "model", "with", "the", "AnyUrlField", "." ]
train
https://github.com/edoburu/django-any-urlfield/blob/8d7d36c8a1fc251930f6dbdcc8b5b5151d20e3ab/any_urlfield/registry.py#L90-L116
edoburu/django-any-urlfield
any_urlfield/registry.py
UrlTypeRegistry.get_for_model
def get_for_model(self, ModelClass): """ Return the URL type for a given model class """ for urltype in self._url_types: if urltype.model is ModelClass: return urltype return None
python
def get_for_model(self, ModelClass): """ Return the URL type for a given model class """ for urltype in self._url_types: if urltype.model is ModelClass: return urltype return None
[ "def", "get_for_model", "(", "self", ",", "ModelClass", ")", ":", "for", "urltype", "in", "self", ".", "_url_types", ":", "if", "urltype", ".", "model", "is", "ModelClass", ":", "return", "urltype", "return", "None" ]
Return the URL type for a given model class
[ "Return", "the", "URL", "type", "for", "a", "given", "model", "class" ]
train
https://github.com/edoburu/django-any-urlfield/blob/8d7d36c8a1fc251930f6dbdcc8b5b5151d20e3ab/any_urlfield/registry.py#L136-L143
edoburu/django-any-urlfield
any_urlfield/registry.py
UrlTypeRegistry.index
def index(self, prefix): """ Return the model index for a prefix. """ # Any web domain will be handled by the standard URLField. if self.is_external_url_prefix(prefix): prefix = 'http' for i, urltype in enumerate(self._url_types): if urltype.prefix == prefix: return i return None
python
def index(self, prefix): """ Return the model index for a prefix. """ # Any web domain will be handled by the standard URLField. if self.is_external_url_prefix(prefix): prefix = 'http' for i, urltype in enumerate(self._url_types): if urltype.prefix == prefix: return i return None
[ "def", "index", "(", "self", ",", "prefix", ")", ":", "# Any web domain will be handled by the standard URLField.", "if", "self", ".", "is_external_url_prefix", "(", "prefix", ")", ":", "prefix", "=", "'http'", "for", "i", ",", "urltype", "in", "enumerate", "(", ...
Return the model index for a prefix.
[ "Return", "the", "model", "index", "for", "a", "prefix", "." ]
train
https://github.com/edoburu/django-any-urlfield/blob/8d7d36c8a1fc251930f6dbdcc8b5b5151d20e3ab/any_urlfield/registry.py#L155-L166
caktus/rapidsms-tropo
rtropo/views.py
message_received
def message_received(request, backend_name): """Handle HTTP requests from Tropo. """ logger.debug("@@ request from Tropo - raw data: %s" % request.body) try: post = json.loads(request.body) except ValueError: logger.exception("EXCEPTION decoding post data in incoming request") return HttpResponseBadRequest() except Exception: logger.exception("@@responding to tropo with error") return HttpResponseServerError() logger.debug("@@ Decoded data: %r" % post) if 'session' not in post: logger.error("@@HEY, post does not contain session, " "what's going on?") return HttpResponseBadRequest() session = post['session'] parms = session.get('parameters', {}) if 'program' in parms: # Execute a program that we passed to Tropo to pass back to us. # Extract the program, while verifying it came from us and # has not been modified. try: program = signing.loads(parms['program']) except signing.BadSignature: logger.exception("@@ received program with bad signature") return HttpResponseBadRequest() return HttpResponse(json.dumps(program)) if 'from' in session: # Must have received a message # FIXME: is there any way we can verify it's really Tropo calling us? logger.debug("@@Got a text message") try: from_address = session['from']['id'] text = session['initialText'] logger.debug("@@Received message from %s: %s" % (from_address, text)) # pass the message to RapidSMS identity = from_address connections = lookup_connections(backend_name, [identity]) receive(text, connections[0]) # Respond nicely to Tropo program = json.dumps({"tropo": [{"hangup": {}}]}) logger.debug("@@responding to tropo with hangup") return HttpResponse(program) except Exception: logger.exception("@@responding to tropo with error") return HttpResponseServerError() logger.error("@@No recognized command in request from Tropo") return HttpResponseBadRequest()
python
def message_received(request, backend_name): """Handle HTTP requests from Tropo. """ logger.debug("@@ request from Tropo - raw data: %s" % request.body) try: post = json.loads(request.body) except ValueError: logger.exception("EXCEPTION decoding post data in incoming request") return HttpResponseBadRequest() except Exception: logger.exception("@@responding to tropo with error") return HttpResponseServerError() logger.debug("@@ Decoded data: %r" % post) if 'session' not in post: logger.error("@@HEY, post does not contain session, " "what's going on?") return HttpResponseBadRequest() session = post['session'] parms = session.get('parameters', {}) if 'program' in parms: # Execute a program that we passed to Tropo to pass back to us. # Extract the program, while verifying it came from us and # has not been modified. try: program = signing.loads(parms['program']) except signing.BadSignature: logger.exception("@@ received program with bad signature") return HttpResponseBadRequest() return HttpResponse(json.dumps(program)) if 'from' in session: # Must have received a message # FIXME: is there any way we can verify it's really Tropo calling us? logger.debug("@@Got a text message") try: from_address = session['from']['id'] text = session['initialText'] logger.debug("@@Received message from %s: %s" % (from_address, text)) # pass the message to RapidSMS identity = from_address connections = lookup_connections(backend_name, [identity]) receive(text, connections[0]) # Respond nicely to Tropo program = json.dumps({"tropo": [{"hangup": {}}]}) logger.debug("@@responding to tropo with hangup") return HttpResponse(program) except Exception: logger.exception("@@responding to tropo with error") return HttpResponseServerError() logger.error("@@No recognized command in request from Tropo") return HttpResponseBadRequest()
[ "def", "message_received", "(", "request", ",", "backend_name", ")", ":", "logger", ".", "debug", "(", "\"@@ request from Tropo - raw data: %s\"", "%", "request", ".", "body", ")", "try", ":", "post", "=", "json", ".", "loads", "(", "request", ".", "body", "...
Handle HTTP requests from Tropo.
[ "Handle", "HTTP", "requests", "from", "Tropo", "." ]
train
https://github.com/caktus/rapidsms-tropo/blob/7680f351053837f210bf90576fa8f947c8f1c5c6/rtropo/views.py#L21-L81
project-ncl/pnc-cli
pnc_cli/runningbuilds.py
list_running_builds
def list_running_builds(page_size=200, page_index=0, sort=""): """ List all RunningBuilds """ content = list_running_builds_raw(page_size, page_index, sort) if content: return utils.format_json_list(content)
python
def list_running_builds(page_size=200, page_index=0, sort=""): """ List all RunningBuilds """ content = list_running_builds_raw(page_size, page_index, sort) if content: return utils.format_json_list(content)
[ "def", "list_running_builds", "(", "page_size", "=", "200", ",", "page_index", "=", "0", ",", "sort", "=", "\"\"", ")", ":", "content", "=", "list_running_builds_raw", "(", "page_size", ",", "page_index", ",", "sort", ")", "if", "content", ":", "return", "...
List all RunningBuilds
[ "List", "all", "RunningBuilds" ]
train
https://github.com/project-ncl/pnc-cli/blob/3dc149bf84928f60a8044ac50b58bbaddd451902/pnc_cli/runningbuilds.py#L13-L19
flaviogrossi/sockjs-cyclone
sockjs/cyclone/router.py
SockJSRouter.create_session
def create_session(self, session_id, register=True, session_factory=None): """ Creates new session object and returns it. @param session_id: Session id. If not provided, will generate a new session id. @param register: Should be the session registered in a storage. Websockets don't need it. @param session_factory: Use the given (class, args, kwargs) tuple to create the session. Class should derive from `BaseSession`. Normally not needed. """ if session_factory is not None: # use custom class to create session sess_factory, sess_args, sess_kwargs = session_factory s = sess_factory(*sess_args, **sess_kwargs) else: # use default session and arguments if not using a custom session # factory s = session.Session(self._connection, self, session_id, self.settings.get('disconnect_delay')) if register: self._sessions.add(s) return s
python
def create_session(self, session_id, register=True, session_factory=None): """ Creates new session object and returns it. @param session_id: Session id. If not provided, will generate a new session id. @param register: Should be the session registered in a storage. Websockets don't need it. @param session_factory: Use the given (class, args, kwargs) tuple to create the session. Class should derive from `BaseSession`. Normally not needed. """ if session_factory is not None: # use custom class to create session sess_factory, sess_args, sess_kwargs = session_factory s = sess_factory(*sess_args, **sess_kwargs) else: # use default session and arguments if not using a custom session # factory s = session.Session(self._connection, self, session_id, self.settings.get('disconnect_delay')) if register: self._sessions.add(s) return s
[ "def", "create_session", "(", "self", ",", "session_id", ",", "register", "=", "True", ",", "session_factory", "=", "None", ")", ":", "if", "session_factory", "is", "not", "None", ":", "# use custom class to create session", "sess_factory", ",", "sess_args", ",", ...
Creates new session object and returns it. @param session_id: Session id. If not provided, will generate a new session id. @param register: Should be the session registered in a storage. Websockets don't need it. @param session_factory: Use the given (class, args, kwargs) tuple to create the session. Class should derive from `BaseSession`. Normally not needed.
[ "Creates", "new", "session", "object", "and", "returns", "it", "." ]
train
https://github.com/flaviogrossi/sockjs-cyclone/blob/d3ca053ec1aa1e85f652347bff562c2319be37a2/sockjs/cyclone/router.py#L131-L158
flaviogrossi/sockjs-cyclone
sockjs/cyclone/router.py
SockJSRouter.broadcast
def broadcast(self, clients, msg): """ Optimized C{broadcast} implementation. Depending on type of the session, will json-encode message once and will call either C{send_message} or C{send_jsonifed}. @param clients: Clients iterable @param msg: Message to send """ json_msg = None count = 0 for c in clients: sess = c.session if not sess.is_closed: if sess.send_expects_json: if json_msg is None: json_msg = proto.json_encode(msg) sess.send_jsonified(json_msg, stats=False) else: sess.send_message(msg, stats=False) count += 1 self.stats.packSent(count)
python
def broadcast(self, clients, msg): """ Optimized C{broadcast} implementation. Depending on type of the session, will json-encode message once and will call either C{send_message} or C{send_jsonifed}. @param clients: Clients iterable @param msg: Message to send """ json_msg = None count = 0 for c in clients: sess = c.session if not sess.is_closed: if sess.send_expects_json: if json_msg is None: json_msg = proto.json_encode(msg) sess.send_jsonified(json_msg, stats=False) else: sess.send_message(msg, stats=False) count += 1 self.stats.packSent(count)
[ "def", "broadcast", "(", "self", ",", "clients", ",", "msg", ")", ":", "json_msg", "=", "None", "count", "=", "0", "for", "c", "in", "clients", ":", "sess", "=", "c", ".", "session", "if", "not", "sess", ".", "is_closed", ":", "if", "sess", ".", ...
Optimized C{broadcast} implementation. Depending on type of the session, will json-encode message once and will call either C{send_message} or C{send_jsonifed}. @param clients: Clients iterable @param msg: Message to send
[ "Optimized", "C", "{", "broadcast", "}", "implementation", ".", "Depending", "on", "type", "of", "the", "session", "will", "json", "-", "encode", "message", "once", "and", "will", "call", "either", "C", "{", "send_message", "}", "or", "C", "{", "send_jsoni...
train
https://github.com/flaviogrossi/sockjs-cyclone/blob/d3ca053ec1aa1e85f652347bff562c2319be37a2/sockjs/cyclone/router.py#L168-L193
SmartTeleMax/iktomi
iktomi/forms/convs.py
between
def between(min_value, max_value): 'Numerical values limit' message = N_('value should be between %(min)d and %(max)d') % \ dict(min=min_value, max=max_value) @validator(message) def wrapper(conv, value): if value is None: # it meens that this value is not required return True if value < min_value: return False if value > max_value: return False return True return wrapper
python
def between(min_value, max_value): 'Numerical values limit' message = N_('value should be between %(min)d and %(max)d') % \ dict(min=min_value, max=max_value) @validator(message) def wrapper(conv, value): if value is None: # it meens that this value is not required return True if value < min_value: return False if value > max_value: return False return True return wrapper
[ "def", "between", "(", "min_value", ",", "max_value", ")", ":", "message", "=", "N_", "(", "'value should be between %(min)d and %(max)d'", ")", "%", "dict", "(", "min", "=", "min_value", ",", "max", "=", "max_value", ")", "@", "validator", "(", "message", "...
Numerical values limit
[ "Numerical", "values", "limit" ]
train
https://github.com/SmartTeleMax/iktomi/blob/80bc0f1408d63efe7f5844367d1f6efba44b35f2/iktomi/forms/convs.py#L250-L265
SmartTeleMax/iktomi
iktomi/forms/convs.py
Converter.accept
def accept(self, value, silent=False): ''' Accepts a value from the form, calls :meth:`to_python` method, checks `required` condition, applies filters and validators, catches ValidationError. :param value: a value to be accepted :param silent=False: write errors to `form.errors` or not ''' try: value = self.to_python(value) for v in self.validators: value = v(self, value) if self.required and self._is_empty(value): raise ValidationError(self.error_required) except ValidationError as e: if not silent: e.fill_errors(self.field) #NOTE: by default value for field is in python_data, # but this is not true for FieldList where data # is dynamic, so we set value to None for absent value. value = self._existing_value return value
python
def accept(self, value, silent=False): ''' Accepts a value from the form, calls :meth:`to_python` method, checks `required` condition, applies filters and validators, catches ValidationError. :param value: a value to be accepted :param silent=False: write errors to `form.errors` or not ''' try: value = self.to_python(value) for v in self.validators: value = v(self, value) if self.required and self._is_empty(value): raise ValidationError(self.error_required) except ValidationError as e: if not silent: e.fill_errors(self.field) #NOTE: by default value for field is in python_data, # but this is not true for FieldList where data # is dynamic, so we set value to None for absent value. value = self._existing_value return value
[ "def", "accept", "(", "self", ",", "value", ",", "silent", "=", "False", ")", ":", "try", ":", "value", "=", "self", ".", "to_python", "(", "value", ")", "for", "v", "in", "self", ".", "validators", ":", "value", "=", "v", "(", "self", ",", "valu...
Accepts a value from the form, calls :meth:`to_python` method, checks `required` condition, applies filters and validators, catches ValidationError. :param value: a value to be accepted :param silent=False: write errors to `form.errors` or not
[ "Accepts", "a", "value", "from", "the", "form", "calls", ":", "meth", ":", "to_python", "method", "checks", "required", "condition", "applies", "filters", "and", "validators", "catches", "ValidationError", "." ]
train
https://github.com/SmartTeleMax/iktomi/blob/80bc0f1408d63efe7f5844367d1f6efba44b35f2/iktomi/forms/convs.py#L135-L158
SmartTeleMax/iktomi
iktomi/forms/convs.py
CharBased.clean_value
def clean_value(self, value): ''' Additional clean action to preprocess value before :meth:`to_python` method. Subclasses may define own clean_value method to allow additional clean actions like html cleanup, etc. ''' # We have to clean before checking min/max length. It's done in # separate method to allow additional clean action in subclasses. if self.nontext_replacement is not None: value = replace_nontext(value, self.nontext_replacement) if self.strip: value = value.strip() return value
python
def clean_value(self, value): ''' Additional clean action to preprocess value before :meth:`to_python` method. Subclasses may define own clean_value method to allow additional clean actions like html cleanup, etc. ''' # We have to clean before checking min/max length. It's done in # separate method to allow additional clean action in subclasses. if self.nontext_replacement is not None: value = replace_nontext(value, self.nontext_replacement) if self.strip: value = value.strip() return value
[ "def", "clean_value", "(", "self", ",", "value", ")", ":", "# We have to clean before checking min/max length. It's done in", "# separate method to allow additional clean action in subclasses.", "if", "self", ".", "nontext_replacement", "is", "not", "None", ":", "value", "=", ...
Additional clean action to preprocess value before :meth:`to_python` method. Subclasses may define own clean_value method to allow additional clean actions like html cleanup, etc.
[ "Additional", "clean", "action", "to", "preprocess", "value", "before", ":", "meth", ":", "to_python", "method", "." ]
train
https://github.com/SmartTeleMax/iktomi/blob/80bc0f1408d63efe7f5844367d1f6efba44b35f2/iktomi/forms/convs.py#L280-L294
SmartTeleMax/iktomi
iktomi/forms/convs.py
EnumChoice.options
def options(self): ''' Yields `(raw_value, label)` pairs for all acceptable choices. ''' conv = self.conv for python_value, label in self.choices: yield conv.from_python(python_value), label
python
def options(self): ''' Yields `(raw_value, label)` pairs for all acceptable choices. ''' conv = self.conv for python_value, label in self.choices: yield conv.from_python(python_value), label
[ "def", "options", "(", "self", ")", ":", "conv", "=", "self", ".", "conv", "for", "python_value", ",", "label", "in", "self", ".", "choices", ":", "yield", "conv", ".", "from_python", "(", "python_value", ")", ",", "label" ]
Yields `(raw_value, label)` pairs for all acceptable choices.
[ "Yields", "(", "raw_value", "label", ")", "pairs", "for", "all", "acceptable", "choices", "." ]
train
https://github.com/SmartTeleMax/iktomi/blob/80bc0f1408d63efe7f5844367d1f6efba44b35f2/iktomi/forms/convs.py#L400-L406
mithro/python-datetime-tz
datetime_tz/__init__.py
_tzinfome
def _tzinfome(tzinfo): """Gets a tzinfo object from a string. Args: tzinfo: A string (or string like) object, or a datetime.tzinfo object. Returns: An datetime.tzinfo object. Raises: UnknownTimeZoneError: If the timezone given can't be decoded. """ if not isinstance(tzinfo, datetime.tzinfo): try: tzinfo = pytz.timezone(tzinfo) assert tzinfo.zone in pytz.all_timezones except AttributeError: raise pytz.UnknownTimeZoneError("Unknown timezone! %s" % tzinfo) return tzinfo
python
def _tzinfome(tzinfo): """Gets a tzinfo object from a string. Args: tzinfo: A string (or string like) object, or a datetime.tzinfo object. Returns: An datetime.tzinfo object. Raises: UnknownTimeZoneError: If the timezone given can't be decoded. """ if not isinstance(tzinfo, datetime.tzinfo): try: tzinfo = pytz.timezone(tzinfo) assert tzinfo.zone in pytz.all_timezones except AttributeError: raise pytz.UnknownTimeZoneError("Unknown timezone! %s" % tzinfo) return tzinfo
[ "def", "_tzinfome", "(", "tzinfo", ")", ":", "if", "not", "isinstance", "(", "tzinfo", ",", "datetime", ".", "tzinfo", ")", ":", "try", ":", "tzinfo", "=", "pytz", ".", "timezone", "(", "tzinfo", ")", "assert", "tzinfo", ".", "zone", "in", "pytz", "....
Gets a tzinfo object from a string. Args: tzinfo: A string (or string like) object, or a datetime.tzinfo object. Returns: An datetime.tzinfo object. Raises: UnknownTimeZoneError: If the timezone given can't be decoded.
[ "Gets", "a", "tzinfo", "object", "from", "a", "string", "." ]
train
https://github.com/mithro/python-datetime-tz/blob/3c682d003f8b28e39f0c096773e471aeb68e6bbb/datetime_tz/__init__.py#L85-L103
mithro/python-datetime-tz
datetime_tz/__init__.py
localize
def localize(dt, force_to_local=True): """Localize a datetime to the local timezone. If dt is naive, returns the same datetime with the local timezone, otherwise uses astimezone to convert. Args: dt: datetime object. force_to_local: Force all results to be in local time. Returns: A datetime_tz object. """ if not isinstance(dt, datetime_tz): if not dt.tzinfo: return datetime_tz(dt, tzinfo=localtz()) dt = datetime_tz(dt) if force_to_local: return dt.astimezone(localtz()) return dt
python
def localize(dt, force_to_local=True): """Localize a datetime to the local timezone. If dt is naive, returns the same datetime with the local timezone, otherwise uses astimezone to convert. Args: dt: datetime object. force_to_local: Force all results to be in local time. Returns: A datetime_tz object. """ if not isinstance(dt, datetime_tz): if not dt.tzinfo: return datetime_tz(dt, tzinfo=localtz()) dt = datetime_tz(dt) if force_to_local: return dt.astimezone(localtz()) return dt
[ "def", "localize", "(", "dt", ",", "force_to_local", "=", "True", ")", ":", "if", "not", "isinstance", "(", "dt", ",", "datetime_tz", ")", ":", "if", "not", "dt", ".", "tzinfo", ":", "return", "datetime_tz", "(", "dt", ",", "tzinfo", "=", "localtz", ...
Localize a datetime to the local timezone. If dt is naive, returns the same datetime with the local timezone, otherwise uses astimezone to convert. Args: dt: datetime object. force_to_local: Force all results to be in local time. Returns: A datetime_tz object.
[ "Localize", "a", "datetime", "to", "the", "local", "timezone", "." ]
train
https://github.com/mithro/python-datetime-tz/blob/3c682d003f8b28e39f0c096773e471aeb68e6bbb/datetime_tz/__init__.py#L110-L129
mithro/python-datetime-tz
datetime_tz/__init__.py
get_naive
def get_naive(dt): """Gets a naive datetime from a datetime. datetime_tz objects can't just have tzinfo replaced with None, you need to call asdatetime. Args: dt: datetime object. Returns: datetime object without any timezone information. """ if not dt.tzinfo: return dt if hasattr(dt, "asdatetime"): return dt.asdatetime() return dt.replace(tzinfo=None)
python
def get_naive(dt): """Gets a naive datetime from a datetime. datetime_tz objects can't just have tzinfo replaced with None, you need to call asdatetime. Args: dt: datetime object. Returns: datetime object without any timezone information. """ if not dt.tzinfo: return dt if hasattr(dt, "asdatetime"): return dt.asdatetime() return dt.replace(tzinfo=None)
[ "def", "get_naive", "(", "dt", ")", ":", "if", "not", "dt", ".", "tzinfo", ":", "return", "dt", "if", "hasattr", "(", "dt", ",", "\"asdatetime\"", ")", ":", "return", "dt", ".", "asdatetime", "(", ")", "return", "dt", ".", "replace", "(", "tzinfo", ...
Gets a naive datetime from a datetime. datetime_tz objects can't just have tzinfo replaced with None, you need to call asdatetime. Args: dt: datetime object. Returns: datetime object without any timezone information.
[ "Gets", "a", "naive", "datetime", "from", "a", "datetime", "." ]
train
https://github.com/mithro/python-datetime-tz/blob/3c682d003f8b28e39f0c096773e471aeb68e6bbb/datetime_tz/__init__.py#L132-L148
mithro/python-datetime-tz
datetime_tz/__init__.py
detect_timezone
def detect_timezone(): """Try and detect the timezone that Python is currently running in. We have a bunch of different methods for trying to figure this out (listed in order they are attempted). * In windows, use win32timezone.TimeZoneInfo.local() * Try TZ environment variable. * Try and find /etc/timezone file (with timezone name). * Try and find /etc/localtime file (with timezone data). * Try and match a TZ to the current dst/offset/shortname. Returns: The detected local timezone as a tzinfo object Raises: pytz.UnknownTimeZoneError: If it was unable to detect a timezone. """ if sys.platform == "win32": tz = _detect_timezone_windows() if tz is not None: return tz # First we try the TZ variable tz = _detect_timezone_environ() if tz is not None: return tz # Second we try /etc/timezone and use the value in that tz = _detect_timezone_etc_timezone() if tz is not None: return tz # Next we try and see if something matches the tzinfo in /etc/localtime tz = _detect_timezone_etc_localtime() if tz is not None: return tz # Next we try and use a similiar method to what PHP does. # We first try to search on time.tzname, time.timezone, time.daylight to # match a pytz zone. warnings.warn("Had to fall back to worst detection method (the 'PHP' " "method).") tz = _detect_timezone_php() if tz is not None: return tz raise pytz.UnknownTimeZoneError("Unable to detect your timezone!")
python
def detect_timezone(): """Try and detect the timezone that Python is currently running in. We have a bunch of different methods for trying to figure this out (listed in order they are attempted). * In windows, use win32timezone.TimeZoneInfo.local() * Try TZ environment variable. * Try and find /etc/timezone file (with timezone name). * Try and find /etc/localtime file (with timezone data). * Try and match a TZ to the current dst/offset/shortname. Returns: The detected local timezone as a tzinfo object Raises: pytz.UnknownTimeZoneError: If it was unable to detect a timezone. """ if sys.platform == "win32": tz = _detect_timezone_windows() if tz is not None: return tz # First we try the TZ variable tz = _detect_timezone_environ() if tz is not None: return tz # Second we try /etc/timezone and use the value in that tz = _detect_timezone_etc_timezone() if tz is not None: return tz # Next we try and see if something matches the tzinfo in /etc/localtime tz = _detect_timezone_etc_localtime() if tz is not None: return tz # Next we try and use a similiar method to what PHP does. # We first try to search on time.tzname, time.timezone, time.daylight to # match a pytz zone. warnings.warn("Had to fall back to worst detection method (the 'PHP' " "method).") tz = _detect_timezone_php() if tz is not None: return tz raise pytz.UnknownTimeZoneError("Unable to detect your timezone!")
[ "def", "detect_timezone", "(", ")", ":", "if", "sys", ".", "platform", "==", "\"win32\"", ":", "tz", "=", "_detect_timezone_windows", "(", ")", "if", "tz", "is", "not", "None", ":", "return", "tz", "# First we try the TZ variable", "tz", "=", "_detect_timezone...
Try and detect the timezone that Python is currently running in. We have a bunch of different methods for trying to figure this out (listed in order they are attempted). * In windows, use win32timezone.TimeZoneInfo.local() * Try TZ environment variable. * Try and find /etc/timezone file (with timezone name). * Try and find /etc/localtime file (with timezone data). * Try and match a TZ to the current dst/offset/shortname. Returns: The detected local timezone as a tzinfo object Raises: pytz.UnknownTimeZoneError: If it was unable to detect a timezone.
[ "Try", "and", "detect", "the", "timezone", "that", "Python", "is", "currently", "running", "in", "." ]
train
https://github.com/mithro/python-datetime-tz/blob/3c682d003f8b28e39f0c096773e471aeb68e6bbb/datetime_tz/__init__.py#L183-L230
mithro/python-datetime-tz
datetime_tz/__init__.py
_load_local_tzinfo
def _load_local_tzinfo(): """Load zoneinfo from local disk.""" tzdir = os.environ.get("TZDIR", "/usr/share/zoneinfo/posix") localtzdata = {} for dirpath, _, filenames in os.walk(tzdir): for filename in filenames: filepath = os.path.join(dirpath, filename) name = os.path.relpath(filepath, tzdir) f = open(filepath, "rb") tzinfo = pytz.tzfile.build_tzinfo(name, f) f.close() localtzdata[name] = tzinfo return localtzdata
python
def _load_local_tzinfo(): """Load zoneinfo from local disk.""" tzdir = os.environ.get("TZDIR", "/usr/share/zoneinfo/posix") localtzdata = {} for dirpath, _, filenames in os.walk(tzdir): for filename in filenames: filepath = os.path.join(dirpath, filename) name = os.path.relpath(filepath, tzdir) f = open(filepath, "rb") tzinfo = pytz.tzfile.build_tzinfo(name, f) f.close() localtzdata[name] = tzinfo return localtzdata
[ "def", "_load_local_tzinfo", "(", ")", ":", "tzdir", "=", "os", ".", "environ", ".", "get", "(", "\"TZDIR\"", ",", "\"/usr/share/zoneinfo/posix\"", ")", "localtzdata", "=", "{", "}", "for", "dirpath", ",", "_", ",", "filenames", "in", "os", ".", "walk", ...
Load zoneinfo from local disk.
[ "Load", "zoneinfo", "from", "local", "disk", "." ]
train
https://github.com/mithro/python-datetime-tz/blob/3c682d003f8b28e39f0c096773e471aeb68e6bbb/datetime_tz/__init__.py#L257-L272
mithro/python-datetime-tz
datetime_tz/__init__.py
_detect_timezone_etc_localtime
def _detect_timezone_etc_localtime(): """Detect timezone based on /etc/localtime file.""" matches = [] if os.path.exists("/etc/localtime"): f = open("/etc/localtime", "rb") localtime = pytz.tzfile.build_tzinfo("/etc/localtime", f) f.close() # We want to match against the local database because /etc/localtime will # be copied from that. Once we have found a name for /etc/localtime, we can # use the name to get the "same" timezone from the inbuilt pytz database. tzdatabase = _load_local_tzinfo() if tzdatabase: tznames = tzdatabase.keys() tzvalues = tzdatabase.__getitem__ else: tznames = pytz.all_timezones tzvalues = _tzinfome # See if we can find a "Human Name" for this.. for tzname in tznames: tz = tzvalues(tzname) if dir(tz) != dir(localtime): continue for attrib in dir(tz): # Ignore functions and specials if callable(getattr(tz, attrib)) or attrib.startswith("__"): continue # This will always be different if attrib == "zone" or attrib == "_tzinfos": continue if getattr(tz, attrib) != getattr(localtime, attrib): break # We get here iff break didn't happen, i.e. no meaningful attributes # differ between tz and localtime else: # Try and get a timezone from pytz which has the same name as the zone # which matches in the local database. if tzname not in pytz.all_timezones: warnings.warn("Skipping %s because not in pytz database." % tzname) continue matches.append(_tzinfome(tzname)) matches.sort(key=lambda x: x.zone) if len(matches) == 1: return matches[0] if len(matches) > 1: warnings.warn("We detected multiple matches for your /etc/localtime. " "(Matches where %s)" % matches) return matches[0] else: warnings.warn("We detected no matches for your /etc/localtime.") # Register /etc/localtime as the timezone loaded. pytz._tzinfo_cache["/etc/localtime"] = localtime return localtime
python
def _detect_timezone_etc_localtime(): """Detect timezone based on /etc/localtime file.""" matches = [] if os.path.exists("/etc/localtime"): f = open("/etc/localtime", "rb") localtime = pytz.tzfile.build_tzinfo("/etc/localtime", f) f.close() # We want to match against the local database because /etc/localtime will # be copied from that. Once we have found a name for /etc/localtime, we can # use the name to get the "same" timezone from the inbuilt pytz database. tzdatabase = _load_local_tzinfo() if tzdatabase: tznames = tzdatabase.keys() tzvalues = tzdatabase.__getitem__ else: tznames = pytz.all_timezones tzvalues = _tzinfome # See if we can find a "Human Name" for this.. for tzname in tznames: tz = tzvalues(tzname) if dir(tz) != dir(localtime): continue for attrib in dir(tz): # Ignore functions and specials if callable(getattr(tz, attrib)) or attrib.startswith("__"): continue # This will always be different if attrib == "zone" or attrib == "_tzinfos": continue if getattr(tz, attrib) != getattr(localtime, attrib): break # We get here iff break didn't happen, i.e. no meaningful attributes # differ between tz and localtime else: # Try and get a timezone from pytz which has the same name as the zone # which matches in the local database. if tzname not in pytz.all_timezones: warnings.warn("Skipping %s because not in pytz database." % tzname) continue matches.append(_tzinfome(tzname)) matches.sort(key=lambda x: x.zone) if len(matches) == 1: return matches[0] if len(matches) > 1: warnings.warn("We detected multiple matches for your /etc/localtime. " "(Matches where %s)" % matches) return matches[0] else: warnings.warn("We detected no matches for your /etc/localtime.") # Register /etc/localtime as the timezone loaded. pytz._tzinfo_cache["/etc/localtime"] = localtime return localtime
[ "def", "_detect_timezone_etc_localtime", "(", ")", ":", "matches", "=", "[", "]", "if", "os", ".", "path", ".", "exists", "(", "\"/etc/localtime\"", ")", ":", "f", "=", "open", "(", "\"/etc/localtime\"", ",", "\"rb\"", ")", "localtime", "=", "pytz", ".", ...
Detect timezone based on /etc/localtime file.
[ "Detect", "timezone", "based", "on", "/", "etc", "/", "localtime", "file", "." ]
train
https://github.com/mithro/python-datetime-tz/blob/3c682d003f8b28e39f0c096773e471aeb68e6bbb/datetime_tz/__init__.py#L275-L339
mithro/python-datetime-tz
datetime_tz/__init__.py
_wrap_method
def _wrap_method(name): """Wrap a method. Patch a method which might return a datetime.datetime to return a datetime_tz.datetime_tz instead. Args: name: The name of the method to patch """ method = getattr(datetime.datetime, name) # Have to give the second argument as method has no __module__ option. @functools.wraps(method, ("__name__", "__doc__"), ()) def wrapper(self, *args, **kw): r = method(self, *args, **kw) if isinstance(r, datetime.datetime) and not isinstance(r, type(self)): r = type(self)(r) return r setattr(datetime_tz, name, wrapper)
python
def _wrap_method(name): """Wrap a method. Patch a method which might return a datetime.datetime to return a datetime_tz.datetime_tz instead. Args: name: The name of the method to patch """ method = getattr(datetime.datetime, name) # Have to give the second argument as method has no __module__ option. @functools.wraps(method, ("__name__", "__doc__"), ()) def wrapper(self, *args, **kw): r = method(self, *args, **kw) if isinstance(r, datetime.datetime) and not isinstance(r, type(self)): r = type(self)(r) return r setattr(datetime_tz, name, wrapper)
[ "def", "_wrap_method", "(", "name", ")", ":", "method", "=", "getattr", "(", "datetime", ".", "datetime", ",", "name", ")", "# Have to give the second argument as method has no __module__ option.", "@", "functools", ".", "wraps", "(", "method", ",", "(", "\"__name__...
Wrap a method. Patch a method which might return a datetime.datetime to return a datetime_tz.datetime_tz instead. Args: name: The name of the method to patch
[ "Wrap", "a", "method", "." ]
train
https://github.com/mithro/python-datetime-tz/blob/3c682d003f8b28e39f0c096773e471aeb68e6bbb/datetime_tz/__init__.py#L855-L875
mithro/python-datetime-tz
datetime_tz/__init__.py
datetime_tz.asdatetime
def asdatetime(self, naive=True): """Return this datetime_tz as a datetime object. Args: naive: Return *without* any tz info. Returns: This datetime_tz as a datetime object. """ args = list(self.timetuple()[0:6])+[self.microsecond] if not naive: args.append(self.tzinfo) return datetime.datetime(*args)
python
def asdatetime(self, naive=True): """Return this datetime_tz as a datetime object. Args: naive: Return *without* any tz info. Returns: This datetime_tz as a datetime object. """ args = list(self.timetuple()[0:6])+[self.microsecond] if not naive: args.append(self.tzinfo) return datetime.datetime(*args)
[ "def", "asdatetime", "(", "self", ",", "naive", "=", "True", ")", ":", "args", "=", "list", "(", "self", ".", "timetuple", "(", ")", "[", "0", ":", "6", "]", ")", "+", "[", "self", ".", "microsecond", "]", "if", "not", "naive", ":", "args", "."...
Return this datetime_tz as a datetime object. Args: naive: Return *without* any tz info. Returns: This datetime_tz as a datetime object.
[ "Return", "this", "datetime_tz", "as", "a", "datetime", "object", "." ]
train
https://github.com/mithro/python-datetime-tz/blob/3c682d003f8b28e39f0c096773e471aeb68e6bbb/datetime_tz/__init__.py#L476-L488
mithro/python-datetime-tz
datetime_tz/__init__.py
datetime_tz.asdate
def asdate(self): """Return this datetime_tz as a date object. Returns: This datetime_tz as a date object. """ return datetime.date(self.year, self.month, self.day)
python
def asdate(self): """Return this datetime_tz as a date object. Returns: This datetime_tz as a date object. """ return datetime.date(self.year, self.month, self.day)
[ "def", "asdate", "(", "self", ")", ":", "return", "datetime", ".", "date", "(", "self", ".", "year", ",", "self", ".", "month", ",", "self", ".", "day", ")" ]
Return this datetime_tz as a date object. Returns: This datetime_tz as a date object.
[ "Return", "this", "datetime_tz", "as", "a", "date", "object", "." ]
train
https://github.com/mithro/python-datetime-tz/blob/3c682d003f8b28e39f0c096773e471aeb68e6bbb/datetime_tz/__init__.py#L490-L496
mithro/python-datetime-tz
datetime_tz/__init__.py
datetime_tz.astimezone
def astimezone(self, tzinfo): """Returns a version of this timestamp converted to the given timezone. Args: tzinfo: Either a datetime.tzinfo object or a string (which will be looked up in pytz. Returns: A datetime_tz object in the given timezone. """ # Assert we are not a naive datetime object assert self.tzinfo is not None tzinfo = _tzinfome(tzinfo) d = self.asdatetime(naive=False).astimezone(tzinfo) return type(self)(d)
python
def astimezone(self, tzinfo): """Returns a version of this timestamp converted to the given timezone. Args: tzinfo: Either a datetime.tzinfo object or a string (which will be looked up in pytz. Returns: A datetime_tz object in the given timezone. """ # Assert we are not a naive datetime object assert self.tzinfo is not None tzinfo = _tzinfome(tzinfo) d = self.asdatetime(naive=False).astimezone(tzinfo) return type(self)(d)
[ "def", "astimezone", "(", "self", ",", "tzinfo", ")", ":", "# Assert we are not a naive datetime object", "assert", "self", ".", "tzinfo", "is", "not", "None", "tzinfo", "=", "_tzinfome", "(", "tzinfo", ")", "d", "=", "self", ".", "asdatetime", "(", "naive", ...
Returns a version of this timestamp converted to the given timezone. Args: tzinfo: Either a datetime.tzinfo object or a string (which will be looked up in pytz. Returns: A datetime_tz object in the given timezone.
[ "Returns", "a", "version", "of", "this", "timestamp", "converted", "to", "the", "given", "timezone", "." ]
train
https://github.com/mithro/python-datetime-tz/blob/3c682d003f8b28e39f0c096773e471aeb68e6bbb/datetime_tz/__init__.py#L508-L524
mithro/python-datetime-tz
datetime_tz/__init__.py
datetime_tz.replace
def replace(self, **kw): """Return datetime with new specified fields given as arguments. For example, dt.replace(days=4) would return a new datetime_tz object with exactly the same as dt but with the days attribute equal to 4. Any attribute can be replaced, but tzinfo can not be set to None. Args: Any datetime_tz attribute. Returns: A datetime_tz object with the attributes replaced. Raises: TypeError: If the given replacement is invalid. """ if "tzinfo" in kw: if kw["tzinfo"] is None: raise TypeError("Can not remove the timezone use asdatetime()") else: tzinfo = kw["tzinfo"] del kw["tzinfo"] else: tzinfo = None is_dst = None if "is_dst" in kw: is_dst = kw["is_dst"] del kw["is_dst"] else: # Use our own DST setting.. is_dst = self.is_dst replaced = self.asdatetime().replace(**kw) return type(self)( replaced, tzinfo=tzinfo or self.tzinfo.zone, is_dst=is_dst)
python
def replace(self, **kw): """Return datetime with new specified fields given as arguments. For example, dt.replace(days=4) would return a new datetime_tz object with exactly the same as dt but with the days attribute equal to 4. Any attribute can be replaced, but tzinfo can not be set to None. Args: Any datetime_tz attribute. Returns: A datetime_tz object with the attributes replaced. Raises: TypeError: If the given replacement is invalid. """ if "tzinfo" in kw: if kw["tzinfo"] is None: raise TypeError("Can not remove the timezone use asdatetime()") else: tzinfo = kw["tzinfo"] del kw["tzinfo"] else: tzinfo = None is_dst = None if "is_dst" in kw: is_dst = kw["is_dst"] del kw["is_dst"] else: # Use our own DST setting.. is_dst = self.is_dst replaced = self.asdatetime().replace(**kw) return type(self)( replaced, tzinfo=tzinfo or self.tzinfo.zone, is_dst=is_dst)
[ "def", "replace", "(", "self", ",", "*", "*", "kw", ")", ":", "if", "\"tzinfo\"", "in", "kw", ":", "if", "kw", "[", "\"tzinfo\"", "]", "is", "None", ":", "raise", "TypeError", "(", "\"Can not remove the timezone use asdatetime()\"", ")", "else", ":", "tzin...
Return datetime with new specified fields given as arguments. For example, dt.replace(days=4) would return a new datetime_tz object with exactly the same as dt but with the days attribute equal to 4. Any attribute can be replaced, but tzinfo can not be set to None. Args: Any datetime_tz attribute. Returns: A datetime_tz object with the attributes replaced. Raises: TypeError: If the given replacement is invalid.
[ "Return", "datetime", "with", "new", "specified", "fields", "given", "as", "arguments", "." ]
train
https://github.com/mithro/python-datetime-tz/blob/3c682d003f8b28e39f0c096773e471aeb68e6bbb/datetime_tz/__init__.py#L527-L564
mithro/python-datetime-tz
datetime_tz/__init__.py
datetime_tz.smartparse
def smartparse(cls, toparse, tzinfo=None): """Method which uses dateutil.parse and extras to try and parse the string. Valid dates are found at: http://labix.org/python-dateutil#head-1443e0f14ad5dff07efd465e080d1110920673d8-2 Other valid formats include: "now" or "today" "yesterday" "tomorrow" "5 minutes ago" "10 hours ago" "10h5m ago" "start of yesterday" "end of tomorrow" "end of 3rd of March" Args: toparse: The string to parse. tzinfo: Timezone for the resultant datetime_tz object should be in. (Defaults to your local timezone.) Returns: New datetime_tz object. Raises: ValueError: If unable to make sense of the input. """ # Default for empty fields are: # year/month/day == now # hour/minute/second/microsecond == 0 toparse = toparse.strip() if tzinfo is None: dt = cls.now() else: dt = cls.now(tzinfo) default = dt.replace(hour=0, minute=0, second=0, microsecond=0) # Remove "start of " and "end of " prefix in the string if toparse.lower().startswith("end of "): toparse = toparse[7:].strip() dt += datetime.timedelta(days=1) dt = dt.replace(hour=0, minute=0, second=0, microsecond=0) dt -= datetime.timedelta(microseconds=1) default = dt elif toparse.lower().startswith("start of "): toparse = toparse[9:].strip() dt = dt.replace(hour=0, minute=0, second=0, microsecond=0) default = dt # Handle strings with "now", "today", "yesterday", "tomorrow" and "ago". 
# Need to use lowercase toparselower = toparse.lower() if toparselower in ["now", "today"]: pass elif toparselower == "yesterday": dt -= datetime.timedelta(days=1) elif toparselower in ("tomorrow", "tommorrow"): # tommorrow is spelled wrong, but code out there might be depending on it # working dt += datetime.timedelta(days=1) elif "ago" in toparselower: # Remove the "ago" bit toparselower = toparselower[:-3] # Replace all "a day and an hour" with "1 day 1 hour" toparselower = toparselower.replace("a ", "1 ") toparselower = toparselower.replace("an ", "1 ") toparselower = toparselower.replace(" and ", " ") # Match the following # 1 hour ago # 1h ago # 1 h ago # 1 hour ago # 2 hours ago # Same with minutes, seconds, etc. tocheck = ("seconds", "minutes", "hours", "days", "weeks", "months", "years") result = {} for match in re.finditer("([0-9]+)([^0-9]*)", toparselower): amount = int(match.group(1)) unit = match.group(2).strip() for bit in tocheck: regex = "^([%s]|((%s)s?))$" % ( bit[0], bit[:-1]) bitmatch = re.search(regex, unit) if bitmatch: result[bit] = amount break else: raise ValueError("Was not able to parse date unit %r!" % unit) delta = dateutil.relativedelta.relativedelta(**result) dt -= delta else: # Handle strings with normal datetime format, use original case. dt = dateutil.parser.parse(toparse, default=default.asdatetime(), tzinfos=pytz_abbr.tzinfos) if dt is None: raise ValueError("Was not able to parse date!") if dt.tzinfo is pytz_abbr.unknown: dt = dt.replace(tzinfo=None) if dt.tzinfo is None: if tzinfo is None: tzinfo = localtz() dt = cls(dt, tzinfo) else: if isinstance(dt.tzinfo, pytz_abbr.tzabbr): abbr = dt.tzinfo dt = dt.replace(tzinfo=None) dt = cls(dt, abbr.zone, is_dst=abbr.dst) dt = cls(dt) return dt
python
def smartparse(cls, toparse, tzinfo=None): """Method which uses dateutil.parse and extras to try and parse the string. Valid dates are found at: http://labix.org/python-dateutil#head-1443e0f14ad5dff07efd465e080d1110920673d8-2 Other valid formats include: "now" or "today" "yesterday" "tomorrow" "5 minutes ago" "10 hours ago" "10h5m ago" "start of yesterday" "end of tomorrow" "end of 3rd of March" Args: toparse: The string to parse. tzinfo: Timezone for the resultant datetime_tz object should be in. (Defaults to your local timezone.) Returns: New datetime_tz object. Raises: ValueError: If unable to make sense of the input. """ # Default for empty fields are: # year/month/day == now # hour/minute/second/microsecond == 0 toparse = toparse.strip() if tzinfo is None: dt = cls.now() else: dt = cls.now(tzinfo) default = dt.replace(hour=0, minute=0, second=0, microsecond=0) # Remove "start of " and "end of " prefix in the string if toparse.lower().startswith("end of "): toparse = toparse[7:].strip() dt += datetime.timedelta(days=1) dt = dt.replace(hour=0, minute=0, second=0, microsecond=0) dt -= datetime.timedelta(microseconds=1) default = dt elif toparse.lower().startswith("start of "): toparse = toparse[9:].strip() dt = dt.replace(hour=0, minute=0, second=0, microsecond=0) default = dt # Handle strings with "now", "today", "yesterday", "tomorrow" and "ago". 
# Need to use lowercase toparselower = toparse.lower() if toparselower in ["now", "today"]: pass elif toparselower == "yesterday": dt -= datetime.timedelta(days=1) elif toparselower in ("tomorrow", "tommorrow"): # tommorrow is spelled wrong, but code out there might be depending on it # working dt += datetime.timedelta(days=1) elif "ago" in toparselower: # Remove the "ago" bit toparselower = toparselower[:-3] # Replace all "a day and an hour" with "1 day 1 hour" toparselower = toparselower.replace("a ", "1 ") toparselower = toparselower.replace("an ", "1 ") toparselower = toparselower.replace(" and ", " ") # Match the following # 1 hour ago # 1h ago # 1 h ago # 1 hour ago # 2 hours ago # Same with minutes, seconds, etc. tocheck = ("seconds", "minutes", "hours", "days", "weeks", "months", "years") result = {} for match in re.finditer("([0-9]+)([^0-9]*)", toparselower): amount = int(match.group(1)) unit = match.group(2).strip() for bit in tocheck: regex = "^([%s]|((%s)s?))$" % ( bit[0], bit[:-1]) bitmatch = re.search(regex, unit) if bitmatch: result[bit] = amount break else: raise ValueError("Was not able to parse date unit %r!" % unit) delta = dateutil.relativedelta.relativedelta(**result) dt -= delta else: # Handle strings with normal datetime format, use original case. dt = dateutil.parser.parse(toparse, default=default.asdatetime(), tzinfos=pytz_abbr.tzinfos) if dt is None: raise ValueError("Was not able to parse date!") if dt.tzinfo is pytz_abbr.unknown: dt = dt.replace(tzinfo=None) if dt.tzinfo is None: if tzinfo is None: tzinfo = localtz() dt = cls(dt, tzinfo) else: if isinstance(dt.tzinfo, pytz_abbr.tzabbr): abbr = dt.tzinfo dt = dt.replace(tzinfo=None) dt = cls(dt, abbr.zone, is_dst=abbr.dst) dt = cls(dt) return dt
[ "def", "smartparse", "(", "cls", ",", "toparse", ",", "tzinfo", "=", "None", ")", ":", "# Default for empty fields are:", "# year/month/day == now", "# hour/minute/second/microsecond == 0", "toparse", "=", "toparse", ".", "strip", "(", ")", "if", "tzinfo", "is", "...
Method which uses dateutil.parse and extras to try and parse the string. Valid dates are found at: http://labix.org/python-dateutil#head-1443e0f14ad5dff07efd465e080d1110920673d8-2 Other valid formats include: "now" or "today" "yesterday" "tomorrow" "5 minutes ago" "10 hours ago" "10h5m ago" "start of yesterday" "end of tomorrow" "end of 3rd of March" Args: toparse: The string to parse. tzinfo: Timezone for the resultant datetime_tz object should be in. (Defaults to your local timezone.) Returns: New datetime_tz object. Raises: ValueError: If unable to make sense of the input.
[ "Method", "which", "uses", "dateutil", ".", "parse", "and", "extras", "to", "try", "and", "parse", "the", "string", "." ]
train
https://github.com/mithro/python-datetime-tz/blob/3c682d003f8b28e39f0c096773e471aeb68e6bbb/datetime_tz/__init__.py#L568-L698
mithro/python-datetime-tz
datetime_tz/__init__.py
datetime_tz.utcfromtimestamp
def utcfromtimestamp(cls, timestamp): """Returns a datetime object of a given timestamp (in UTC).""" obj = datetime.datetime.utcfromtimestamp(timestamp) obj = pytz.utc.localize(obj) return cls(obj)
python
def utcfromtimestamp(cls, timestamp): """Returns a datetime object of a given timestamp (in UTC).""" obj = datetime.datetime.utcfromtimestamp(timestamp) obj = pytz.utc.localize(obj) return cls(obj)
[ "def", "utcfromtimestamp", "(", "cls", ",", "timestamp", ")", ":", "obj", "=", "datetime", ".", "datetime", ".", "utcfromtimestamp", "(", "timestamp", ")", "obj", "=", "pytz", ".", "utc", ".", "localize", "(", "obj", ")", "return", "cls", "(", "obj", "...
Returns a datetime object of a given timestamp (in UTC).
[ "Returns", "a", "datetime", "object", "of", "a", "given", "timestamp", "(", "in", "UTC", ")", "." ]
train
https://github.com/mithro/python-datetime-tz/blob/3c682d003f8b28e39f0c096773e471aeb68e6bbb/datetime_tz/__init__.py#L701-L705
mithro/python-datetime-tz
datetime_tz/__init__.py
datetime_tz.fromtimestamp
def fromtimestamp(cls, timestamp): """Returns a datetime object of a given timestamp (in local tz).""" d = cls.utcfromtimestamp(timestamp) return d.astimezone(localtz())
python
def fromtimestamp(cls, timestamp): """Returns a datetime object of a given timestamp (in local tz).""" d = cls.utcfromtimestamp(timestamp) return d.astimezone(localtz())
[ "def", "fromtimestamp", "(", "cls", ",", "timestamp", ")", ":", "d", "=", "cls", ".", "utcfromtimestamp", "(", "timestamp", ")", "return", "d", ".", "astimezone", "(", "localtz", "(", ")", ")" ]
Returns a datetime object of a given timestamp (in local tz).
[ "Returns", "a", "datetime", "object", "of", "a", "given", "timestamp", "(", "in", "local", "tz", ")", "." ]
train
https://github.com/mithro/python-datetime-tz/blob/3c682d003f8b28e39f0c096773e471aeb68e6bbb/datetime_tz/__init__.py#L708-L711
mithro/python-datetime-tz
datetime_tz/__init__.py
datetime_tz.utcnow
def utcnow(cls): """Return a new datetime representing UTC day and time.""" obj = datetime.datetime.utcnow() obj = cls(obj, tzinfo=pytz.utc) return obj
python
def utcnow(cls): """Return a new datetime representing UTC day and time.""" obj = datetime.datetime.utcnow() obj = cls(obj, tzinfo=pytz.utc) return obj
[ "def", "utcnow", "(", "cls", ")", ":", "obj", "=", "datetime", ".", "datetime", ".", "utcnow", "(", ")", "obj", "=", "cls", "(", "obj", ",", "tzinfo", "=", "pytz", ".", "utc", ")", "return", "obj" ]
Return a new datetime representing UTC day and time.
[ "Return", "a", "new", "datetime", "representing", "UTC", "day", "and", "time", "." ]
train
https://github.com/mithro/python-datetime-tz/blob/3c682d003f8b28e39f0c096773e471aeb68e6bbb/datetime_tz/__init__.py#L714-L718
mithro/python-datetime-tz
datetime_tz/__init__.py
datetime_tz.now
def now(cls, tzinfo=None): """[tz] -> new datetime with tz's local day and time.""" obj = cls.utcnow() if tzinfo is None: tzinfo = localtz() return obj.astimezone(tzinfo)
python
def now(cls, tzinfo=None): """[tz] -> new datetime with tz's local day and time.""" obj = cls.utcnow() if tzinfo is None: tzinfo = localtz() return obj.astimezone(tzinfo)
[ "def", "now", "(", "cls", ",", "tzinfo", "=", "None", ")", ":", "obj", "=", "cls", ".", "utcnow", "(", ")", "if", "tzinfo", "is", "None", ":", "tzinfo", "=", "localtz", "(", ")", "return", "obj", ".", "astimezone", "(", "tzinfo", ")" ]
[tz] -> new datetime with tz's local day and time.
[ "[", "tz", "]", "-", ">", "new", "datetime", "with", "tz", "s", "local", "day", "and", "time", "." ]
train
https://github.com/mithro/python-datetime-tz/blob/3c682d003f8b28e39f0c096773e471aeb68e6bbb/datetime_tz/__init__.py#L721-L726
mithro/python-datetime-tz
datetime_tz/__init__.py
datetime_tz.combine
def combine(cls, date, time, tzinfo=None): """date, time, [tz] -> datetime with same date and time fields.""" if tzinfo is None: tzinfo = localtz() return cls(datetime.datetime.combine(date, time), tzinfo)
python
def combine(cls, date, time, tzinfo=None): """date, time, [tz] -> datetime with same date and time fields.""" if tzinfo is None: tzinfo = localtz() return cls(datetime.datetime.combine(date, time), tzinfo)
[ "def", "combine", "(", "cls", ",", "date", ",", "time", ",", "tzinfo", "=", "None", ")", ":", "if", "tzinfo", "is", "None", ":", "tzinfo", "=", "localtz", "(", ")", "return", "cls", "(", "datetime", ".", "datetime", ".", "combine", "(", "date", ","...
date, time, [tz] -> datetime with same date and time fields.
[ "date", "time", "[", "tz", "]", "-", ">", "datetime", "with", "same", "date", "and", "time", "fields", "." ]
train
https://github.com/mithro/python-datetime-tz/blob/3c682d003f8b28e39f0c096773e471aeb68e6bbb/datetime_tz/__init__.py#L730-L734
mithro/python-datetime-tz
datetime_tz/__init__.py
iterate.between
def between(start, delta, end=None): """Return an iterator between this date till given end point. Example usage: >>> d = datetime_tz.smartparse("5 days ago") 2008/05/12 11:45 >>> for i in d.between(timedelta(days=1), datetime_tz.now()): >>> print i 2008/05/12 11:45 2008/05/13 11:45 2008/05/14 11:45 2008/05/15 11:45 2008/05/16 11:45 Args: start: The date to start at. delta: The interval to iterate with. end: (Optional) Date to end at. If not given the iterator will never terminate. Yields: datetime_tz objects. """ toyield = start while end is None or toyield < end: yield toyield toyield += delta
python
def between(start, delta, end=None): """Return an iterator between this date till given end point. Example usage: >>> d = datetime_tz.smartparse("5 days ago") 2008/05/12 11:45 >>> for i in d.between(timedelta(days=1), datetime_tz.now()): >>> print i 2008/05/12 11:45 2008/05/13 11:45 2008/05/14 11:45 2008/05/15 11:45 2008/05/16 11:45 Args: start: The date to start at. delta: The interval to iterate with. end: (Optional) Date to end at. If not given the iterator will never terminate. Yields: datetime_tz objects. """ toyield = start while end is None or toyield < end: yield toyield toyield += delta
[ "def", "between", "(", "start", ",", "delta", ",", "end", "=", "None", ")", ":", "toyield", "=", "start", "while", "end", "is", "None", "or", "toyield", "<", "end", ":", "yield", "toyield", "toyield", "+=", "delta" ]
Return an iterator between this date till given end point. Example usage: >>> d = datetime_tz.smartparse("5 days ago") 2008/05/12 11:45 >>> for i in d.between(timedelta(days=1), datetime_tz.now()): >>> print i 2008/05/12 11:45 2008/05/13 11:45 2008/05/14 11:45 2008/05/15 11:45 2008/05/16 11:45 Args: start: The date to start at. delta: The interval to iterate with. end: (Optional) Date to end at. If not given the iterator will never terminate. Yields: datetime_tz objects.
[ "Return", "an", "iterator", "between", "this", "date", "till", "given", "end", "point", "." ]
train
https://github.com/mithro/python-datetime-tz/blob/3c682d003f8b28e39f0c096773e471aeb68e6bbb/datetime_tz/__init__.py#L756-L782
mithro/python-datetime-tz
datetime_tz/__init__.py
iterate.days
def days(start, end=None): """Iterate over the days between the given datetime_tzs. Args: start: datetime_tz to start from. end: (Optional) Date to end at, if not given the iterator will never terminate. Returns: An iterator which generates datetime_tz objects a day apart. """ return iterate.between(start, datetime.timedelta(days=1), end)
python
def days(start, end=None): """Iterate over the days between the given datetime_tzs. Args: start: datetime_tz to start from. end: (Optional) Date to end at, if not given the iterator will never terminate. Returns: An iterator which generates datetime_tz objects a day apart. """ return iterate.between(start, datetime.timedelta(days=1), end)
[ "def", "days", "(", "start", ",", "end", "=", "None", ")", ":", "return", "iterate", ".", "between", "(", "start", ",", "datetime", ".", "timedelta", "(", "days", "=", "1", ")", ",", "end", ")" ]
Iterate over the days between the given datetime_tzs. Args: start: datetime_tz to start from. end: (Optional) Date to end at, if not given the iterator will never terminate. Returns: An iterator which generates datetime_tz objects a day apart.
[ "Iterate", "over", "the", "days", "between", "the", "given", "datetime_tzs", "." ]
train
https://github.com/mithro/python-datetime-tz/blob/3c682d003f8b28e39f0c096773e471aeb68e6bbb/datetime_tz/__init__.py#L799-L810
mithro/python-datetime-tz
datetime_tz/__init__.py
iterate.hours
def hours(start, end=None): """Iterate over the hours between the given datetime_tzs. Args: start: datetime_tz to start from. end: (Optional) Date to end at, if not given the iterator will never terminate. Returns: An iterator which generates datetime_tz objects a hour apart. """ return iterate.between(start, datetime.timedelta(hours=1), end)
python
def hours(start, end=None): """Iterate over the hours between the given datetime_tzs. Args: start: datetime_tz to start from. end: (Optional) Date to end at, if not given the iterator will never terminate. Returns: An iterator which generates datetime_tz objects a hour apart. """ return iterate.between(start, datetime.timedelta(hours=1), end)
[ "def", "hours", "(", "start", ",", "end", "=", "None", ")", ":", "return", "iterate", ".", "between", "(", "start", ",", "datetime", ".", "timedelta", "(", "hours", "=", "1", ")", ",", "end", ")" ]
Iterate over the hours between the given datetime_tzs. Args: start: datetime_tz to start from. end: (Optional) Date to end at, if not given the iterator will never terminate. Returns: An iterator which generates datetime_tz objects a hour apart.
[ "Iterate", "over", "the", "hours", "between", "the", "given", "datetime_tzs", "." ]
train
https://github.com/mithro/python-datetime-tz/blob/3c682d003f8b28e39f0c096773e471aeb68e6bbb/datetime_tz/__init__.py#L813-L824
mithro/python-datetime-tz
datetime_tz/__init__.py
iterate.minutes
def minutes(start, end=None): """Iterate over the minutes between the given datetime_tzs. Args: start: datetime_tz to start from. end: (Optional) Date to end at, if not given the iterator will never terminate. Returns: An iterator which generates datetime_tz objects a minute apart. """ return iterate.between(start, datetime.timedelta(minutes=1), end)
python
def minutes(start, end=None): """Iterate over the minutes between the given datetime_tzs. Args: start: datetime_tz to start from. end: (Optional) Date to end at, if not given the iterator will never terminate. Returns: An iterator which generates datetime_tz objects a minute apart. """ return iterate.between(start, datetime.timedelta(minutes=1), end)
[ "def", "minutes", "(", "start", ",", "end", "=", "None", ")", ":", "return", "iterate", ".", "between", "(", "start", ",", "datetime", ".", "timedelta", "(", "minutes", "=", "1", ")", ",", "end", ")" ]
Iterate over the minutes between the given datetime_tzs. Args: start: datetime_tz to start from. end: (Optional) Date to end at, if not given the iterator will never terminate. Returns: An iterator which generates datetime_tz objects a minute apart.
[ "Iterate", "over", "the", "minutes", "between", "the", "given", "datetime_tzs", "." ]
train
https://github.com/mithro/python-datetime-tz/blob/3c682d003f8b28e39f0c096773e471aeb68e6bbb/datetime_tz/__init__.py#L827-L838
kurtraschke/pyRFC3339
pyrfc3339/generator.py
generate
def generate(dt, utc=True, accept_naive=False, microseconds=False): ''' Generate an :RFC:`3339`-formatted timestamp from a :class:`datetime.datetime`. >>> from datetime import datetime >>> generate(datetime(2009,1,1,12,59,59,0,pytz.utc)) '2009-01-01T12:59:59Z' The timestamp will use UTC unless `utc=False` is specified, in which case it will use the timezone from the :class:`datetime.datetime`'s :attr:`tzinfo` parameter. >>> eastern = pytz.timezone('US/Eastern') >>> dt = eastern.localize(datetime(2009,1,1,12,59,59)) >>> generate(dt) '2009-01-01T17:59:59Z' >>> generate(dt, utc=False) '2009-01-01T12:59:59-05:00' Unless `accept_naive=True` is specified, the `datetime` must not be naive. >>> generate(datetime(2009,1,1,12,59,59,0)) Traceback (most recent call last): ... ValueError: naive datetime and accept_naive is False >>> generate(datetime(2009,1,1,12,59,59,0), accept_naive=True) '2009-01-01T12:59:59Z' If `accept_naive=True` is specified, the `datetime` is assumed to be UTC. Attempting to generate a local timestamp from a naive datetime will result in an error. >>> generate(datetime(2009,1,1,12,59,59,0), accept_naive=True, utc=False) Traceback (most recent call last): ... ValueError: cannot generate a local timestamp from a naive datetime ''' if dt.tzinfo is None: if accept_naive is True: if utc is True: dt = dt.replace(tzinfo=pytz.utc) else: raise ValueError("cannot generate a local timestamp from " + "a naive datetime") else: raise ValueError("naive datetime and accept_naive is False") if utc is True: dt = dt.astimezone(pytz.utc) timestamp = dt.strftime('%Y-%m-%dT%H:%M:%S') if microseconds is True: timestamp += dt.strftime('.%f') if dt.tzinfo is pytz.utc: timestamp += 'Z' else: timestamp += timezone(timedelta_seconds(dt.tzinfo.utcoffset(dt))) return timestamp
python
def generate(dt, utc=True, accept_naive=False, microseconds=False): ''' Generate an :RFC:`3339`-formatted timestamp from a :class:`datetime.datetime`. >>> from datetime import datetime >>> generate(datetime(2009,1,1,12,59,59,0,pytz.utc)) '2009-01-01T12:59:59Z' The timestamp will use UTC unless `utc=False` is specified, in which case it will use the timezone from the :class:`datetime.datetime`'s :attr:`tzinfo` parameter. >>> eastern = pytz.timezone('US/Eastern') >>> dt = eastern.localize(datetime(2009,1,1,12,59,59)) >>> generate(dt) '2009-01-01T17:59:59Z' >>> generate(dt, utc=False) '2009-01-01T12:59:59-05:00' Unless `accept_naive=True` is specified, the `datetime` must not be naive. >>> generate(datetime(2009,1,1,12,59,59,0)) Traceback (most recent call last): ... ValueError: naive datetime and accept_naive is False >>> generate(datetime(2009,1,1,12,59,59,0), accept_naive=True) '2009-01-01T12:59:59Z' If `accept_naive=True` is specified, the `datetime` is assumed to be UTC. Attempting to generate a local timestamp from a naive datetime will result in an error. >>> generate(datetime(2009,1,1,12,59,59,0), accept_naive=True, utc=False) Traceback (most recent call last): ... ValueError: cannot generate a local timestamp from a naive datetime ''' if dt.tzinfo is None: if accept_naive is True: if utc is True: dt = dt.replace(tzinfo=pytz.utc) else: raise ValueError("cannot generate a local timestamp from " + "a naive datetime") else: raise ValueError("naive datetime and accept_naive is False") if utc is True: dt = dt.astimezone(pytz.utc) timestamp = dt.strftime('%Y-%m-%dT%H:%M:%S') if microseconds is True: timestamp += dt.strftime('.%f') if dt.tzinfo is pytz.utc: timestamp += 'Z' else: timestamp += timezone(timedelta_seconds(dt.tzinfo.utcoffset(dt))) return timestamp
[ "def", "generate", "(", "dt", ",", "utc", "=", "True", ",", "accept_naive", "=", "False", ",", "microseconds", "=", "False", ")", ":", "if", "dt", ".", "tzinfo", "is", "None", ":", "if", "accept_naive", "is", "True", ":", "if", "utc", "is", "True", ...
Generate an :RFC:`3339`-formatted timestamp from a :class:`datetime.datetime`. >>> from datetime import datetime >>> generate(datetime(2009,1,1,12,59,59,0,pytz.utc)) '2009-01-01T12:59:59Z' The timestamp will use UTC unless `utc=False` is specified, in which case it will use the timezone from the :class:`datetime.datetime`'s :attr:`tzinfo` parameter. >>> eastern = pytz.timezone('US/Eastern') >>> dt = eastern.localize(datetime(2009,1,1,12,59,59)) >>> generate(dt) '2009-01-01T17:59:59Z' >>> generate(dt, utc=False) '2009-01-01T12:59:59-05:00' Unless `accept_naive=True` is specified, the `datetime` must not be naive. >>> generate(datetime(2009,1,1,12,59,59,0)) Traceback (most recent call last): ... ValueError: naive datetime and accept_naive is False >>> generate(datetime(2009,1,1,12,59,59,0), accept_naive=True) '2009-01-01T12:59:59Z' If `accept_naive=True` is specified, the `datetime` is assumed to be UTC. Attempting to generate a local timestamp from a naive datetime will result in an error. >>> generate(datetime(2009,1,1,12,59,59,0), accept_naive=True, utc=False) Traceback (most recent call last): ... ValueError: cannot generate a local timestamp from a naive datetime
[ "Generate", "an", ":", "RFC", ":", "3339", "-", "formatted", "timestamp", "from", "a", ":", "class", ":", "datetime", ".", "datetime", "." ]
train
https://github.com/kurtraschke/pyRFC3339/blob/e30cc1555adce0ecc7bd65509a2249d47e5a41b4/pyrfc3339/generator.py#L6-L67
jedie/django-cms-tools
django_cms_tools/fixture_helper/pages.py
get_or_create_placeholder
def get_or_create_placeholder(page, placeholder_slot, delete_existing=False): """ Get or create a placeholder on the given page. Optional: Delete existing placeholder. """ placeholder, created = page.placeholders.get_or_create( slot=placeholder_slot) if created: log.debug("Create placeholder %r for page %r", placeholder_slot, page.get_title()) else: log.debug("Use existing placeholder %r for page %r", placeholder_slot, page.get_title()) if delete_existing: queryset = CMSPlugin.objects.all().filter(placeholder=placeholder) log.info("Delete %i CMSPlugins on placeholder %s...", queryset.count(), placeholder) queryset.delete() return placeholder, created
python
def get_or_create_placeholder(page, placeholder_slot, delete_existing=False): """ Get or create a placeholder on the given page. Optional: Delete existing placeholder. """ placeholder, created = page.placeholders.get_or_create( slot=placeholder_slot) if created: log.debug("Create placeholder %r for page %r", placeholder_slot, page.get_title()) else: log.debug("Use existing placeholder %r for page %r", placeholder_slot, page.get_title()) if delete_existing: queryset = CMSPlugin.objects.all().filter(placeholder=placeholder) log.info("Delete %i CMSPlugins on placeholder %s...", queryset.count(), placeholder) queryset.delete() return placeholder, created
[ "def", "get_or_create_placeholder", "(", "page", ",", "placeholder_slot", ",", "delete_existing", "=", "False", ")", ":", "placeholder", ",", "created", "=", "page", ".", "placeholders", ".", "get_or_create", "(", "slot", "=", "placeholder_slot", ")", "if", "cre...
Get or create a placeholder on the given page. Optional: Delete existing placeholder.
[ "Get", "or", "create", "a", "placeholder", "on", "the", "given", "page", ".", "Optional", ":", "Delete", "existing", "placeholder", "." ]
train
https://github.com/jedie/django-cms-tools/blob/0a70dbbb6f770f5a73c8ecd174d5559a37262792/django_cms_tools/fixture_helper/pages.py#L24-L44