repository_name
stringlengths
5
67
func_path_in_repository
stringlengths
4
234
func_name
stringlengths
0
314
whole_func_string
stringlengths
52
3.87M
language
stringclasses
6 values
func_code_string
stringlengths
52
3.87M
func_code_tokens
listlengths
15
672k
func_documentation_string
stringlengths
1
47.2k
func_documentation_tokens
listlengths
1
3.92k
split_name
stringclasses
1 value
func_code_url
stringlengths
85
339
edoburu/django-slug-preview
slug_preview/models.py
SlugPreviewField.pre_save
def pre_save(self, instance, add):
    """Auto-generate the slug if needed.

    Reads the currently entered slug value and, when ``populate_from`` is
    configured (and either ``always_update`` is set or no value was
    entered), pulls the source value from that attribute instead. The
    slugify logic is applied even to manually entered input, the result
    is truncated to ``max_length``, stored on the instance, and returned.
    """
    # Currently entered slug for this field.
    raw = self.value_from_object(instance)

    # Auto-populate (if the form didn't do that already).
    # If you want unique_with logic, use django-autoslug instead; this
    # model field only allows parameters the form widget can take too.
    if self.populate_from and (self.always_update or not raw):
        raw = getattr(instance, self.populate_from)

    new_slug = None
    if raw:
        # Make sure the slugify logic is applied, even on manual input.
        new_slug = self.slugify(force_text(raw))
        if len(new_slug) > self.max_length:
            new_slug = new_slug[:self.max_length]

    # Make the updated slug available as an instance attribute.
    setattr(instance, self.name, new_slug)
    return new_slug
python
def pre_save(self, instance, add):
    """Auto-generate the slug if needed.

    Reads the currently entered slug value and, when ``populate_from`` is
    configured (and either ``always_update`` is set or no value was
    entered), pulls the source value from that attribute instead. The
    slugify logic is applied even to manually entered input, the result
    is truncated to ``max_length``, stored on the instance, and returned.
    """
    # Currently entered slug for this field.
    raw = self.value_from_object(instance)

    # Auto-populate (if the form didn't do that already).
    # If you want unique_with logic, use django-autoslug instead; this
    # model field only allows parameters the form widget can take too.
    if self.populate_from and (self.always_update or not raw):
        raw = getattr(instance, self.populate_from)

    new_slug = None
    if raw:
        # Make sure the slugify logic is applied, even on manual input.
        new_slug = self.slugify(force_text(raw))
        if len(new_slug) > self.max_length:
            new_slug = new_slug[:self.max_length]

    # Make the updated slug available as an instance attribute.
    setattr(instance, self.name, new_slug)
    return new_slug
[ "def", "pre_save", "(", "self", ",", "instance", ",", "add", ")", ":", "# get currently entered slug", "value", "=", "self", ".", "value_from_object", "(", "instance", ")", "slug", "=", "None", "# auto populate (if the form didn't do that already).", "# If you want uniq...
Auto-generate the slug if needed.
[ "Auto", "-", "generate", "the", "slug", "if", "needed", "." ]
train
https://github.com/edoburu/django-slug-preview/blob/90d1bf5b408cfb71fac9be1d438890ff0b66ea51/slug_preview/models.py#L37-L61
theonion/django-bulbs
bulbs/reading_list/slicers.py
FirstSlotSlicer
def FirstSlotSlicer(primary_query, secondary_query, limit=30):  # noqa
    """Inject the first object from a queryset into the first position of a
    reading list.

    :param primary_query: djes.LazySearch object. Default queryset for the
        reading list.
    :param secondary_query: djes.LazySearch object. Its first result leads
        the reading list.
    :return: mixed reading-list slicer.
    """
    slicer = SearchSlicer(limit=limit)
    # The primary queryset fills every slot by default.
    slicer.register_queryset(primary_query)
    # The secondary queryset only claims index 0 (the lead slot).
    slicer.register_queryset(secondary_query, validator=lambda index: bool(index == 0))
    return slicer
python
def FirstSlotSlicer(primary_query, secondary_query, limit=30):  # noqa
    """Inject the first object from a queryset into the first position of a
    reading list.

    :param primary_query: djes.LazySearch object. Default queryset for the
        reading list.
    :param secondary_query: djes.LazySearch object. Its first result leads
        the reading list.
    :return: mixed reading-list slicer.
    """
    slicer = SearchSlicer(limit=limit)
    # The primary queryset fills every slot by default.
    slicer.register_queryset(primary_query)
    # The secondary queryset only claims index 0 (the lead slot).
    slicer.register_queryset(secondary_query, validator=lambda index: bool(index == 0))
    return slicer
[ "def", "FirstSlotSlicer", "(", "primary_query", ",", "secondary_query", ",", "limit", "=", "30", ")", ":", "# noqa", "reading_list", "=", "SearchSlicer", "(", "limit", "=", "limit", ")", "reading_list", ".", "register_queryset", "(", "primary_query", ")", "readi...
Inject the first object from a queryset into the first position of a reading list. :param primary_queryset: djes.LazySearch object. Default queryset for reading list. :param secondary_queryset: djes.LazySearch object. first result leads the reading_list. :return list: mixed reading list.
[ "Inject", "the", "first", "object", "from", "a", "queryset", "into", "the", "first", "position", "of", "a", "reading", "list", "." ]
train
https://github.com/theonion/django-bulbs/blob/0c0e6e3127a7dc487b96677fab95cacd2b3806da/bulbs/reading_list/slicers.py#L56-L67
theonion/django-bulbs
bulbs/reading_list/slicers.py
SearchSlicer.register_queryset
def register_queryset(self, queryset, validator=None, default=False):
    """Add a given queryset to the iterator with custom logic for iteration.

    :param queryset: List of objects included in the reading list.
    :param validator: Custom logic to determine a queryset's position in a
        reading_list. Validators must accept an index as an argument and
        return a truthy value.
    :param default: Sets the given queryset as the primary queryset when no
        validator applies.
    :raises ValueError: when a non-default queryset is registered without a
        validator.
    """
    # The first registered queryset (or an explicit default) becomes the
    # fallback used when no validator matches a slot index.
    if default or self.default_queryset is None:
        self.default_queryset = queryset
        return

    if not validator:
        raise ValueError(
            """Querysets require validation logic to integrate with reading lists."""
        )
    self.querysets[validator] = queryset
python
def register_queryset(self, queryset, validator=None, default=False):
    """Add a given queryset to the iterator with custom logic for iteration.

    :param queryset: List of objects included in the reading list.
    :param validator: Custom logic to determine a queryset's position in a
        reading_list. Validators must accept an index as an argument and
        return a truthy value.
    :param default: Sets the given queryset as the primary queryset when no
        validator applies.
    :raises ValueError: when a non-default queryset is registered without a
        validator.
    """
    # The first registered queryset (or an explicit default) becomes the
    # fallback used when no validator matches a slot index.
    if default or self.default_queryset is None:
        self.default_queryset = queryset
        return

    if not validator:
        raise ValueError(
            """Querysets require validation logic to integrate with reading lists."""
        )
    self.querysets[validator] = queryset
[ "def", "register_queryset", "(", "self", ",", "queryset", ",", "validator", "=", "None", ",", "default", "=", "False", ")", ":", "if", "default", "or", "self", ".", "default_queryset", "is", "None", ":", "self", ".", "default_queryset", "=", "queryset", "r...
Add a given queryset to the iterator with custom logic for iteration. :param queryset: List of objects included in the reading list. :param validator: Custom logic to determine a queryset's position in a reading_list. Validators must accept an index as an argument and return a truthy value. :param default: Sets the given queryset as the primary queryset when no validator applies.
[ "Add", "a", "given", "queryset", "to", "the", "iterator", "with", "custom", "logic", "for", "iteration", "." ]
train
https://github.com/theonion/django-bulbs/blob/0c0e6e3127a7dc487b96677fab95cacd2b3806da/bulbs/reading_list/slicers.py#L36-L53
theonion/django-bulbs
bulbs/special_coverage/management/commands/migrate_active_to_published.py
Command.get_month_start_date
def get_month_start_date(self):
    """Returns the first day of the current month"""
    current = timezone.now()
    # Keep the tzinfo of "now" so the result stays timezone-aware.
    return timezone.datetime(
        day=1,
        month=current.month,
        year=current.year,
        tzinfo=current.tzinfo,
    )
python
def get_month_start_date(self):
    """Returns the first day of the current month"""
    current = timezone.now()
    # Keep the tzinfo of "now" so the result stays timezone-aware.
    return timezone.datetime(
        day=1,
        month=current.month,
        year=current.year,
        tzinfo=current.tzinfo,
    )
[ "def", "get_month_start_date", "(", "self", ")", ":", "now", "=", "timezone", ".", "now", "(", ")", "return", "timezone", ".", "datetime", "(", "day", "=", "1", ",", "month", "=", "now", ".", "month", ",", "year", "=", "now", ".", "year", ",", "tzi...
Returns the first day of the current month
[ "Returns", "the", "first", "day", "of", "the", "current", "month" ]
train
https://github.com/theonion/django-bulbs/blob/0c0e6e3127a7dc487b96677fab95cacd2b3806da/bulbs/special_coverage/management/commands/migrate_active_to_published.py#L10-L13
arpitbbhayani/flasksr
flasksr/sr/basesr.py
BaseSR._yield_all
def _yield_all(self, l): ''' Given a iterable like list or tuple the function yields each of its items with _yield ''' if l is not None: if type(l) in [list, tuple]: for f in l: for x in self._yield(f): yield x else: for x in self._yield(l): yield x
python
def _yield_all(self, l): ''' Given a iterable like list or tuple the function yields each of its items with _yield ''' if l is not None: if type(l) in [list, tuple]: for f in l: for x in self._yield(f): yield x else: for x in self._yield(l): yield x
[ "def", "_yield_all", "(", "self", ",", "l", ")", ":", "if", "l", "is", "not", "None", ":", "if", "type", "(", "l", ")", "in", "[", "list", ",", "tuple", "]", ":", "for", "f", "in", "l", ":", "for", "x", "in", "self", ".", "_yield", "(", "f"...
Given a iterable like list or tuple the function yields each of its items with _yield
[ "Given", "a", "iterable", "like", "list", "or", "tuple", "the", "function", "yields", "each", "of", "its", "items", "with", "_yield" ]
train
https://github.com/arpitbbhayani/flasksr/blob/69c7dc071f52ac9d3691ff98bcc94fcd55b1a264/flasksr/sr/basesr.py#L68-L78
jjkester/moneybird-python
moneybird/api.py
MoneyBird.post
def post(self, resource_path: str, data: dict, administration_id: int = None):
    """
    Performs a POST request to the endpoint identified by the resource path.

    POST requests are usually used to add new data.

    Example:
        >>> from moneybird import MoneyBird, TokenAuthentication
        >>> moneybird = MoneyBird(TokenAuthentication('access_token'))
        >>> data = {'url': 'http://www.mocky.io/v2/5185415ba171ea3a00704eed'}
        >>> moneybird.post('webhooks', data, 123)
        {'id': '143274315994891267', 'url': 'http://www.mocky.io/v2/5185415ba171ea3a00704eed', ...

    :param resource_path: The resource path.
    :param data: The data to send to the server.
    :param administration_id: The administration id (optional, depending on the resource path).
    :return: The decoded JSON response for the request.
    """
    target = self._get_url(administration_id, resource_path)
    # The payload is serialized as JSON by the session.
    return self._process_response(self.session.post(url=target, json=data))
python
def post(self, resource_path: str, data: dict, administration_id: int = None):
    """
    Performs a POST request to the endpoint identified by the resource path.

    POST requests are usually used to add new data.

    Example:
        >>> from moneybird import MoneyBird, TokenAuthentication
        >>> moneybird = MoneyBird(TokenAuthentication('access_token'))
        >>> data = {'url': 'http://www.mocky.io/v2/5185415ba171ea3a00704eed'}
        >>> moneybird.post('webhooks', data, 123)
        {'id': '143274315994891267', 'url': 'http://www.mocky.io/v2/5185415ba171ea3a00704eed', ...

    :param resource_path: The resource path.
    :param data: The data to send to the server.
    :param administration_id: The administration id (optional, depending on the resource path).
    :return: The decoded JSON response for the request.
    """
    target = self._get_url(administration_id, resource_path)
    # The payload is serialized as JSON by the session.
    return self._process_response(self.session.post(url=target, json=data))
[ "def", "post", "(", "self", ",", "resource_path", ":", "str", ",", "data", ":", "dict", ",", "administration_id", ":", "int", "=", "None", ")", ":", "response", "=", "self", ".", "session", ".", "post", "(", "url", "=", "self", ".", "_get_url", "(", ...
Performs a POST request to the endpoint identified by the resource path. POST requests are usually used to add new data. Example: >>> from moneybird import MoneyBird, TokenAuthentication >>> moneybird = MoneyBird(TokenAuthentication('access_token')) >>> data = {'url': 'http://www.mocky.io/v2/5185415ba171ea3a00704eed'} >>> moneybird.post('webhooks', data, 123) {'id': '143274315994891267', 'url': 'http://www.mocky.io/v2/5185415ba171ea3a00704eed', ... :param resource_path: The resource path. :param data: The data to send to the server. :param administration_id: The administration id (optional, depending on the resource path). :return: The decoded JSON response for the request.
[ "Performs", "a", "POST", "request", "to", "the", "endpoint", "identified", "by", "the", "resource", "path", ".", "POST", "requests", "are", "usually", "used", "to", "add", "new", "data", "." ]
train
https://github.com/jjkester/moneybird-python/blob/da5f4c8c7ae6c8ed717dc273514a464bc9c284ed/moneybird/api.py#L48-L69
jjkester/moneybird-python
moneybird/api.py
MoneyBird.delete
def delete(self, resource_path: str, administration_id: int = None):
    """
    Performs a DELETE request to the endpoint identified by the resource path.

    DELETE requests are usually used to (permanently) delete existing data.
    USE THIS METHOD WITH CAUTION.

    From a client perspective, DELETE requests behave similarly to GET requests.

    :param resource_path: The resource path.
    :param administration_id: The administration id (optional, depending on the resource path).
    :return: The decoded JSON response for the request.
    """
    target = self._get_url(administration_id, resource_path)
    return self._process_response(self.session.delete(url=target))
python
def delete(self, resource_path: str, administration_id: int = None):
    """
    Performs a DELETE request to the endpoint identified by the resource path.

    DELETE requests are usually used to (permanently) delete existing data.
    USE THIS METHOD WITH CAUTION.

    From a client perspective, DELETE requests behave similarly to GET requests.

    :param resource_path: The resource path.
    :param administration_id: The administration id (optional, depending on the resource path).
    :return: The decoded JSON response for the request.
    """
    target = self._get_url(administration_id, resource_path)
    return self._process_response(self.session.delete(url=target))
[ "def", "delete", "(", "self", ",", "resource_path", ":", "str", ",", "administration_id", ":", "int", "=", "None", ")", ":", "response", "=", "self", ".", "session", ".", "delete", "(", "url", "=", "self", ".", "_get_url", "(", "administration_id", ",", ...
Performs a DELETE request to the endpoint identified by the resource path. DELETE requests are usually used to (permanently) delete existing data. USE THIS METHOD WITH CAUTION. From a client perspective, DELETE requests behave similarly to GET requests. :param resource_path: The resource path. :param administration_id: The administration id (optional, depending on the resource path). :return: The decoded JSON response for the request.
[ "Performs", "a", "DELETE", "request", "to", "the", "endpoint", "identified", "by", "the", "resource", "path", ".", "DELETE", "requests", "are", "usually", "used", "to", "(", "permanently", ")", "delete", "existing", "data", ".", "USE", "THIS", "METHOD", "WIT...
train
https://github.com/jjkester/moneybird-python/blob/da5f4c8c7ae6c8ed717dc273514a464bc9c284ed/moneybird/api.py#L89-L103
jjkester/moneybird-python
moneybird/api.py
MoneyBird.renew_session
def renew_session(self):
    """
    Clears all session data and starts a new session using the same settings
    as before.

    This method can be used to clear session data, e.g., cookies. Future
    requests will use a new session initiated with the same settings and
    authentication method.
    """
    logger.debug("API session renewed")
    fresh = self.authentication.get_session()
    # Every request identifies the client and asks for JSON responses.
    fresh.headers.update({
        'User-Agent': 'MoneyBird for Python %s' % VERSION,
        'Accept': 'application/json',
    })
    self.session = fresh
python
def renew_session(self):
    """
    Clears all session data and starts a new session using the same settings
    as before.

    This method can be used to clear session data, e.g., cookies. Future
    requests will use a new session initiated with the same settings and
    authentication method.
    """
    logger.debug("API session renewed")
    fresh = self.authentication.get_session()
    # Every request identifies the client and asks for JSON responses.
    fresh.headers.update({
        'User-Agent': 'MoneyBird for Python %s' % VERSION,
        'Accept': 'application/json',
    })
    self.session = fresh
[ "def", "renew_session", "(", "self", ")", ":", "logger", ".", "debug", "(", "\"API session renewed\"", ")", "self", ".", "session", "=", "self", ".", "authentication", ".", "get_session", "(", ")", "self", ".", "session", ".", "headers", ".", "update", "("...
Clears all session data and starts a new session using the same settings as before. This method can be used to clear session data, e.g., cookies. Future requests will use a new session initiated with the same settings and authentication method.
[ "Clears", "all", "session", "data", "and", "starts", "a", "new", "session", "using", "the", "same", "settings", "as", "before", "." ]
train
https://github.com/jjkester/moneybird-python/blob/da5f4c8c7ae6c8ed717dc273514a464bc9c284ed/moneybird/api.py#L105-L117
jjkester/moneybird-python
moneybird/api.py
MoneyBird._get_url
def _get_url(cls, administration_id: int, resource_path: str): """ Builds the URL to the API endpoint specified by the given parameters. :param administration_id: The ID of the administration (may be None). :param resource_path: The path to the resource. :return: The absolute URL to the endpoint. """ url = urljoin(cls.base_url, '%s/' % cls.version) if administration_id is not None: url = urljoin(url, '%s/' % administration_id) url = urljoin(url, '%s.json' % resource_path) return url
python
def _get_url(cls, administration_id: int, resource_path: str): """ Builds the URL to the API endpoint specified by the given parameters. :param administration_id: The ID of the administration (may be None). :param resource_path: The path to the resource. :return: The absolute URL to the endpoint. """ url = urljoin(cls.base_url, '%s/' % cls.version) if administration_id is not None: url = urljoin(url, '%s/' % administration_id) url = urljoin(url, '%s.json' % resource_path) return url
[ "def", "_get_url", "(", "cls", ",", "administration_id", ":", "int", ",", "resource_path", ":", "str", ")", ":", "url", "=", "urljoin", "(", "cls", ".", "base_url", ",", "'%s/'", "%", "cls", ".", "version", ")", "if", "administration_id", "is", "not", ...
Builds the URL to the API endpoint specified by the given parameters. :param administration_id: The ID of the administration (may be None). :param resource_path: The path to the resource. :return: The absolute URL to the endpoint.
[ "Builds", "the", "URL", "to", "the", "API", "endpoint", "specified", "by", "the", "given", "parameters", "." ]
train
https://github.com/jjkester/moneybird-python/blob/da5f4c8c7ae6c8ed717dc273514a464bc9c284ed/moneybird/api.py#L120-L135
jjkester/moneybird-python
moneybird/api.py
MoneyBird._process_response
def _process_response(response: requests.Response, expected: list = []) -> dict:
    """
    Processes an API response. Raises an exception when appropriate.

    The exception that will be raised is MoneyBird.APIError. This exception is subclassed so implementing programs can easily react appropriately to different exceptions.

    The following subclasses of MoneyBird.APIError are likely to be raised:
      - MoneyBird.Unauthorized: No access to the resource or invalid authentication
      - MoneyBird.Throttled: Access (temporarily) denied, please try again
      - MoneyBird.NotFound: Resource not found, check resource path
      - MoneyBird.InvalidData: Validation errors occured while processing your input
      - MoneyBird.ServerError: Error on the server

    :param response: The response to process.
    :param expected: A list of expected status codes which won't raise an exception.
    :return: The useful data in the response (may be None).
    """
    # NOTE(review): the mutable default `expected=[]` is only read, never
    # mutated, so it is harmless here — but a tuple default would be safer.
    # Map each status code to an exception class; None means "success".
    # NOTE(review): 400 is mapped to Unauthorized and 403 to Throttled —
    # this looks deliberate but unusual; confirm against the API docs.
    responses = {
        200: None,
        201: None,
        204: None,
        400: MoneyBird.Unauthorized,
        401: MoneyBird.Unauthorized,
        403: MoneyBird.Throttled,
        404: MoneyBird.NotFound,
        406: MoneyBird.NotFound,
        422: MoneyBird.InvalidData,
        429: MoneyBird.Throttled,
        500: MoneyBird.ServerError,
    }
    logger.debug("API request: %s %s\n" % (response.request.method, response.request.url) +
                 "Response: %s %s" % (response.status_code, response.text))
    # Codes listed in `expected` bypass error handling entirely.
    if response.status_code not in expected:
        if response.status_code not in responses:
            logger.error("API response contained unknown status code")
            raise MoneyBird.APIError(response, "API response contained unknown status code")
        elif responses[response.status_code] is not None:
            # Best-effort extraction of the server-provided error message.
            try:
                description = response.json()['error']
            except (AttributeError, TypeError, KeyError, ValueError):
                description = None
            raise responses[response.status_code](response, description)
    # Decode the body; non-JSON bodies are logged and yield None.
    try:
        data = response.json()
    except ValueError:
        logger.error("API response is not JSON decodable")
        data = None
    return data
python
def _process_response(response: requests.Response, expected: list = []) -> dict:
    """
    Processes an API response. Raises an exception when appropriate.

    The exception that will be raised is MoneyBird.APIError. This exception is subclassed so implementing programs can easily react appropriately to different exceptions.

    The following subclasses of MoneyBird.APIError are likely to be raised:
      - MoneyBird.Unauthorized: No access to the resource or invalid authentication
      - MoneyBird.Throttled: Access (temporarily) denied, please try again
      - MoneyBird.NotFound: Resource not found, check resource path
      - MoneyBird.InvalidData: Validation errors occured while processing your input
      - MoneyBird.ServerError: Error on the server

    :param response: The response to process.
    :param expected: A list of expected status codes which won't raise an exception.
    :return: The useful data in the response (may be None).
    """
    # NOTE(review): the mutable default `expected=[]` is only read, never
    # mutated, so it is harmless here — but a tuple default would be safer.
    # Map each status code to an exception class; None means "success".
    # NOTE(review): 400 is mapped to Unauthorized and 403 to Throttled —
    # this looks deliberate but unusual; confirm against the API docs.
    responses = {
        200: None,
        201: None,
        204: None,
        400: MoneyBird.Unauthorized,
        401: MoneyBird.Unauthorized,
        403: MoneyBird.Throttled,
        404: MoneyBird.NotFound,
        406: MoneyBird.NotFound,
        422: MoneyBird.InvalidData,
        429: MoneyBird.Throttled,
        500: MoneyBird.ServerError,
    }
    logger.debug("API request: %s %s\n" % (response.request.method, response.request.url) +
                 "Response: %s %s" % (response.status_code, response.text))
    # Codes listed in `expected` bypass error handling entirely.
    if response.status_code not in expected:
        if response.status_code not in responses:
            logger.error("API response contained unknown status code")
            raise MoneyBird.APIError(response, "API response contained unknown status code")
        elif responses[response.status_code] is not None:
            # Best-effort extraction of the server-provided error message.
            try:
                description = response.json()['error']
            except (AttributeError, TypeError, KeyError, ValueError):
                description = None
            raise responses[response.status_code](response, description)
    # Decode the body; non-JSON bodies are logged and yield None.
    try:
        data = response.json()
    except ValueError:
        logger.error("API response is not JSON decodable")
        data = None
    return data
[ "def", "_process_response", "(", "response", ":", "requests", ".", "Response", ",", "expected", ":", "list", "=", "[", "]", ")", "->", "dict", ":", "responses", "=", "{", "200", ":", "None", ",", "201", ":", "None", ",", "204", ":", "None", ",", "4...
Processes an API response. Raises an exception when appropriate. The exception that will be raised is MoneyBird.APIError. This exception is subclassed so implementing programs can easily react appropriately to different exceptions. The following subclasses of MoneyBird.APIError are likely to be raised: - MoneyBird.Unauthorized: No access to the resource or invalid authentication - MoneyBird.Throttled: Access (temporarily) denied, please try again - MoneyBird.NotFound: Resource not found, check resource path - MoneyBird.InvalidData: Validation errors occured while processing your input - MoneyBird.ServerError: Error on the server :param response: The response to process. :param expected: A list of expected status codes which won't raise an exception. :return: The useful data in the response (may be None).
[ "Processes", "an", "API", "response", ".", "Raises", "an", "exception", "when", "appropriate", "." ]
train
https://github.com/jjkester/moneybird-python/blob/da5f4c8c7ae6c8ed717dc273514a464bc9c284ed/moneybird/api.py#L138-L190
theonion/django-bulbs
bulbs/videos/models.py
VideohubVideo.search_videohub
def search_videohub(cls, query, filters=None, status=None, sort=None, size=None, page=None): """searches the videohub given a query and applies given filters and other bits :see: https://github.com/theonion/videohub/blob/master/docs/search/post.md :see: https://github.com/theonion/videohub/blob/master/docs/search/get.md :param query: query terms to search by :type query: str :example query: "brooklyn hipsters" # although, this is a little redundant... :param filters: video field value restrictions :type filters: dict :default filters: None :example filters: {"channel": "onion"} or {"series": "Today NOW"} :param status: limit the results to videos that are published, scheduled, draft :type status: str :default status: None :example status: "published" or "draft" or "scheduled" :param sort: video field related sorting :type sort: dict :default sort: None :example sort: {"title": "desc"} or {"description": "asc"} :param size: the page size (number of results) :type size: int :default size: None :example size": {"size": 20} :param page: the page number of the results :type page: int :default page: None :example page: {"page": 2} # note, you should use `size` in conjunction with `page` :return: a dictionary of results and meta information :rtype: dict """ # construct url url = getattr(settings, "VIDEOHUB_API_SEARCH_URL", cls.DEFAULT_VIDEOHUB_API_SEARCH_URL) # construct auth headers headers = { "Content-Type": "application/json", "Authorization": settings.VIDEOHUB_API_TOKEN, } # construct payload payload = { "query": query, } if filters: assert isinstance(filters, dict) payload["filters"] = filters if status: assert isinstance(status, six.string_types) payload.setdefault("filters", {}) payload["filters"]["status"] = status if sort: assert isinstance(sort, dict) payload["sort"] = sort if size: assert isinstance(size, (six.string_types, int)) payload["size"] = size if page: assert isinstance(page, (six.string_types, int)) payload["page"] = page # send request res = 
requests.post(url, data=json.dumps(payload), headers=headers) # raise if not 200 if res.status_code != 200: res.raise_for_status() # parse and return response return json.loads(res.content)
python
def search_videohub(cls, query, filters=None, status=None, sort=None, size=None, page=None): """searches the videohub given a query and applies given filters and other bits :see: https://github.com/theonion/videohub/blob/master/docs/search/post.md :see: https://github.com/theonion/videohub/blob/master/docs/search/get.md :param query: query terms to search by :type query: str :example query: "brooklyn hipsters" # although, this is a little redundant... :param filters: video field value restrictions :type filters: dict :default filters: None :example filters: {"channel": "onion"} or {"series": "Today NOW"} :param status: limit the results to videos that are published, scheduled, draft :type status: str :default status: None :example status: "published" or "draft" or "scheduled" :param sort: video field related sorting :type sort: dict :default sort: None :example sort: {"title": "desc"} or {"description": "asc"} :param size: the page size (number of results) :type size: int :default size: None :example size": {"size": 20} :param page: the page number of the results :type page: int :default page: None :example page: {"page": 2} # note, you should use `size` in conjunction with `page` :return: a dictionary of results and meta information :rtype: dict """ # construct url url = getattr(settings, "VIDEOHUB_API_SEARCH_URL", cls.DEFAULT_VIDEOHUB_API_SEARCH_URL) # construct auth headers headers = { "Content-Type": "application/json", "Authorization": settings.VIDEOHUB_API_TOKEN, } # construct payload payload = { "query": query, } if filters: assert isinstance(filters, dict) payload["filters"] = filters if status: assert isinstance(status, six.string_types) payload.setdefault("filters", {}) payload["filters"]["status"] = status if sort: assert isinstance(sort, dict) payload["sort"] = sort if size: assert isinstance(size, (six.string_types, int)) payload["size"] = size if page: assert isinstance(page, (six.string_types, int)) payload["page"] = page # send request res = 
requests.post(url, data=json.dumps(payload), headers=headers) # raise if not 200 if res.status_code != 200: res.raise_for_status() # parse and return response return json.loads(res.content)
[ "def", "search_videohub", "(", "cls", ",", "query", ",", "filters", "=", "None", ",", "status", "=", "None", ",", "sort", "=", "None", ",", "size", "=", "None", ",", "page", "=", "None", ")", ":", "# construct url", "url", "=", "getattr", "(", "setti...
searches the videohub given a query and applies given filters and other bits :see: https://github.com/theonion/videohub/blob/master/docs/search/post.md :see: https://github.com/theonion/videohub/blob/master/docs/search/get.md :param query: query terms to search by :type query: str :example query: "brooklyn hipsters" # although, this is a little redundant... :param filters: video field value restrictions :type filters: dict :default filters: None :example filters: {"channel": "onion"} or {"series": "Today NOW"} :param status: limit the results to videos that are published, scheduled, draft :type status: str :default status: None :example status: "published" or "draft" or "scheduled" :param sort: video field related sorting :type sort: dict :default sort: None :example sort: {"title": "desc"} or {"description": "asc"} :param size: the page size (number of results) :type size: int :default size: None :example size": {"size": 20} :param page: the page number of the results :type page: int :default page: None :example page: {"page": 2} # note, you should use `size` in conjunction with `page` :return: a dictionary of results and meta information :rtype: dict
[ "searches", "the", "videohub", "given", "a", "query", "and", "applies", "given", "filters", "and", "other", "bits" ]
train
https://github.com/theonion/django-bulbs/blob/0c0e6e3127a7dc487b96677fab95cacd2b3806da/bulbs/videos/models.py#L45-L116
theonion/django-bulbs
bulbs/videos/models.py
VideohubVideo.get_hub_url
def get_hub_url(self):
    """gets a canonical path to the detail page of the video on the hub

    :return: the path to the consumer ui detail page of the video
    :rtype: str
    """
    template = getattr(settings, "VIDEOHUB_VIDEO_URL", self.DEFAULT_VIDEOHUB_VIDEO_URL)
    # slugify needs ascii
    if isinstance(self.title, str):
        ascii_title = self.title
    elif six.PY2 and isinstance(self.title, six.text_type):
        # Legacy unicode conversion
        ascii_title = self.title.encode('ascii', 'replace')
    else:
        ascii_title = ""
    return template.format(slugify("{}-{}".format(ascii_title, self.id)))
python
def get_hub_url(self):
    """gets a canonical path to the detail page of the video on the hub

    :return: the path to the consumer ui detail page of the video
    :rtype: str
    """
    template = getattr(settings, "VIDEOHUB_VIDEO_URL", self.DEFAULT_VIDEOHUB_VIDEO_URL)
    # slugify needs ascii
    if isinstance(self.title, str):
        ascii_title = self.title
    elif six.PY2 and isinstance(self.title, six.text_type):
        # Legacy unicode conversion
        ascii_title = self.title.encode('ascii', 'replace')
    else:
        ascii_title = ""
    return template.format(slugify("{}-{}".format(ascii_title, self.id)))
[ "def", "get_hub_url", "(", "self", ")", ":", "url", "=", "getattr", "(", "settings", ",", "\"VIDEOHUB_VIDEO_URL\"", ",", "self", ".", "DEFAULT_VIDEOHUB_VIDEO_URL", ")", "# slugify needs ascii", "ascii_title", "=", "\"\"", "if", "isinstance", "(", "self", ".", "t...
gets a canonical path to the detail page of the video on the hub :return: the path to the consumer ui detail page of the video :rtype: str
[ "gets", "a", "canonical", "path", "to", "the", "detail", "page", "of", "the", "video", "on", "the", "hub" ]
train
https://github.com/theonion/django-bulbs/blob/0c0e6e3127a7dc487b96677fab95cacd2b3806da/bulbs/videos/models.py#L118-L136
theonion/django-bulbs
bulbs/videos/models.py
VideohubVideo.get_embed_url
def get_embed_url(self, targeting=None, recirc=None):
    """gets a canonical path to an embedded iframe of the video from the hub

    :return: the path to create an embedded iframe of the video
    :rtype: str
    """
    template = getattr(settings, "VIDEOHUB_EMBED_URL", self.DEFAULT_VIDEOHUB_EMBED_URL)
    embed_url = template.format(self.id)
    if targeting is not None:
        # Sort for a deterministic ordering of targeting query params.
        embed_url += "".join(
            '&{0}={1}'.format(key, val) for key, val in sorted(targeting.items())
        )
    if recirc is not None:
        embed_url += '&recirc={0}'.format(recirc)
    return embed_url
python
def get_embed_url(self, targeting=None, recirc=None): """gets a canonical path to an embedded iframe of the video from the hub :return: the path to create an embedded iframe of the video :rtype: str """ url = getattr(settings, "VIDEOHUB_EMBED_URL", self.DEFAULT_VIDEOHUB_EMBED_URL) url = url.format(self.id) if targeting is not None: for k, v in sorted(targeting.items()): url += '&{0}={1}'.format(k, v) if recirc is not None: url += '&recirc={0}'.format(recirc) return url
[ "def", "get_embed_url", "(", "self", ",", "targeting", "=", "None", ",", "recirc", "=", "None", ")", ":", "url", "=", "getattr", "(", "settings", ",", "\"VIDEOHUB_EMBED_URL\"", ",", "self", ".", "DEFAULT_VIDEOHUB_EMBED_URL", ")", "url", "=", "url", ".", "f...
gets a canonical path to an embedded iframe of the video from the hub :return: the path to create an embedded iframe of the video :rtype: str
[ "gets", "a", "canonical", "path", "to", "an", "embedded", "iframe", "of", "the", "video", "from", "the", "hub" ]
train
https://github.com/theonion/django-bulbs/blob/0c0e6e3127a7dc487b96677fab95cacd2b3806da/bulbs/videos/models.py#L138-L151
theonion/django-bulbs
bulbs/videos/models.py
VideohubVideo.get_api_url
def get_api_url(self): """gets a canonical path to the api detail url of the video on the hub :return: the path to the api detail of the video :rtype: str """ url = getattr(settings, 'VIDEOHUB_API_URL', None) # Support alternate setting (used by most client projects) if not url: url = getattr(settings, 'VIDEOHUB_API_BASE_URL', None) if url: url = url.rstrip('/') + '/videos/{}' if not url: url = self.DEFAULT_VIDEOHUB_API_URL return url.format(self.id)
python
def get_api_url(self): """gets a canonical path to the api detail url of the video on the hub :return: the path to the api detail of the video :rtype: str """ url = getattr(settings, 'VIDEOHUB_API_URL', None) # Support alternate setting (used by most client projects) if not url: url = getattr(settings, 'VIDEOHUB_API_BASE_URL', None) if url: url = url.rstrip('/') + '/videos/{}' if not url: url = self.DEFAULT_VIDEOHUB_API_URL return url.format(self.id)
[ "def", "get_api_url", "(", "self", ")", ":", "url", "=", "getattr", "(", "settings", ",", "'VIDEOHUB_API_URL'", ",", "None", ")", "# Support alternate setting (used by most client projects)", "if", "not", "url", ":", "url", "=", "getattr", "(", "settings", ",", ...
gets a canonical path to the api detail url of the video on the hub :return: the path to the api detail of the video :rtype: str
[ "gets", "a", "canonical", "path", "to", "the", "api", "detail", "url", "of", "the", "video", "on", "the", "hub" ]
train
https://github.com/theonion/django-bulbs/blob/0c0e6e3127a7dc487b96677fab95cacd2b3806da/bulbs/videos/models.py#L153-L167
geertj/pyskiplist
pyskiplist/dllist.py
dllist.remove
def remove(self, node): """Remove a node from the list. The *node* argument must be a node that was previously inserted in the list """ if node is None or node._prev == -1: return if node._next is None: self._last = node._prev # last node else: node._next._prev = node._prev if node._prev is None: self._first = node._next # first node else: node._prev._next = node._next node._prev = node._next = -1 self._size -= 1
python
def remove(self, node): """Remove a node from the list. The *node* argument must be a node that was previously inserted in the list """ if node is None or node._prev == -1: return if node._next is None: self._last = node._prev # last node else: node._next._prev = node._prev if node._prev is None: self._first = node._next # first node else: node._prev._next = node._next node._prev = node._next = -1 self._size -= 1
[ "def", "remove", "(", "self", ",", "node", ")", ":", "if", "node", "is", "None", "or", "node", ".", "_prev", "==", "-", "1", ":", "return", "if", "node", ".", "_next", "is", "None", ":", "self", ".", "_last", "=", "node", ".", "_prev", "# last no...
Remove a node from the list. The *node* argument must be a node that was previously inserted in the list
[ "Remove", "a", "node", "from", "the", "list", "." ]
train
https://github.com/geertj/pyskiplist/blob/c5f94cf135d42bb277255150d3f570ed807468b2/pyskiplist/dllist.py#L99-L116
PGower/PyCanvas
pycanvas/apis/outcome_groups.py
OutcomeGroupsAPI.get_all_outcome_links_for_context_accounts
def get_all_outcome_links_for_context_accounts(self, account_id, outcome_group_style=None, outcome_style=None): """ Get all outcome links for context. """ path = {} data = {} params = {} # REQUIRED - PATH - account_id """ID""" path["account_id"] = account_id # OPTIONAL - outcome_style """The detail level of the outcomes. Defaults to "abbrev". Specify "full" for more information.""" if outcome_style is not None: params["outcome_style"] = outcome_style # OPTIONAL - outcome_group_style """The detail level of the outcome groups. Defaults to "abbrev". Specify "full" for more information.""" if outcome_group_style is not None: params["outcome_group_style"] = outcome_group_style self.logger.debug("GET /api/v1/accounts/{account_id}/outcome_group_links with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("GET", "/api/v1/accounts/{account_id}/outcome_group_links".format(**path), data=data, params=params, all_pages=True)
python
def get_all_outcome_links_for_context_accounts(self, account_id, outcome_group_style=None, outcome_style=None): """ Get all outcome links for context. """ path = {} data = {} params = {} # REQUIRED - PATH - account_id """ID""" path["account_id"] = account_id # OPTIONAL - outcome_style """The detail level of the outcomes. Defaults to "abbrev". Specify "full" for more information.""" if outcome_style is not None: params["outcome_style"] = outcome_style # OPTIONAL - outcome_group_style """The detail level of the outcome groups. Defaults to "abbrev". Specify "full" for more information.""" if outcome_group_style is not None: params["outcome_group_style"] = outcome_group_style self.logger.debug("GET /api/v1/accounts/{account_id}/outcome_group_links with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("GET", "/api/v1/accounts/{account_id}/outcome_group_links".format(**path), data=data, params=params, all_pages=True)
[ "def", "get_all_outcome_links_for_context_accounts", "(", "self", ",", "account_id", ",", "outcome_group_style", "=", "None", ",", "outcome_style", "=", "None", ")", ":", "path", "=", "{", "}", "data", "=", "{", "}", "params", "=", "{", "}", "# REQUIRED - PATH...
Get all outcome links for context.
[ "Get", "all", "outcome", "links", "for", "context", "." ]
train
https://github.com/PGower/PyCanvas/blob/68520005382b440a1e462f9df369f54d364e21e8/pycanvas/apis/outcome_groups.py#L103-L130
PGower/PyCanvas
pycanvas/apis/outcome_groups.py
OutcomeGroupsAPI.get_all_outcome_links_for_context_courses
def get_all_outcome_links_for_context_courses(self, course_id, outcome_group_style=None, outcome_style=None): """ Get all outcome links for context. """ path = {} data = {} params = {} # REQUIRED - PATH - course_id """ID""" path["course_id"] = course_id # OPTIONAL - outcome_style """The detail level of the outcomes. Defaults to "abbrev". Specify "full" for more information.""" if outcome_style is not None: params["outcome_style"] = outcome_style # OPTIONAL - outcome_group_style """The detail level of the outcome groups. Defaults to "abbrev". Specify "full" for more information.""" if outcome_group_style is not None: params["outcome_group_style"] = outcome_group_style self.logger.debug("GET /api/v1/courses/{course_id}/outcome_group_links with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("GET", "/api/v1/courses/{course_id}/outcome_group_links".format(**path), data=data, params=params, all_pages=True)
python
def get_all_outcome_links_for_context_courses(self, course_id, outcome_group_style=None, outcome_style=None): """ Get all outcome links for context. """ path = {} data = {} params = {} # REQUIRED - PATH - course_id """ID""" path["course_id"] = course_id # OPTIONAL - outcome_style """The detail level of the outcomes. Defaults to "abbrev". Specify "full" for more information.""" if outcome_style is not None: params["outcome_style"] = outcome_style # OPTIONAL - outcome_group_style """The detail level of the outcome groups. Defaults to "abbrev". Specify "full" for more information.""" if outcome_group_style is not None: params["outcome_group_style"] = outcome_group_style self.logger.debug("GET /api/v1/courses/{course_id}/outcome_group_links with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("GET", "/api/v1/courses/{course_id}/outcome_group_links".format(**path), data=data, params=params, all_pages=True)
[ "def", "get_all_outcome_links_for_context_courses", "(", "self", ",", "course_id", ",", "outcome_group_style", "=", "None", ",", "outcome_style", "=", "None", ")", ":", "path", "=", "{", "}", "data", "=", "{", "}", "params", "=", "{", "}", "# REQUIRED - PATH -...
Get all outcome links for context.
[ "Get", "all", "outcome", "links", "for", "context", "." ]
train
https://github.com/PGower/PyCanvas/blob/68520005382b440a1e462f9df369f54d364e21e8/pycanvas/apis/outcome_groups.py#L132-L159
PGower/PyCanvas
pycanvas/apis/outcome_groups.py
OutcomeGroupsAPI.create_link_outcome_global_outcome_id
def create_link_outcome_global_outcome_id(self, id, outcome_id, calculation_int=None, calculation_method=None, description=None, display_name=None, mastery_points=None, ratings_description=None, ratings_points=None, title=None, vendor_guid=None): """ Create/link an outcome. Link an outcome into the outcome group. The outcome to link can either be specified by a PUT to the link URL for a specific outcome (the outcome_id in the PUT URLs) or by supplying the information for a new outcome (title, description, ratings, mastery_points) in a POST to the collection. If linking an existing outcome, the outcome_id must identify an outcome available to this context; i.e. an outcome owned by this group's context, an outcome owned by an associated account, or a global outcome. With outcome_id present, any other parameters are ignored. If defining a new outcome, the outcome is created in the outcome group's context using the provided title, description, ratings, and mastery points; the title is required but all other fields are optional. The new outcome is then linked into the outcome group. If ratings are provided when creating a new outcome, an embedded rubric criterion is included in the new outcome. This criterion's mastery_points default to the maximum points in the highest rating if not specified in the mastery_points parameter. Any ratings lacking a description are given a default of "No description". Any ratings lacking a point value are given a default of 0. If no ratings are provided, the mastery_points parameter is ignored. """ path = {} data = {} params = {} # REQUIRED - PATH - id """ID""" path["id"] = id # REQUIRED - PATH - outcome_id """The ID of the existing outcome to link.""" path["outcome_id"] = outcome_id # OPTIONAL - title """The title of the new outcome. 
Required if outcome_id is absent.""" if title is not None: data["title"] = title # OPTIONAL - display_name """A friendly name shown in reports for outcomes with cryptic titles, such as common core standards names.""" if display_name is not None: data["display_name"] = display_name # OPTIONAL - description """The description of the new outcome.""" if description is not None: data["description"] = description # OPTIONAL - vendor_guid """A custom GUID for the learning standard.""" if vendor_guid is not None: data["vendor_guid"] = vendor_guid # OPTIONAL - mastery_points """The mastery threshold for the embedded rubric criterion.""" if mastery_points is not None: data["mastery_points"] = mastery_points # OPTIONAL - ratings[description] """The description of a rating level for the embedded rubric criterion.""" if ratings_description is not None: data["ratings[description]"] = ratings_description # OPTIONAL - ratings[points] """The points corresponding to a rating level for the embedded rubric criterion.""" if ratings_points is not None: data["ratings[points]"] = ratings_points # OPTIONAL - calculation_method """The new calculation method. Defaults to "highest"""" if calculation_method is not None: self._validate_enum(calculation_method, ["decaying_average", "n_mastery", "latest", "highest"]) data["calculation_method"] = calculation_method # OPTIONAL - calculation_int """The new calculation int. Only applies if the calculation_method is "decaying_average" or "n_mastery"""" if calculation_int is not None: data["calculation_int"] = calculation_int self.logger.debug("PUT /api/v1/global/outcome_groups/{id}/outcomes/{outcome_id} with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("PUT", "/api/v1/global/outcome_groups/{id}/outcomes/{outcome_id}".format(**path), data=data, params=params, single_item=True)
python
def create_link_outcome_global_outcome_id(self, id, outcome_id, calculation_int=None, calculation_method=None, description=None, display_name=None, mastery_points=None, ratings_description=None, ratings_points=None, title=None, vendor_guid=None): """ Create/link an outcome. Link an outcome into the outcome group. The outcome to link can either be specified by a PUT to the link URL for a specific outcome (the outcome_id in the PUT URLs) or by supplying the information for a new outcome (title, description, ratings, mastery_points) in a POST to the collection. If linking an existing outcome, the outcome_id must identify an outcome available to this context; i.e. an outcome owned by this group's context, an outcome owned by an associated account, or a global outcome. With outcome_id present, any other parameters are ignored. If defining a new outcome, the outcome is created in the outcome group's context using the provided title, description, ratings, and mastery points; the title is required but all other fields are optional. The new outcome is then linked into the outcome group. If ratings are provided when creating a new outcome, an embedded rubric criterion is included in the new outcome. This criterion's mastery_points default to the maximum points in the highest rating if not specified in the mastery_points parameter. Any ratings lacking a description are given a default of "No description". Any ratings lacking a point value are given a default of 0. If no ratings are provided, the mastery_points parameter is ignored. """ path = {} data = {} params = {} # REQUIRED - PATH - id """ID""" path["id"] = id # REQUIRED - PATH - outcome_id """The ID of the existing outcome to link.""" path["outcome_id"] = outcome_id # OPTIONAL - title """The title of the new outcome. 
Required if outcome_id is absent.""" if title is not None: data["title"] = title # OPTIONAL - display_name """A friendly name shown in reports for outcomes with cryptic titles, such as common core standards names.""" if display_name is not None: data["display_name"] = display_name # OPTIONAL - description """The description of the new outcome.""" if description is not None: data["description"] = description # OPTIONAL - vendor_guid """A custom GUID for the learning standard.""" if vendor_guid is not None: data["vendor_guid"] = vendor_guid # OPTIONAL - mastery_points """The mastery threshold for the embedded rubric criterion.""" if mastery_points is not None: data["mastery_points"] = mastery_points # OPTIONAL - ratings[description] """The description of a rating level for the embedded rubric criterion.""" if ratings_description is not None: data["ratings[description]"] = ratings_description # OPTIONAL - ratings[points] """The points corresponding to a rating level for the embedded rubric criterion.""" if ratings_points is not None: data["ratings[points]"] = ratings_points # OPTIONAL - calculation_method """The new calculation method. Defaults to "highest"""" if calculation_method is not None: self._validate_enum(calculation_method, ["decaying_average", "n_mastery", "latest", "highest"]) data["calculation_method"] = calculation_method # OPTIONAL - calculation_int """The new calculation int. Only applies if the calculation_method is "decaying_average" or "n_mastery"""" if calculation_int is not None: data["calculation_int"] = calculation_int self.logger.debug("PUT /api/v1/global/outcome_groups/{id}/outcomes/{outcome_id} with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("PUT", "/api/v1/global/outcome_groups/{id}/outcomes/{outcome_id}".format(**path), data=data, params=params, single_item=True)
[ "def", "create_link_outcome_global_outcome_id", "(", "self", ",", "id", ",", "outcome_id", ",", "calculation_int", "=", "None", ",", "calculation_method", "=", "None", ",", "description", "=", "None", ",", "display_name", "=", "None", ",", "mastery_points", "=", ...
Create/link an outcome. Link an outcome into the outcome group. The outcome to link can either be specified by a PUT to the link URL for a specific outcome (the outcome_id in the PUT URLs) or by supplying the information for a new outcome (title, description, ratings, mastery_points) in a POST to the collection. If linking an existing outcome, the outcome_id must identify an outcome available to this context; i.e. an outcome owned by this group's context, an outcome owned by an associated account, or a global outcome. With outcome_id present, any other parameters are ignored. If defining a new outcome, the outcome is created in the outcome group's context using the provided title, description, ratings, and mastery points; the title is required but all other fields are optional. The new outcome is then linked into the outcome group. If ratings are provided when creating a new outcome, an embedded rubric criterion is included in the new outcome. This criterion's mastery_points default to the maximum points in the highest rating if not specified in the mastery_points parameter. Any ratings lacking a description are given a default of "No description". Any ratings lacking a point value are given a default of 0. If no ratings are provided, the mastery_points parameter is ignored.
[ "Create", "/", "link", "an", "outcome", ".", "Link", "an", "outcome", "into", "the", "outcome", "group", ".", "The", "outcome", "to", "link", "can", "either", "be", "specified", "by", "a", "PUT", "to", "the", "link", "URL", "for", "a", "specific", "out...
train
https://github.com/PGower/PyCanvas/blob/68520005382b440a1e462f9df369f54d364e21e8/pycanvas/apis/outcome_groups.py#L598-L685
PGower/PyCanvas
pycanvas/apis/outcome_groups.py
OutcomeGroupsAPI.list_subgroups_global
def list_subgroups_global(self, id): """ List subgroups. List the immediate OutcomeGroup children of the outcome group. Paginated. """ path = {} data = {} params = {} # REQUIRED - PATH - id """ID""" path["id"] = id self.logger.debug("GET /api/v1/global/outcome_groups/{id}/subgroups with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("GET", "/api/v1/global/outcome_groups/{id}/subgroups".format(**path), data=data, params=params, all_pages=True)
python
def list_subgroups_global(self, id): """ List subgroups. List the immediate OutcomeGroup children of the outcome group. Paginated. """ path = {} data = {} params = {} # REQUIRED - PATH - id """ID""" path["id"] = id self.logger.debug("GET /api/v1/global/outcome_groups/{id}/subgroups with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("GET", "/api/v1/global/outcome_groups/{id}/subgroups".format(**path), data=data, params=params, all_pages=True)
[ "def", "list_subgroups_global", "(", "self", ",", "id", ")", ":", "path", "=", "{", "}", "data", "=", "{", "}", "params", "=", "{", "}", "# REQUIRED - PATH - id\r", "\"\"\"ID\"\"\"", "path", "[", "\"id\"", "]", "=", "id", "self", ".", "logger", ".", "d...
List subgroups. List the immediate OutcomeGroup children of the outcome group. Paginated.
[ "List", "subgroups", ".", "List", "the", "immediate", "OutcomeGroup", "children", "of", "the", "outcome", "group", ".", "Paginated", "." ]
train
https://github.com/PGower/PyCanvas/blob/68520005382b440a1e462f9df369f54d364e21e8/pycanvas/apis/outcome_groups.py#L1141-L1156
PGower/PyCanvas
pycanvas/apis/outcome_groups.py
OutcomeGroupsAPI.list_subgroups_accounts
def list_subgroups_accounts(self, id, account_id): """ List subgroups. List the immediate OutcomeGroup children of the outcome group. Paginated. """ path = {} data = {} params = {} # REQUIRED - PATH - account_id """ID""" path["account_id"] = account_id # REQUIRED - PATH - id """ID""" path["id"] = id self.logger.debug("GET /api/v1/accounts/{account_id}/outcome_groups/{id}/subgroups with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("GET", "/api/v1/accounts/{account_id}/outcome_groups/{id}/subgroups".format(**path), data=data, params=params, all_pages=True)
python
def list_subgroups_accounts(self, id, account_id): """ List subgroups. List the immediate OutcomeGroup children of the outcome group. Paginated. """ path = {} data = {} params = {} # REQUIRED - PATH - account_id """ID""" path["account_id"] = account_id # REQUIRED - PATH - id """ID""" path["id"] = id self.logger.debug("GET /api/v1/accounts/{account_id}/outcome_groups/{id}/subgroups with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("GET", "/api/v1/accounts/{account_id}/outcome_groups/{id}/subgroups".format(**path), data=data, params=params, all_pages=True)
[ "def", "list_subgroups_accounts", "(", "self", ",", "id", ",", "account_id", ")", ":", "path", "=", "{", "}", "data", "=", "{", "}", "params", "=", "{", "}", "# REQUIRED - PATH - account_id\r", "\"\"\"ID\"\"\"", "path", "[", "\"account_id\"", "]", "=", "acco...
List subgroups. List the immediate OutcomeGroup children of the outcome group. Paginated.
[ "List", "subgroups", ".", "List", "the", "immediate", "OutcomeGroup", "children", "of", "the", "outcome", "group", ".", "Paginated", "." ]
train
https://github.com/PGower/PyCanvas/blob/68520005382b440a1e462f9df369f54d364e21e8/pycanvas/apis/outcome_groups.py#L1158-L1177
PGower/PyCanvas
pycanvas/apis/outcome_groups.py
OutcomeGroupsAPI.create_subgroup_global
def create_subgroup_global(self, id, title, description=None, vendor_guid=None): """ Create a subgroup. Creates a new empty subgroup under the outcome group with the given title and description. """ path = {} data = {} params = {} # REQUIRED - PATH - id """ID""" path["id"] = id # REQUIRED - title """The title of the new outcome group.""" data["title"] = title # OPTIONAL - description """The description of the new outcome group.""" if description is not None: data["description"] = description # OPTIONAL - vendor_guid """A custom GUID for the learning standard""" if vendor_guid is not None: data["vendor_guid"] = vendor_guid self.logger.debug("POST /api/v1/global/outcome_groups/{id}/subgroups with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("POST", "/api/v1/global/outcome_groups/{id}/subgroups".format(**path), data=data, params=params, single_item=True)
python
def create_subgroup_global(self, id, title, description=None, vendor_guid=None): """ Create a subgroup. Creates a new empty subgroup under the outcome group with the given title and description. """ path = {} data = {} params = {} # REQUIRED - PATH - id """ID""" path["id"] = id # REQUIRED - title """The title of the new outcome group.""" data["title"] = title # OPTIONAL - description """The description of the new outcome group.""" if description is not None: data["description"] = description # OPTIONAL - vendor_guid """A custom GUID for the learning standard""" if vendor_guid is not None: data["vendor_guid"] = vendor_guid self.logger.debug("POST /api/v1/global/outcome_groups/{id}/subgroups with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("POST", "/api/v1/global/outcome_groups/{id}/subgroups".format(**path), data=data, params=params, single_item=True)
[ "def", "create_subgroup_global", "(", "self", ",", "id", ",", "title", ",", "description", "=", "None", ",", "vendor_guid", "=", "None", ")", ":", "path", "=", "{", "}", "data", "=", "{", "}", "params", "=", "{", "}", "# REQUIRED - PATH - id\r", "\"\"\"I...
Create a subgroup. Creates a new empty subgroup under the outcome group with the given title and description.
[ "Create", "a", "subgroup", ".", "Creates", "a", "new", "empty", "subgroup", "under", "the", "outcome", "group", "with", "the", "given", "title", "and", "description", "." ]
train
https://github.com/PGower/PyCanvas/blob/68520005382b440a1e462f9df369f54d364e21e8/pycanvas/apis/outcome_groups.py#L1200-L1230
PGower/PyCanvas
pycanvas/apis/grading_periods.py
GradingPeriodsAPI.update_single_grading_period
def update_single_grading_period(self, id, course_id, grading_periods_end_date, grading_periods_start_date, grading_periods_weight=None): """ Update a single grading period. Update an existing grading period. """ path = {} data = {} params = {} # REQUIRED - PATH - course_id """ID""" path["course_id"] = course_id # REQUIRED - PATH - id """ID""" path["id"] = id # REQUIRED - grading_periods[start_date] """The date the grading period starts.""" data["grading_periods[start_date]"] = grading_periods_start_date # REQUIRED - grading_periods[end_date] """no description""" data["grading_periods[end_date]"] = grading_periods_end_date # OPTIONAL - grading_periods[weight] """A weight value that contributes to the overall weight of a grading period set which is used to calculate how much assignments in this period contribute to the total grade""" if grading_periods_weight is not None: data["grading_periods[weight]"] = grading_periods_weight self.logger.debug("PUT /api/v1/courses/{course_id}/grading_periods/{id} with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("PUT", "/api/v1/courses/{course_id}/grading_periods/{id}".format(**path), data=data, params=params, no_data=True)
python
def update_single_grading_period(self, id, course_id, grading_periods_end_date, grading_periods_start_date, grading_periods_weight=None): """ Update a single grading period. Update an existing grading period. """ path = {} data = {} params = {} # REQUIRED - PATH - course_id """ID""" path["course_id"] = course_id # REQUIRED - PATH - id """ID""" path["id"] = id # REQUIRED - grading_periods[start_date] """The date the grading period starts.""" data["grading_periods[start_date]"] = grading_periods_start_date # REQUIRED - grading_periods[end_date] """no description""" data["grading_periods[end_date]"] = grading_periods_end_date # OPTIONAL - grading_periods[weight] """A weight value that contributes to the overall weight of a grading period set which is used to calculate how much assignments in this period contribute to the total grade""" if grading_periods_weight is not None: data["grading_periods[weight]"] = grading_periods_weight self.logger.debug("PUT /api/v1/courses/{course_id}/grading_periods/{id} with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("PUT", "/api/v1/courses/{course_id}/grading_periods/{id}".format(**path), data=data, params=params, no_data=True)
[ "def", "update_single_grading_period", "(", "self", ",", "id", ",", "course_id", ",", "grading_periods_end_date", ",", "grading_periods_start_date", ",", "grading_periods_weight", "=", "None", ")", ":", "path", "=", "{", "}", "data", "=", "{", "}", "params", "="...
Update a single grading period. Update an existing grading period.
[ "Update", "a", "single", "grading", "period", ".", "Update", "an", "existing", "grading", "period", "." ]
train
https://github.com/PGower/PyCanvas/blob/68520005382b440a1e462f9df369f54d364e21e8/pycanvas/apis/grading_periods.py#L74-L106
PGower/PyCanvas
pycanvas/apis/grading_periods.py
GradingPeriodsAPI.delete_grading_period_accounts
def delete_grading_period_accounts(self, id, account_id): """ Delete a grading period. <b>204 No Content</b> response code is returned if the deletion was successful. """ path = {} data = {} params = {} # REQUIRED - PATH - account_id """ID""" path["account_id"] = account_id # REQUIRED - PATH - id """ID""" path["id"] = id self.logger.debug("DELETE /api/v1/accounts/{account_id}/grading_periods/{id} with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("DELETE", "/api/v1/accounts/{account_id}/grading_periods/{id}".format(**path), data=data, params=params, no_data=True)
python
def delete_grading_period_accounts(self, id, account_id): """ Delete a grading period. <b>204 No Content</b> response code is returned if the deletion was successful. """ path = {} data = {} params = {} # REQUIRED - PATH - account_id """ID""" path["account_id"] = account_id # REQUIRED - PATH - id """ID""" path["id"] = id self.logger.debug("DELETE /api/v1/accounts/{account_id}/grading_periods/{id} with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("DELETE", "/api/v1/accounts/{account_id}/grading_periods/{id}".format(**path), data=data, params=params, no_data=True)
[ "def", "delete_grading_period_accounts", "(", "self", ",", "id", ",", "account_id", ")", ":", "path", "=", "{", "}", "data", "=", "{", "}", "params", "=", "{", "}", "# REQUIRED - PATH - account_id\r", "\"\"\"ID\"\"\"", "path", "[", "\"account_id\"", "]", "=", ...
Delete a grading period. <b>204 No Content</b> response code is returned if the deletion was successful.
[ "Delete", "a", "grading", "period", ".", "<b", ">", "204", "No", "Content<", "/", "b", ">", "response", "code", "is", "returned", "if", "the", "deletion", "was", "successful", "." ]
train
https://github.com/PGower/PyCanvas/blob/68520005382b440a1e462f9df369f54d364e21e8/pycanvas/apis/grading_periods.py#L130-L150
PGower/PyCanvas
pycanvas/apis/appointment_groups.py
AppointmentGroupsAPI.list_appointment_groups
def list_appointment_groups(self, context_codes=None, include=None, include_past_appointments=None, scope=None): """ List appointment groups. Retrieve the list of appointment groups that can be reserved or managed by the current user. """ path = {} data = {} params = {} # OPTIONAL - scope """Defaults to "reservable"""" if scope is not None: self._validate_enum(scope, ["reservable", "manageable"]) params["scope"] = scope # OPTIONAL - context_codes """Array of context codes used to limit returned results.""" if context_codes is not None: params["context_codes"] = context_codes # OPTIONAL - include_past_appointments """Defaults to false. If true, includes past appointment groups""" if include_past_appointments is not None: params["include_past_appointments"] = include_past_appointments # OPTIONAL - include """Array of additional information to include. "appointments":: calendar event time slots for this appointment group "child_events":: reservations of those time slots "participant_count":: number of reservations "reserved_times":: the event id, start time and end time of reservations the current user has made) "all_context_codes":: all context codes associated with this appointment group""" if include is not None: self._validate_enum(include, ["appointments", "child_events", "participant_count", "reserved_times", "all_context_codes"]) params["include"] = include self.logger.debug("GET /api/v1/appointment_groups with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("GET", "/api/v1/appointment_groups".format(**path), data=data, params=params, no_data=True)
python
def list_appointment_groups(self, context_codes=None, include=None, include_past_appointments=None, scope=None): """ List appointment groups. Retrieve the list of appointment groups that can be reserved or managed by the current user. """ path = {} data = {} params = {} # OPTIONAL - scope """Defaults to "reservable"""" if scope is not None: self._validate_enum(scope, ["reservable", "manageable"]) params["scope"] = scope # OPTIONAL - context_codes """Array of context codes used to limit returned results.""" if context_codes is not None: params["context_codes"] = context_codes # OPTIONAL - include_past_appointments """Defaults to false. If true, includes past appointment groups""" if include_past_appointments is not None: params["include_past_appointments"] = include_past_appointments # OPTIONAL - include """Array of additional information to include. "appointments":: calendar event time slots for this appointment group "child_events":: reservations of those time slots "participant_count":: number of reservations "reserved_times":: the event id, start time and end time of reservations the current user has made) "all_context_codes":: all context codes associated with this appointment group""" if include is not None: self._validate_enum(include, ["appointments", "child_events", "participant_count", "reserved_times", "all_context_codes"]) params["include"] = include self.logger.debug("GET /api/v1/appointment_groups with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("GET", "/api/v1/appointment_groups".format(**path), data=data, params=params, no_data=True)
[ "def", "list_appointment_groups", "(", "self", ",", "context_codes", "=", "None", ",", "include", "=", "None", ",", "include_past_appointments", "=", "None", ",", "scope", "=", "None", ")", ":", "path", "=", "{", "}", "data", "=", "{", "}", "params", "="...
List appointment groups. Retrieve the list of appointment groups that can be reserved or managed by the current user.
[ "List", "appointment", "groups", ".", "Retrieve", "the", "list", "of", "appointment", "groups", "that", "can", "be", "reserved", "or", "managed", "by", "the", "current", "user", "." ]
train
https://github.com/PGower/PyCanvas/blob/68520005382b440a1e462f9df369f54d364e21e8/pycanvas/apis/appointment_groups.py#L19-L60
PGower/PyCanvas
pycanvas/apis/appointment_groups.py
AppointmentGroupsAPI.create_appointment_group
def create_appointment_group(self, appointment_group_title, appointment_group_context_codes, appointment_group_description=None, appointment_group_location_address=None, appointment_group_location_name=None, appointment_group_max_appointments_per_participant=None, appointment_group_min_appointments_per_participant=None, appointment_group_new_appointments_X=None, appointment_group_participant_visibility=None, appointment_group_participants_per_appointment=None, appointment_group_publish=None, appointment_group_sub_context_codes=None): """ Create an appointment group. Create and return a new appointment group. If new_appointments are specified, the response will return a new_appointments array (same format as appointments array, see "List appointment groups" action) """ path = {} data = {} params = {} # REQUIRED - appointment_group[context_codes] """Array of context codes (courses, e.g. course_1) this group should be linked to (1 or more). Users in the course(s) with appropriate permissions will be able to sign up for this appointment group.""" data["appointment_group[context_codes]"] = appointment_group_context_codes # OPTIONAL - appointment_group[sub_context_codes] """Array of sub context codes (course sections or a single group category) this group should be linked to. Used to limit the appointment group to particular sections. 
If a group category is specified, students will sign up in groups and the participant_type will be "Group" instead of "User".""" if appointment_group_sub_context_codes is not None: data["appointment_group[sub_context_codes]"] = appointment_group_sub_context_codes # REQUIRED - appointment_group[title] """Short title for the appointment group.""" data["appointment_group[title]"] = appointment_group_title # OPTIONAL - appointment_group[description] """Longer text description of the appointment group.""" if appointment_group_description is not None: data["appointment_group[description]"] = appointment_group_description # OPTIONAL - appointment_group[location_name] """Location name of the appointment group.""" if appointment_group_location_name is not None: data["appointment_group[location_name]"] = appointment_group_location_name # OPTIONAL - appointment_group[location_address] """Location address.""" if appointment_group_location_address is not None: data["appointment_group[location_address]"] = appointment_group_location_address # OPTIONAL - appointment_group[publish] """Indicates whether this appointment group should be published (i.e. made available for signup). Once published, an appointment group cannot be unpublished. Defaults to false.""" if appointment_group_publish is not None: data["appointment_group[publish]"] = appointment_group_publish # OPTIONAL - appointment_group[participants_per_appointment] """Maximum number of participants that may register for each time slot. Defaults to null (no limit).""" if appointment_group_participants_per_appointment is not None: data["appointment_group[participants_per_appointment]"] = appointment_group_participants_per_appointment # OPTIONAL - appointment_group[min_appointments_per_participant] """Minimum number of time slots a user must register for. 
If not set, users do not need to sign up for any time slots.""" if appointment_group_min_appointments_per_participant is not None: data["appointment_group[min_appointments_per_participant]"] = appointment_group_min_appointments_per_participant # OPTIONAL - appointment_group[max_appointments_per_participant] """Maximum number of time slots a user may register for.""" if appointment_group_max_appointments_per_participant is not None: data["appointment_group[max_appointments_per_participant]"] = appointment_group_max_appointments_per_participant # OPTIONAL - appointment_group[new_appointments][X] """Nested array of start time/end time pairs indicating time slots for this appointment group. Refer to the example request.""" if appointment_group_new_appointments_X is not None: data["appointment_group[new_appointments][X]"] = appointment_group_new_appointments_X # OPTIONAL - appointment_group[participant_visibility] """"private":: participants cannot see who has signed up for a particular time slot "protected":: participants can see who has signed up. Defaults to "private".""" if appointment_group_participant_visibility is not None: self._validate_enum(appointment_group_participant_visibility, ["private", "protected"]) data["appointment_group[participant_visibility]"] = appointment_group_participant_visibility self.logger.debug("POST /api/v1/appointment_groups with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("POST", "/api/v1/appointment_groups".format(**path), data=data, params=params, no_data=True)
python
def create_appointment_group(self, appointment_group_title, appointment_group_context_codes, appointment_group_description=None, appointment_group_location_address=None, appointment_group_location_name=None, appointment_group_max_appointments_per_participant=None, appointment_group_min_appointments_per_participant=None, appointment_group_new_appointments_X=None, appointment_group_participant_visibility=None, appointment_group_participants_per_appointment=None, appointment_group_publish=None, appointment_group_sub_context_codes=None): """ Create an appointment group. Create and return a new appointment group. If new_appointments are specified, the response will return a new_appointments array (same format as appointments array, see "List appointment groups" action) """ path = {} data = {} params = {} # REQUIRED - appointment_group[context_codes] """Array of context codes (courses, e.g. course_1) this group should be linked to (1 or more). Users in the course(s) with appropriate permissions will be able to sign up for this appointment group.""" data["appointment_group[context_codes]"] = appointment_group_context_codes # OPTIONAL - appointment_group[sub_context_codes] """Array of sub context codes (course sections or a single group category) this group should be linked to. Used to limit the appointment group to particular sections. 
If a group category is specified, students will sign up in groups and the participant_type will be "Group" instead of "User".""" if appointment_group_sub_context_codes is not None: data["appointment_group[sub_context_codes]"] = appointment_group_sub_context_codes # REQUIRED - appointment_group[title] """Short title for the appointment group.""" data["appointment_group[title]"] = appointment_group_title # OPTIONAL - appointment_group[description] """Longer text description of the appointment group.""" if appointment_group_description is not None: data["appointment_group[description]"] = appointment_group_description # OPTIONAL - appointment_group[location_name] """Location name of the appointment group.""" if appointment_group_location_name is not None: data["appointment_group[location_name]"] = appointment_group_location_name # OPTIONAL - appointment_group[location_address] """Location address.""" if appointment_group_location_address is not None: data["appointment_group[location_address]"] = appointment_group_location_address # OPTIONAL - appointment_group[publish] """Indicates whether this appointment group should be published (i.e. made available for signup). Once published, an appointment group cannot be unpublished. Defaults to false.""" if appointment_group_publish is not None: data["appointment_group[publish]"] = appointment_group_publish # OPTIONAL - appointment_group[participants_per_appointment] """Maximum number of participants that may register for each time slot. Defaults to null (no limit).""" if appointment_group_participants_per_appointment is not None: data["appointment_group[participants_per_appointment]"] = appointment_group_participants_per_appointment # OPTIONAL - appointment_group[min_appointments_per_participant] """Minimum number of time slots a user must register for. 
If not set, users do not need to sign up for any time slots.""" if appointment_group_min_appointments_per_participant is not None: data["appointment_group[min_appointments_per_participant]"] = appointment_group_min_appointments_per_participant # OPTIONAL - appointment_group[max_appointments_per_participant] """Maximum number of time slots a user may register for.""" if appointment_group_max_appointments_per_participant is not None: data["appointment_group[max_appointments_per_participant]"] = appointment_group_max_appointments_per_participant # OPTIONAL - appointment_group[new_appointments][X] """Nested array of start time/end time pairs indicating time slots for this appointment group. Refer to the example request.""" if appointment_group_new_appointments_X is not None: data["appointment_group[new_appointments][X]"] = appointment_group_new_appointments_X # OPTIONAL - appointment_group[participant_visibility] """"private":: participants cannot see who has signed up for a particular time slot "protected":: participants can see who has signed up. Defaults to "private".""" if appointment_group_participant_visibility is not None: self._validate_enum(appointment_group_participant_visibility, ["private", "protected"]) data["appointment_group[participant_visibility]"] = appointment_group_participant_visibility self.logger.debug("POST /api/v1/appointment_groups with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("POST", "/api/v1/appointment_groups".format(**path), data=data, params=params, no_data=True)
[ "def", "create_appointment_group", "(", "self", ",", "appointment_group_title", ",", "appointment_group_context_codes", ",", "appointment_group_description", "=", "None", ",", "appointment_group_location_address", "=", "None", ",", "appointment_group_location_name", "=", "None"...
Create an appointment group. Create and return a new appointment group. If new_appointments are specified, the response will return a new_appointments array (same format as appointments array, see "List appointment groups" action)
[ "Create", "an", "appointment", "group", ".", "Create", "and", "return", "a", "new", "appointment", "group", ".", "If", "new_appointments", "are", "specified", "the", "response", "will", "return", "a", "new_appointments", "array", "(", "same", "format", "as", "...
train
https://github.com/PGower/PyCanvas/blob/68520005382b440a1e462f9df369f54d364e21e8/pycanvas/apis/appointment_groups.py#L62-L147
PGower/PyCanvas
pycanvas/apis/appointment_groups.py
AppointmentGroupsAPI.list_user_participants
def list_user_participants(self, id, registration_status=None): """ List user participants. List users that are (or may be) participating in this appointment group. Refer to the Users API for the response fields. Returns no results for appointment groups with the "Group" participant_type. """ path = {} data = {} params = {} # REQUIRED - PATH - id """ID""" path["id"] = id # OPTIONAL - registration_status """Limits results to the a given participation status, defaults to "all"""" if registration_status is not None: self._validate_enum(registration_status, ["all", "registered", "registered"]) params["registration_status"] = registration_status self.logger.debug("GET /api/v1/appointment_groups/{id}/users with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("GET", "/api/v1/appointment_groups/{id}/users".format(**path), data=data, params=params, no_data=True)
python
def list_user_participants(self, id, registration_status=None): """ List user participants. List users that are (or may be) participating in this appointment group. Refer to the Users API for the response fields. Returns no results for appointment groups with the "Group" participant_type. """ path = {} data = {} params = {} # REQUIRED - PATH - id """ID""" path["id"] = id # OPTIONAL - registration_status """Limits results to the a given participation status, defaults to "all"""" if registration_status is not None: self._validate_enum(registration_status, ["all", "registered", "registered"]) params["registration_status"] = registration_status self.logger.debug("GET /api/v1/appointment_groups/{id}/users with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("GET", "/api/v1/appointment_groups/{id}/users".format(**path), data=data, params=params, no_data=True)
[ "def", "list_user_participants", "(", "self", ",", "id", ",", "registration_status", "=", "None", ")", ":", "path", "=", "{", "}", "data", "=", "{", "}", "params", "=", "{", "}", "# REQUIRED - PATH - id\r", "\"\"\"ID\"\"\"", "path", "[", "\"id\"", "]", "="...
List user participants. List users that are (or may be) participating in this appointment group. Refer to the Users API for the response fields. Returns no results for appointment groups with the "Group" participant_type.
[ "List", "user", "participants", ".", "List", "users", "that", "are", "(", "or", "may", "be", ")", "participating", "in", "this", "appointment", "group", ".", "Refer", "to", "the", "Users", "API", "for", "the", "response", "fields", ".", "Returns", "no", ...
train
https://github.com/PGower/PyCanvas/blob/68520005382b440a1e462f9df369f54d364e21e8/pycanvas/apis/appointment_groups.py#L291-L314
PGower/PyCanvas
pycanvas/apis/appointment_groups.py
AppointmentGroupsAPI.get_next_appointment
def get_next_appointment(self, appointment_group_ids=None): """ Get next appointment. Return the next appointment available to sign up for. The appointment is returned in a one-element array. If no future appointments are available, an empty array is returned. """ path = {} data = {} params = {} # OPTIONAL - appointment_group_ids """List of ids of appointment groups to search.""" if appointment_group_ids is not None: params["appointment_group_ids"] = appointment_group_ids self.logger.debug("GET /api/v1/appointment_groups/next_appointment with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("GET", "/api/v1/appointment_groups/next_appointment".format(**path), data=data, params=params, all_pages=True)
python
def get_next_appointment(self, appointment_group_ids=None): """ Get next appointment. Return the next appointment available to sign up for. The appointment is returned in a one-element array. If no future appointments are available, an empty array is returned. """ path = {} data = {} params = {} # OPTIONAL - appointment_group_ids """List of ids of appointment groups to search.""" if appointment_group_ids is not None: params["appointment_group_ids"] = appointment_group_ids self.logger.debug("GET /api/v1/appointment_groups/next_appointment with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("GET", "/api/v1/appointment_groups/next_appointment".format(**path), data=data, params=params, all_pages=True)
[ "def", "get_next_appointment", "(", "self", ",", "appointment_group_ids", "=", "None", ")", ":", "path", "=", "{", "}", "data", "=", "{", "}", "params", "=", "{", "}", "# OPTIONAL - appointment_group_ids\r", "\"\"\"List of ids of appointment groups to search.\"\"\"", ...
Get next appointment. Return the next appointment available to sign up for. The appointment is returned in a one-element array. If no future appointments are available, an empty array is returned.
[ "Get", "next", "appointment", ".", "Return", "the", "next", "appointment", "available", "to", "sign", "up", "for", ".", "The", "appointment", "is", "returned", "in", "a", "one", "-", "element", "array", ".", "If", "no", "future", "appointments", "are", "av...
train
https://github.com/PGower/PyCanvas/blob/68520005382b440a1e462f9df369f54d364e21e8/pycanvas/apis/appointment_groups.py#L341-L359
bioasp/caspo
caspo/core/dataset.py
Dataset.to_funset
def to_funset(self, discrete): """ Converts the dataset to a set of `gringo.Fun`_ instances Parameters ---------- discrete : callable A discretization function Returns ------- set Representation of the dataset as a set of `gringo.Fun`_ instances .. _gringo.Fun: http://potassco.sourceforge.net/gringo.html#Fun """ fs = self.clampings.to_funset("exp") fs = fs.union(self.setup.to_funset()) for i, row in self.readouts.iterrows(): for var, val in row.iteritems(): if not np.isnan(val): fs.add(gringo.Fun('obs', [i, var, discrete(val)])) return fs
python
def to_funset(self, discrete): """ Converts the dataset to a set of `gringo.Fun`_ instances Parameters ---------- discrete : callable A discretization function Returns ------- set Representation of the dataset as a set of `gringo.Fun`_ instances .. _gringo.Fun: http://potassco.sourceforge.net/gringo.html#Fun """ fs = self.clampings.to_funset("exp") fs = fs.union(self.setup.to_funset()) for i, row in self.readouts.iterrows(): for var, val in row.iteritems(): if not np.isnan(val): fs.add(gringo.Fun('obs', [i, var, discrete(val)])) return fs
[ "def", "to_funset", "(", "self", ",", "discrete", ")", ":", "fs", "=", "self", ".", "clampings", ".", "to_funset", "(", "\"exp\"", ")", "fs", "=", "fs", ".", "union", "(", "self", ".", "setup", ".", "to_funset", "(", ")", ")", "for", "i", ",", "r...
Converts the dataset to a set of `gringo.Fun`_ instances Parameters ---------- discrete : callable A discretization function Returns ------- set Representation of the dataset as a set of `gringo.Fun`_ instances .. _gringo.Fun: http://potassco.sourceforge.net/gringo.html#Fun
[ "Converts", "the", "dataset", "to", "a", "set", "of", "gringo", ".", "Fun", "_", "instances" ]
train
https://github.com/bioasp/caspo/blob/a68d1eace75b9b08f23633d1fb5ce6134403959e/caspo/core/dataset.py#L139-L164
bharadwaj-raju/libdesktop
libdesktop/dialog/files.py
open_file
def open_file(default_dir='~', extensions=None, title='Choose a file', multiple_files=False, directory=False): '''Start the native file dialog for opening file(s). Starts the system native file dialog in order to open a file (or multiple files). The toolkit used for each platform: +-------------------------------------+------------------------------+ | Windows | Windows API (Win32) | +-------------------------------------+------------------------------+ | Mac OS X | Cocoa | +-------------------------------------+------------------------------+ | GNOME, Unity, Cinnamon, Pantheon | GTK+ 3 | +-------------------------------------+------------------------------+ | KDE, LXQt | Qt 5 (fallback: Qt 4/GTK+ 3) | +-------------------------------------+------------------------------+ | Other desktops (Xfce, WMs etc) | GTK+ 2 (fallback: GTK+ 3) | +-------------------------------------+------------------------------+ **Note on Dependencies** It depends on pywin32 for Windows (installed by default in Python for Windows) It depends on `PyQt <https://riverbankcomputing.com/software/pyqt>`_ for KDE and LxQt (usually installed by default on these). It depends on `PyGObject <https://wiki.gnome.org/Projects/PyGObject>`_ for GNOME etc. (virtually every Linux desktop has this). It depends on `PyGTK <https://pygtk.org>`_ for other desktops (not usually installed, so has a GTK+ 3 fallback). Args: default_dir (str) : The directory to start the dialog in. Default: User home directory. extensions (dict) : The extensions to filter by. Format: .. code-block:: python { 'Filter Name (example: Image Files)': ['*.png', '*.whatever', '*'] } title (str) : The title of the dialog. Default: `Choose a file` multiple_files (bool): Whether to choose multiple files or single files only. Default: `False` directory (bool): Whether to choose directories. Default: `False` Returns: list: `list` of `str` s (each `str` being a selected file). If nothing is selected/dialog is cancelled, it is `None`. 
''' default_dir = os.path.expanduser(default_dir) if not extensions: extensions = {} if system.get_name() == 'windows': pass # TODO: Implement Win32 file dialog elif system.get_name() == 'mac': pass # TODO: Implement Cocoa file dialog else: def gtk3_dialog(): # GTK+ 3 import gi gi.require_version('Gtk', '3.0') from gi.repository import Gtk class FileChooserWindow(Gtk.Window): def __init__(self): self.path = '' Gtk.Window.__init__(self, title='') dialog = Gtk.FileChooserDialog(title, None, Gtk.FileChooserAction.OPEN, (Gtk.STOCK_CANCEL, Gtk.ResponseType.CANCEL, Gtk.STOCK_OPEN, Gtk.ResponseType.OK) ) if extensions: for entry in extensions: file_filter = Gtk.FileFilter() file_filter.set_name(entry) for pattern in extensions[entry]: file_filter.add_pattern(pattern) dialog.add_filter(file_filter) dialog.set_select_multiple(multiple_files) dialog.set_current_folder(default_dir) response = dialog.run() if response == Gtk.ResponseType.OK: self.path = dialog.get_filenames() dialog.destroy() elif response == Gtk.ResponseType.CANCEL: self.path = None dialog.destroy() win = FileChooserWindow() win.connect('destroy', Gtk.main_quit) win.connect('delete-event', Gtk.main_quit) win.show_all() win.destroy() win.close() return win.path def qt5_dialog(): # Qt 5 try: from PyQt5 import Qt except ImportError: # The API is the same for what this uses from PyQt4 import Qt class FileChooserWindow(Qt.QWidget): def __init__(self): super().__init__() extensions_string = '' if extensions: for entry in extensions: # entry → Filter name (i.e. 'Image Files' etc) # value → Filter expression (i.e. 
'*.png, *.jpg' # etc) extensions_string += '%s (%s);;' % (entry, ' '.join(extensions[entry])) else: extensions_string = 'All Files (*)' dialog = Qt.QFileDialog() if multiple_files: dialog.setFileMode(Qt.QFileDialog.ExistingFiles) if directory: dialog.setFileMode(Qt.QFileDialog.Directory) dialog.setWindowTitle(title) dialog.setDirectory(default_dir) dialog.setNameFilter(extensions_string) if dialog.exec_(): self.path = dialog.selectedFiles() else: self.path = None app = Qt.QApplication(sys.argv) win = FileChooserWindow() win.close() if win.path: return win.path else: return None app.exec_() def gtk2_dialog(): # GTK+ 2 import pygtk pygtk.require('2.0') dialog = gtk.FileChooserDialog(title, None, gtk.FILE_CHOOSER_ACTION_OPEN, (gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL, gtk.STOCK_OPEN, gtk.RESPONSE_OK)) dialog.set_default_response(gtk.RESPONSE_OK) if extensions: for entry in extensions: file_filter = gtk.FileFilter() file_filter.set_name(entry) for pattern in extensions[entry]: file_filter.add_pattern(pattern) dialog.add_filter(file_filter) dialog.set_select_multiple(multiple_files) response = dialog.run() if response == gtk.RESPONSE_OK: return dialog.get_filenames() elif response == gtk.RESPONSE_CANCEL: return None dialog.destroy() if system.get_name() in ['gnome', 'unity', 'cinnamon', 'pantheon']: return gtk3_dialog() elif system.get_name() in ['kde', 'lxqt']: try: return qt5_dialog() except ImportError: return gtk3_dialog() else: try: return gtk2_dialog() except ImportError: return gtk3_dialog()
python
def open_file(default_dir='~', extensions=None, title='Choose a file', multiple_files=False, directory=False): '''Start the native file dialog for opening file(s). Starts the system native file dialog in order to open a file (or multiple files). The toolkit used for each platform: +-------------------------------------+------------------------------+ | Windows | Windows API (Win32) | +-------------------------------------+------------------------------+ | Mac OS X | Cocoa | +-------------------------------------+------------------------------+ | GNOME, Unity, Cinnamon, Pantheon | GTK+ 3 | +-------------------------------------+------------------------------+ | KDE, LXQt | Qt 5 (fallback: Qt 4/GTK+ 3) | +-------------------------------------+------------------------------+ | Other desktops (Xfce, WMs etc) | GTK+ 2 (fallback: GTK+ 3) | +-------------------------------------+------------------------------+ **Note on Dependencies** It depends on pywin32 for Windows (installed by default in Python for Windows) It depends on `PyQt <https://riverbankcomputing.com/software/pyqt>`_ for KDE and LxQt (usually installed by default on these). It depends on `PyGObject <https://wiki.gnome.org/Projects/PyGObject>`_ for GNOME etc. (virtually every Linux desktop has this). It depends on `PyGTK <https://pygtk.org>`_ for other desktops (not usually installed, so has a GTK+ 3 fallback). Args: default_dir (str) : The directory to start the dialog in. Default: User home directory. extensions (dict) : The extensions to filter by. Format: .. code-block:: python { 'Filter Name (example: Image Files)': ['*.png', '*.whatever', '*'] } title (str) : The title of the dialog. Default: `Choose a file` multiple_files (bool): Whether to choose multiple files or single files only. Default: `False` directory (bool): Whether to choose directories. Default: `False` Returns: list: `list` of `str` s (each `str` being a selected file). If nothing is selected/dialog is cancelled, it is `None`. 
''' default_dir = os.path.expanduser(default_dir) if not extensions: extensions = {} if system.get_name() == 'windows': pass # TODO: Implement Win32 file dialog elif system.get_name() == 'mac': pass # TODO: Implement Cocoa file dialog else: def gtk3_dialog(): # GTK+ 3 import gi gi.require_version('Gtk', '3.0') from gi.repository import Gtk class FileChooserWindow(Gtk.Window): def __init__(self): self.path = '' Gtk.Window.__init__(self, title='') dialog = Gtk.FileChooserDialog(title, None, Gtk.FileChooserAction.OPEN, (Gtk.STOCK_CANCEL, Gtk.ResponseType.CANCEL, Gtk.STOCK_OPEN, Gtk.ResponseType.OK) ) if extensions: for entry in extensions: file_filter = Gtk.FileFilter() file_filter.set_name(entry) for pattern in extensions[entry]: file_filter.add_pattern(pattern) dialog.add_filter(file_filter) dialog.set_select_multiple(multiple_files) dialog.set_current_folder(default_dir) response = dialog.run() if response == Gtk.ResponseType.OK: self.path = dialog.get_filenames() dialog.destroy() elif response == Gtk.ResponseType.CANCEL: self.path = None dialog.destroy() win = FileChooserWindow() win.connect('destroy', Gtk.main_quit) win.connect('delete-event', Gtk.main_quit) win.show_all() win.destroy() win.close() return win.path def qt5_dialog(): # Qt 5 try: from PyQt5 import Qt except ImportError: # The API is the same for what this uses from PyQt4 import Qt class FileChooserWindow(Qt.QWidget): def __init__(self): super().__init__() extensions_string = '' if extensions: for entry in extensions: # entry → Filter name (i.e. 'Image Files' etc) # value → Filter expression (i.e. 
'*.png, *.jpg' # etc) extensions_string += '%s (%s);;' % (entry, ' '.join(extensions[entry])) else: extensions_string = 'All Files (*)' dialog = Qt.QFileDialog() if multiple_files: dialog.setFileMode(Qt.QFileDialog.ExistingFiles) if directory: dialog.setFileMode(Qt.QFileDialog.Directory) dialog.setWindowTitle(title) dialog.setDirectory(default_dir) dialog.setNameFilter(extensions_string) if dialog.exec_(): self.path = dialog.selectedFiles() else: self.path = None app = Qt.QApplication(sys.argv) win = FileChooserWindow() win.close() if win.path: return win.path else: return None app.exec_() def gtk2_dialog(): # GTK+ 2 import pygtk pygtk.require('2.0') dialog = gtk.FileChooserDialog(title, None, gtk.FILE_CHOOSER_ACTION_OPEN, (gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL, gtk.STOCK_OPEN, gtk.RESPONSE_OK)) dialog.set_default_response(gtk.RESPONSE_OK) if extensions: for entry in extensions: file_filter = gtk.FileFilter() file_filter.set_name(entry) for pattern in extensions[entry]: file_filter.add_pattern(pattern) dialog.add_filter(file_filter) dialog.set_select_multiple(multiple_files) response = dialog.run() if response == gtk.RESPONSE_OK: return dialog.get_filenames() elif response == gtk.RESPONSE_CANCEL: return None dialog.destroy() if system.get_name() in ['gnome', 'unity', 'cinnamon', 'pantheon']: return gtk3_dialog() elif system.get_name() in ['kde', 'lxqt']: try: return qt5_dialog() except ImportError: return gtk3_dialog() else: try: return gtk2_dialog() except ImportError: return gtk3_dialog()
[ "def", "open_file", "(", "default_dir", "=", "'~'", ",", "extensions", "=", "None", ",", "title", "=", "'Choose a file'", ",", "multiple_files", "=", "False", ",", "directory", "=", "False", ")", ":", "default_dir", "=", "os", ".", "path", ".", "expanduser...
Start the native file dialog for opening file(s). Starts the system native file dialog in order to open a file (or multiple files). The toolkit used for each platform: +-------------------------------------+------------------------------+ | Windows | Windows API (Win32) | +-------------------------------------+------------------------------+ | Mac OS X | Cocoa | +-------------------------------------+------------------------------+ | GNOME, Unity, Cinnamon, Pantheon | GTK+ 3 | +-------------------------------------+------------------------------+ | KDE, LXQt | Qt 5 (fallback: Qt 4/GTK+ 3) | +-------------------------------------+------------------------------+ | Other desktops (Xfce, WMs etc) | GTK+ 2 (fallback: GTK+ 3) | +-------------------------------------+------------------------------+ **Note on Dependencies** It depends on pywin32 for Windows (installed by default in Python for Windows) It depends on `PyQt <https://riverbankcomputing.com/software/pyqt>`_ for KDE and LxQt (usually installed by default on these). It depends on `PyGObject <https://wiki.gnome.org/Projects/PyGObject>`_ for GNOME etc. (virtually every Linux desktop has this). It depends on `PyGTK <https://pygtk.org>`_ for other desktops (not usually installed, so has a GTK+ 3 fallback). Args: default_dir (str) : The directory to start the dialog in. Default: User home directory. extensions (dict) : The extensions to filter by. Format: .. code-block:: python { 'Filter Name (example: Image Files)': ['*.png', '*.whatever', '*'] } title (str) : The title of the dialog. Default: `Choose a file` multiple_files (bool): Whether to choose multiple files or single files only. Default: `False` directory (bool): Whether to choose directories. Default: `False` Returns: list: `list` of `str` s (each `str` being a selected file). If nothing is selected/dialog is cancelled, it is `None`.
[ "Start", "the", "native", "file", "dialog", "for", "opening", "file", "(", "s", ")", "." ]
train
https://github.com/bharadwaj-raju/libdesktop/blob/4d6b815755c76660b6ef4d2db6f54beff38c0db7/libdesktop/dialog/files.py#L33-L261
mrstephenneal/mysql-toolkit
mysql/toolkit/commands/dump.py
set_dump_directory
def set_dump_directory(base=None, sub_dir=None): """Create directory for dumping SQL commands.""" # Set current timestamp timestamp = datetime.fromtimestamp(time()).strftime('%Y-%m-%d %H-%M-%S') # Clean sub_dir if sub_dir and '.' in sub_dir: sub_dir = sub_dir.rsplit('.', 1)[0] # Create a directory to save fail SQL scripts # TODO: Replace with function that recursively creates directories until path exists if not os.path.exists(base): os.mkdir(base) dump_dir = os.path.join(base, sub_dir) if sub_dir else base if not os.path.exists(dump_dir): os.mkdir(dump_dir) dump_dir = os.path.join(dump_dir, timestamp) if not os.path.exists(dump_dir): os.mkdir(dump_dir) return dump_dir
python
def set_dump_directory(base=None, sub_dir=None): """Create directory for dumping SQL commands.""" # Set current timestamp timestamp = datetime.fromtimestamp(time()).strftime('%Y-%m-%d %H-%M-%S') # Clean sub_dir if sub_dir and '.' in sub_dir: sub_dir = sub_dir.rsplit('.', 1)[0] # Create a directory to save fail SQL scripts # TODO: Replace with function that recursively creates directories until path exists if not os.path.exists(base): os.mkdir(base) dump_dir = os.path.join(base, sub_dir) if sub_dir else base if not os.path.exists(dump_dir): os.mkdir(dump_dir) dump_dir = os.path.join(dump_dir, timestamp) if not os.path.exists(dump_dir): os.mkdir(dump_dir) return dump_dir
[ "def", "set_dump_directory", "(", "base", "=", "None", ",", "sub_dir", "=", "None", ")", ":", "# Set current timestamp", "timestamp", "=", "datetime", ".", "fromtimestamp", "(", "time", "(", ")", ")", ".", "strftime", "(", "'%Y-%m-%d %H-%M-%S'", ")", "# Clean ...
Create directory for dumping SQL commands.
[ "Create", "directory", "for", "dumping", "SQL", "commands", "." ]
train
https://github.com/mrstephenneal/mysql-toolkit/blob/6964f718f4b72eb30f2259adfcfaf3090526c53d/mysql/toolkit/commands/dump.py#L20-L39
mrstephenneal/mysql-toolkit
mysql/toolkit/commands/dump.py
dump_commands
def dump_commands(commands, directory=None, sub_dir=None): """ Dump SQL commands to .sql files. :param commands: List of SQL commands :param directory: Directory to dump commands to :param sub_dir: Sub directory :return: Directory failed commands were dumped to """ print('\t' + str(len(commands)), 'failed commands') # Create dump_dir directory if directory and os.path.isfile(directory): dump_dir = set_dump_directory(os.path.dirname(directory), sub_dir) return_dir = dump_dir elif directory: dump_dir = set_dump_directory(directory, sub_dir) return_dir = dump_dir else: dump_dir = TemporaryDirectory().name return_dir = TemporaryDirectory() # Create list of (path, content) tuples command_filepath = [(fail, os.path.join(dump_dir, str(count) + '.sql')) for count, fail in enumerate(commands)] # Dump failed commands to text file in the same directory as the commands # Utilize's multiprocessing module if it is available timer = Timer() if MULTIPROCESS: pool = Pool(cpu_count()) pool.map(write_text_tup, command_filepath) pool.close() print('\tDumped ', len(command_filepath), 'commands\n\t\tTime : {0}'.format(timer.end), '\n\t\tMethod : (multiprocessing)\n\t\tDirectory : {0}'.format(dump_dir)) else: for tup in command_filepath: write_text_tup(tup) print('\tDumped ', len(command_filepath), 'commands\n\t\tTime : {0}'.format(timer.end), '\n\t\tMethod : (sequential)\n\t\tDirectory : {0}'.format(dump_dir)) # Return base directory of dumped commands return return_dir
python
def dump_commands(commands, directory=None, sub_dir=None): """ Dump SQL commands to .sql files. :param commands: List of SQL commands :param directory: Directory to dump commands to :param sub_dir: Sub directory :return: Directory failed commands were dumped to """ print('\t' + str(len(commands)), 'failed commands') # Create dump_dir directory if directory and os.path.isfile(directory): dump_dir = set_dump_directory(os.path.dirname(directory), sub_dir) return_dir = dump_dir elif directory: dump_dir = set_dump_directory(directory, sub_dir) return_dir = dump_dir else: dump_dir = TemporaryDirectory().name return_dir = TemporaryDirectory() # Create list of (path, content) tuples command_filepath = [(fail, os.path.join(dump_dir, str(count) + '.sql')) for count, fail in enumerate(commands)] # Dump failed commands to text file in the same directory as the commands # Utilize's multiprocessing module if it is available timer = Timer() if MULTIPROCESS: pool = Pool(cpu_count()) pool.map(write_text_tup, command_filepath) pool.close() print('\tDumped ', len(command_filepath), 'commands\n\t\tTime : {0}'.format(timer.end), '\n\t\tMethod : (multiprocessing)\n\t\tDirectory : {0}'.format(dump_dir)) else: for tup in command_filepath: write_text_tup(tup) print('\tDumped ', len(command_filepath), 'commands\n\t\tTime : {0}'.format(timer.end), '\n\t\tMethod : (sequential)\n\t\tDirectory : {0}'.format(dump_dir)) # Return base directory of dumped commands return return_dir
[ "def", "dump_commands", "(", "commands", ",", "directory", "=", "None", ",", "sub_dir", "=", "None", ")", ":", "print", "(", "'\\t'", "+", "str", "(", "len", "(", "commands", ")", ")", ",", "'failed commands'", ")", "# Create dump_dir directory", "if", "di...
Dump SQL commands to .sql files. :param commands: List of SQL commands :param directory: Directory to dump commands to :param sub_dir: Sub directory :return: Directory failed commands were dumped to
[ "Dump", "SQL", "commands", "to", ".", "sql", "files", "." ]
train
https://github.com/mrstephenneal/mysql-toolkit/blob/6964f718f4b72eb30f2259adfcfaf3090526c53d/mysql/toolkit/commands/dump.py#L42-L83
mrstephenneal/mysql-toolkit
mysql/toolkit/commands/dump.py
write_text
def write_text(_command, txt_file): """Dump SQL command to a text file.""" command = _command.strip() with open(txt_file, 'w') as txt: txt.writelines(command)
python
def write_text(_command, txt_file): """Dump SQL command to a text file.""" command = _command.strip() with open(txt_file, 'w') as txt: txt.writelines(command)
[ "def", "write_text", "(", "_command", ",", "txt_file", ")", ":", "command", "=", "_command", ".", "strip", "(", ")", "with", "open", "(", "txt_file", ",", "'w'", ")", "as", "txt", ":", "txt", ".", "writelines", "(", "command", ")" ]
Dump SQL command to a text file.
[ "Dump", "SQL", "command", "to", "a", "text", "file", "." ]
train
https://github.com/mrstephenneal/mysql-toolkit/blob/6964f718f4b72eb30f2259adfcfaf3090526c53d/mysql/toolkit/commands/dump.py#L86-L90
mrstephenneal/mysql-toolkit
mysql/toolkit/commands/dump.py
get_commands_from_dir
def get_commands_from_dir(directory, zip_backup=True, remove_dir=True): """Traverse a directory and read contained SQL files.""" # Get SQL commands file paths failed_scripts = sorted([os.path.join(directory, fn) for fn in os.listdir(directory) if fn.endswith('.sql')]) # Read each failed SQL file and append contents to a list print('\tReading SQL scripts from files') commands = [] for sql_file in failed_scripts: with open(sql_file, 'r') as txt: sql_command = txt.read() commands.append(sql_command) # Remove most recent failures folder after reading if zip_backup: ZipBackup(directory).backup() if remove_dir: shutil.rmtree(directory) return commands
python
def get_commands_from_dir(directory, zip_backup=True, remove_dir=True): """Traverse a directory and read contained SQL files.""" # Get SQL commands file paths failed_scripts = sorted([os.path.join(directory, fn) for fn in os.listdir(directory) if fn.endswith('.sql')]) # Read each failed SQL file and append contents to a list print('\tReading SQL scripts from files') commands = [] for sql_file in failed_scripts: with open(sql_file, 'r') as txt: sql_command = txt.read() commands.append(sql_command) # Remove most recent failures folder after reading if zip_backup: ZipBackup(directory).backup() if remove_dir: shutil.rmtree(directory) return commands
[ "def", "get_commands_from_dir", "(", "directory", ",", "zip_backup", "=", "True", ",", "remove_dir", "=", "True", ")", ":", "# Get SQL commands file paths", "failed_scripts", "=", "sorted", "(", "[", "os", ".", "path", ".", "join", "(", "directory", ",", "fn",...
Traverse a directory and read contained SQL files.
[ "Traverse", "a", "directory", "and", "read", "contained", "SQL", "files", "." ]
train
https://github.com/mrstephenneal/mysql-toolkit/blob/6964f718f4b72eb30f2259adfcfaf3090526c53d/mysql/toolkit/commands/dump.py#L104-L122
anomaly/prestans
prestans/parser/parameter_set.py
ParameterSet.blueprint
def blueprint(self): """ blueprint support, returns a partial dictionary """ blueprint = dict() blueprint['type'] = "%s.%s" % (self.__module__, self.__class__.__name__) # Fields fields = dict() # inspects the attributes of a parameter set and tries to validate the input for attribute_name, type_instance in self.getmembers(): # must be one of the following types if not isinstance(type_instance, String) and \ not isinstance(type_instance, Float) and \ not isinstance(type_instance, Integer) and \ not isinstance(type_instance, Date) and \ not isinstance(type_instance, DateTime) and \ not isinstance(type_instance, Array): raise TypeError("%s should be instance of\ prestans.types.String/Integer/Float/Date/DateTime/Array" % attribute_name) if isinstance(type_instance, Array): if not isinstance(type_instance.element_template, String) and \ not isinstance(type_instance.element_template, Float) and \ not isinstance(type_instance.element_template, Integer): raise TypeError("%s should be instance of \ prestans.types.String/Integer/Float/Array" % attribute_name) fields[attribute_name] = type_instance.blueprint() blueprint['fields'] = fields return blueprint
python
def blueprint(self): """ blueprint support, returns a partial dictionary """ blueprint = dict() blueprint['type'] = "%s.%s" % (self.__module__, self.__class__.__name__) # Fields fields = dict() # inspects the attributes of a parameter set and tries to validate the input for attribute_name, type_instance in self.getmembers(): # must be one of the following types if not isinstance(type_instance, String) and \ not isinstance(type_instance, Float) and \ not isinstance(type_instance, Integer) and \ not isinstance(type_instance, Date) and \ not isinstance(type_instance, DateTime) and \ not isinstance(type_instance, Array): raise TypeError("%s should be instance of\ prestans.types.String/Integer/Float/Date/DateTime/Array" % attribute_name) if isinstance(type_instance, Array): if not isinstance(type_instance.element_template, String) and \ not isinstance(type_instance.element_template, Float) and \ not isinstance(type_instance.element_template, Integer): raise TypeError("%s should be instance of \ prestans.types.String/Integer/Float/Array" % attribute_name) fields[attribute_name] = type_instance.blueprint() blueprint['fields'] = fields return blueprint
[ "def", "blueprint", "(", "self", ")", ":", "blueprint", "=", "dict", "(", ")", "blueprint", "[", "'type'", "]", "=", "\"%s.%s\"", "%", "(", "self", ".", "__module__", ",", "self", ".", "__class__", ".", "__name__", ")", "# Fields", "fields", "=", "dict...
blueprint support, returns a partial dictionary
[ "blueprint", "support", "returns", "a", "partial", "dictionary" ]
train
https://github.com/anomaly/prestans/blob/13f5b2467bfd403dcd2d085f15cbf4644044f105/prestans/parser/parameter_set.py#L56-L90
anomaly/prestans
prestans/parser/parameter_set.py
ParameterSet.validate
def validate(self, request): """ validate method for %ParameterSet Since the introduction of ResponseFieldListParser, the parameter _response_field_list will be ignored, this is a prestans reserved parameter, and cannot be used by apps. :param request: The request object to be validated :type request: webob.request.Request :return The validated parameter set :rtype: ParameterSet """ validated_parameter_set = self.__class__() # Inspects the attributes of a parameter set and tries to validate the input for attribute_name, type_instance in self.getmembers(): #: Must be one of the following types if not isinstance(type_instance, String) and \ not isinstance(type_instance, Float) and \ not isinstance(type_instance, Integer) and \ not isinstance(type_instance, Date) and \ not isinstance(type_instance, DateTime) and \ not isinstance(type_instance, Array): raise TypeError("%s should be of type \ prestans.types.String/Integer/Float/Date/DateTime/Array" % attribute_name) if issubclass(type_instance.__class__, Array): if not isinstance(type_instance.element_template, String) and \ not isinstance(type_instance.element_template, Float) and \ not isinstance(type_instance.element_template, Integer): raise TypeError("%s elements should be of \ type prestans.types.String/Integer/Float" % attribute_name) try: #: Get input from parameters #: Empty list returned if key is missing for getall if issubclass(type_instance.__class__, Array): validation_input = request.params.getall(attribute_name) #: Key error thrown if key is missing for getone else: try: validation_input = request.params.getone(attribute_name) except KeyError: validation_input = None #: Validate input based on data type rules, #: raises DataTypeValidationException if validation fails validation_result = type_instance.validate(validation_input) setattr(validated_parameter_set, attribute_name, validation_result) except exception.DataValidationException as exp: raise exception.ValidationError( message=str(exp), 
attribute_name=attribute_name, value=validation_input, blueprint=type_instance.blueprint()) return validated_parameter_set
python
def validate(self, request): """ validate method for %ParameterSet Since the introduction of ResponseFieldListParser, the parameter _response_field_list will be ignored, this is a prestans reserved parameter, and cannot be used by apps. :param request: The request object to be validated :type request: webob.request.Request :return The validated parameter set :rtype: ParameterSet """ validated_parameter_set = self.__class__() # Inspects the attributes of a parameter set and tries to validate the input for attribute_name, type_instance in self.getmembers(): #: Must be one of the following types if not isinstance(type_instance, String) and \ not isinstance(type_instance, Float) and \ not isinstance(type_instance, Integer) and \ not isinstance(type_instance, Date) and \ not isinstance(type_instance, DateTime) and \ not isinstance(type_instance, Array): raise TypeError("%s should be of type \ prestans.types.String/Integer/Float/Date/DateTime/Array" % attribute_name) if issubclass(type_instance.__class__, Array): if not isinstance(type_instance.element_template, String) and \ not isinstance(type_instance.element_template, Float) and \ not isinstance(type_instance.element_template, Integer): raise TypeError("%s elements should be of \ type prestans.types.String/Integer/Float" % attribute_name) try: #: Get input from parameters #: Empty list returned if key is missing for getall if issubclass(type_instance.__class__, Array): validation_input = request.params.getall(attribute_name) #: Key error thrown if key is missing for getone else: try: validation_input = request.params.getone(attribute_name) except KeyError: validation_input = None #: Validate input based on data type rules, #: raises DataTypeValidationException if validation fails validation_result = type_instance.validate(validation_input) setattr(validated_parameter_set, attribute_name, validation_result) except exception.DataValidationException as exp: raise exception.ValidationError( message=str(exp), 
attribute_name=attribute_name, value=validation_input, blueprint=type_instance.blueprint()) return validated_parameter_set
[ "def", "validate", "(", "self", ",", "request", ")", ":", "validated_parameter_set", "=", "self", ".", "__class__", "(", ")", "# Inspects the attributes of a parameter set and tries to validate the input", "for", "attribute_name", ",", "type_instance", "in", "self", ".", ...
validate method for %ParameterSet Since the introduction of ResponseFieldListParser, the parameter _response_field_list will be ignored, this is a prestans reserved parameter, and cannot be used by apps. :param request: The request object to be validated :type request: webob.request.Request :return The validated parameter set :rtype: ParameterSet
[ "validate", "method", "for", "%ParameterSet" ]
train
https://github.com/anomaly/prestans/blob/13f5b2467bfd403dcd2d085f15cbf4644044f105/prestans/parser/parameter_set.py#L92-L154
Rockhopper-Technologies/pluginlib
pluginlib/_parent.py
_check_methods
def _check_methods(cls, subclass): # pylint: disable=too-many-branches """ Args: cls(:py:class:`Plugin`): Parent class subclass(:py:class:`Plugin`): Subclass to evaluate Returns: Result: Named tuple Validate abstract methods are defined in subclass For error codes see _inspect_class """ for meth, methobj in cls.__abstractmethods__.items(): # Need to get attribute from dictionary for instance tests to work for base in subclass.__mro__: # pragma: no branch if meth in base.__dict__: submethobj = base.__dict__[meth] break # If we found our abstract method, we didn't find anything if submethobj is methobj: submethobj = UNDEFINED # Determine if we have the right method type result = None bad_arg_spec = 'Argument spec does not match parent for method %s' # pylint: disable=deprecated-method if isinstance(methobj, property): if submethobj is UNDEFINED or not isinstance(submethobj, property): result = Result(False, 'Does not contain required property (%s)' % meth, 210) elif isinstance(methobj, staticmethod): if submethobj is UNDEFINED or not isinstance(submethobj, staticmethod): result = Result(False, 'Does not contain required static method (%s)' % meth, 211) elif PY26: # pragma: no cover if getfullargspec(methobj.__get__(True)) != \ getfullargspec(submethobj.__get__(True)): result = Result(False, bad_arg_spec % meth, 220) elif getfullargspec(methobj.__func__) != getfullargspec(submethobj.__func__): result = Result(False, bad_arg_spec % meth, 220) elif isinstance(methobj, classmethod): if submethobj is UNDEFINED or not isinstance(submethobj, classmethod): result = Result(False, 'Does not contain required class method (%s)' % meth, 212) elif PY26: # pragma: no cover if getfullargspec(methobj.__get__(True).__func__) != \ getfullargspec(submethobj.__get__(True).__func__): result = Result(False, bad_arg_spec % meth, 220) elif getfullargspec(methobj.__func__) != getfullargspec(submethobj.__func__): result = Result(False, bad_arg_spec % meth, 220) elif isfunction(methobj): if 
submethobj is UNDEFINED or not isfunction(submethobj): result = Result(False, 'Does not contain required method (%s)' % meth, 213) elif getfullargspec(methobj) != getfullargspec(submethobj): result = Result(False, bad_arg_spec % meth, 220) # If it's not a type we're specifically checking, just check for existence elif submethobj is UNDEFINED: result = Result(False, 'Does not contain required attribute (%s)' % meth, 214) if result: return result return Result(True, None, 0)
python
def _check_methods(cls, subclass): # pylint: disable=too-many-branches """ Args: cls(:py:class:`Plugin`): Parent class subclass(:py:class:`Plugin`): Subclass to evaluate Returns: Result: Named tuple Validate abstract methods are defined in subclass For error codes see _inspect_class """ for meth, methobj in cls.__abstractmethods__.items(): # Need to get attribute from dictionary for instance tests to work for base in subclass.__mro__: # pragma: no branch if meth in base.__dict__: submethobj = base.__dict__[meth] break # If we found our abstract method, we didn't find anything if submethobj is methobj: submethobj = UNDEFINED # Determine if we have the right method type result = None bad_arg_spec = 'Argument spec does not match parent for method %s' # pylint: disable=deprecated-method if isinstance(methobj, property): if submethobj is UNDEFINED or not isinstance(submethobj, property): result = Result(False, 'Does not contain required property (%s)' % meth, 210) elif isinstance(methobj, staticmethod): if submethobj is UNDEFINED or not isinstance(submethobj, staticmethod): result = Result(False, 'Does not contain required static method (%s)' % meth, 211) elif PY26: # pragma: no cover if getfullargspec(methobj.__get__(True)) != \ getfullargspec(submethobj.__get__(True)): result = Result(False, bad_arg_spec % meth, 220) elif getfullargspec(methobj.__func__) != getfullargspec(submethobj.__func__): result = Result(False, bad_arg_spec % meth, 220) elif isinstance(methobj, classmethod): if submethobj is UNDEFINED or not isinstance(submethobj, classmethod): result = Result(False, 'Does not contain required class method (%s)' % meth, 212) elif PY26: # pragma: no cover if getfullargspec(methobj.__get__(True).__func__) != \ getfullargspec(submethobj.__get__(True).__func__): result = Result(False, bad_arg_spec % meth, 220) elif getfullargspec(methobj.__func__) != getfullargspec(submethobj.__func__): result = Result(False, bad_arg_spec % meth, 220) elif isfunction(methobj): if 
submethobj is UNDEFINED or not isfunction(submethobj): result = Result(False, 'Does not contain required method (%s)' % meth, 213) elif getfullargspec(methobj) != getfullargspec(submethobj): result = Result(False, bad_arg_spec % meth, 220) # If it's not a type we're specifically checking, just check for existence elif submethobj is UNDEFINED: result = Result(False, 'Does not contain required attribute (%s)' % meth, 214) if result: return result return Result(True, None, 0)
[ "def", "_check_methods", "(", "cls", ",", "subclass", ")", ":", "# pylint: disable=too-many-branches", "for", "meth", ",", "methobj", "in", "cls", ".", "__abstractmethods__", ".", "items", "(", ")", ":", "# Need to get attribute from dictionary for instance tests to work"...
Args: cls(:py:class:`Plugin`): Parent class subclass(:py:class:`Plugin`): Subclass to evaluate Returns: Result: Named tuple Validate abstract methods are defined in subclass For error codes see _inspect_class
[ "Args", ":", "cls", "(", ":", "py", ":", "class", ":", "Plugin", ")", ":", "Parent", "class", "subclass", "(", ":", "py", ":", "class", ":", "Plugin", ")", ":", "Subclass", "to", "evaluate" ]
train
https://github.com/Rockhopper-Technologies/pluginlib/blob/8beb78984dd9c97c493642df9da9f1b5a1c5e2b2/pluginlib/_parent.py#L40-L107
Rockhopper-Technologies/pluginlib
pluginlib/_parent.py
_inspect_class
def _inspect_class(cls, subclass): """ Args: cls(:py:class:`Plugin`): Parent class subclass(:py:class:`Plugin`): Subclass to evaluate Returns: Result: Named tuple Inspect subclass for inclusion Values for errorcode: * 0: No error Error codes between 0 and 100 are not intended for import * 50 Skipload flag is True Error codes between 99 and 200 are excluded from import * 156: Skipload call returned True Error codes 200 and above are malformed classes * 210: Missing abstract property * 211: Missing abstract static method * 212: Missing abstract class method * 213: Missing abstract method * 214: Missing abstract attribute * 220: Argument spec does not match """ if callable(subclass._skipload_): result = subclass._skipload_() if isinstance(result, tuple): skip, msg = result else: skip, msg = result, None if skip: return Result(False, msg, 156) elif subclass._skipload_: return Result(False, 'Skipload flag is True', 50) return _check_methods(cls, subclass)
python
def _inspect_class(cls, subclass): """ Args: cls(:py:class:`Plugin`): Parent class subclass(:py:class:`Plugin`): Subclass to evaluate Returns: Result: Named tuple Inspect subclass for inclusion Values for errorcode: * 0: No error Error codes between 0 and 100 are not intended for import * 50 Skipload flag is True Error codes between 99 and 200 are excluded from import * 156: Skipload call returned True Error codes 200 and above are malformed classes * 210: Missing abstract property * 211: Missing abstract static method * 212: Missing abstract class method * 213: Missing abstract method * 214: Missing abstract attribute * 220: Argument spec does not match """ if callable(subclass._skipload_): result = subclass._skipload_() if isinstance(result, tuple): skip, msg = result else: skip, msg = result, None if skip: return Result(False, msg, 156) elif subclass._skipload_: return Result(False, 'Skipload flag is True', 50) return _check_methods(cls, subclass)
[ "def", "_inspect_class", "(", "cls", ",", "subclass", ")", ":", "if", "callable", "(", "subclass", ".", "_skipload_", ")", ":", "result", "=", "subclass", ".", "_skipload_", "(", ")", "if", "isinstance", "(", "result", ",", "tuple", ")", ":", "skip", "...
Args: cls(:py:class:`Plugin`): Parent class subclass(:py:class:`Plugin`): Subclass to evaluate Returns: Result: Named tuple Inspect subclass for inclusion Values for errorcode: * 0: No error Error codes between 0 and 100 are not intended for import * 50 Skipload flag is True Error codes between 99 and 200 are excluded from import * 156: Skipload call returned True Error codes 200 and above are malformed classes * 210: Missing abstract property * 211: Missing abstract static method * 212: Missing abstract class method * 213: Missing abstract method * 214: Missing abstract attribute * 220: Argument spec does not match
[ "Args", ":", "cls", "(", ":", "py", ":", "class", ":", "Plugin", ")", ":", "Parent", "class", "subclass", "(", ":", "py", ":", "class", ":", "Plugin", ")", ":", "Subclass", "to", "evaluate" ]
train
https://github.com/Rockhopper-Technologies/pluginlib/blob/8beb78984dd9c97c493642df9da9f1b5a1c5e2b2/pluginlib/_parent.py#L110-L158
Rockhopper-Technologies/pluginlib
pluginlib/_parent.py
Plugin.version
def version(cls): # noqa: N805 # pylint: disable=no-self-argument """ :py:class:Returns `str` -- Returns :attr:`_version_` if set, otherwise falls back to module `__version__` or None """ return cls._version_ or getattr(sys.modules.get(cls.__module__, None), '__version__', None)
python
def version(cls): # noqa: N805 # pylint: disable=no-self-argument """ :py:class:Returns `str` -- Returns :attr:`_version_` if set, otherwise falls back to module `__version__` or None """ return cls._version_ or getattr(sys.modules.get(cls.__module__, None), '__version__', None)
[ "def", "version", "(", "cls", ")", ":", "# noqa: N805 # pylint: disable=no-self-argument", "return", "cls", ".", "_version_", "or", "getattr", "(", "sys", ".", "modules", ".", "get", "(", "cls", ".", "__module__", ",", "None", ")", ",", "'__version__'", ",", ...
:py:class:Returns `str` -- Returns :attr:`_version_` if set, otherwise falls back to module `__version__` or None
[ ":", "py", ":", "class", ":", "Returns", "str", "--", "Returns", ":", "attr", ":", "_version_", "if", "set", "otherwise", "falls", "back", "to", "module", "__version__", "or", "None" ]
train
https://github.com/Rockhopper-Technologies/pluginlib/blob/8beb78984dd9c97c493642df9da9f1b5a1c5e2b2/pluginlib/_parent.py#L277-L283
inveniosoftware/invenio-pages
invenio_pages/views.py
preload_pages
def preload_pages(): """Register all pages before the first application request.""" try: _add_url_rule([page.url for page in Page.query.all()]) except Exception: # pragma: no cover current_app.logger.warn('Pages were not loaded.') raise
python
def preload_pages(): """Register all pages before the first application request.""" try: _add_url_rule([page.url for page in Page.query.all()]) except Exception: # pragma: no cover current_app.logger.warn('Pages were not loaded.') raise
[ "def", "preload_pages", "(", ")", ":", "try", ":", "_add_url_rule", "(", "[", "page", ".", "url", "for", "page", "in", "Page", ".", "query", ".", "all", "(", ")", "]", ")", "except", "Exception", ":", "# pragma: no cover", "current_app", ".", "logger", ...
Register all pages before the first application request.
[ "Register", "all", "pages", "before", "the", "first", "application", "request", "." ]
train
https://github.com/inveniosoftware/invenio-pages/blob/8d544d72fb4c22b7134c521f435add0abed42544/invenio_pages/views.py#L46-L52
inveniosoftware/invenio-pages
invenio_pages/views.py
render_page
def render_page(path): """Internal interface to the page view. :param path: Page path. :returns: The rendered template. """ try: page = Page.get_by_url(request.path) except NoResultFound: abort(404) return render_template( [page.template_name, current_app.config['PAGES_DEFAULT_TEMPLATE']], page=page)
python
def render_page(path): """Internal interface to the page view. :param path: Page path. :returns: The rendered template. """ try: page = Page.get_by_url(request.path) except NoResultFound: abort(404) return render_template( [page.template_name, current_app.config['PAGES_DEFAULT_TEMPLATE']], page=page)
[ "def", "render_page", "(", "path", ")", ":", "try", ":", "page", "=", "Page", ".", "get_by_url", "(", "request", ".", "path", ")", "except", "NoResultFound", ":", "abort", "(", "404", ")", "return", "render_template", "(", "[", "page", ".", "template_nam...
Internal interface to the page view. :param path: Page path. :returns: The rendered template.
[ "Internal", "interface", "to", "the", "page", "view", "." ]
train
https://github.com/inveniosoftware/invenio-pages/blob/8d544d72fb4c22b7134c521f435add0abed42544/invenio_pages/views.py#L76-L89
inveniosoftware/invenio-pages
invenio_pages/views.py
handle_not_found
def handle_not_found(exception, **extra): """Custom blueprint exception handler.""" assert isinstance(exception, NotFound) page = Page.query.filter(db.or_(Page.url == request.path, Page.url == request.path + "/")).first() if page: _add_url_rule(page.url) return render_template( [ page.template_name, current_app.config['PAGES_DEFAULT_TEMPLATE'] ], page=page ) elif 'wrapped' in extra: return extra['wrapped'](exception) else: return exception
python
def handle_not_found(exception, **extra): """Custom blueprint exception handler.""" assert isinstance(exception, NotFound) page = Page.query.filter(db.or_(Page.url == request.path, Page.url == request.path + "/")).first() if page: _add_url_rule(page.url) return render_template( [ page.template_name, current_app.config['PAGES_DEFAULT_TEMPLATE'] ], page=page ) elif 'wrapped' in extra: return extra['wrapped'](exception) else: return exception
[ "def", "handle_not_found", "(", "exception", ",", "*", "*", "extra", ")", ":", "assert", "isinstance", "(", "exception", ",", "NotFound", ")", "page", "=", "Page", ".", "query", ".", "filter", "(", "db", ".", "or_", "(", "Page", ".", "url", "==", "re...
Custom blueprint exception handler.
[ "Custom", "blueprint", "exception", "handler", "." ]
train
https://github.com/inveniosoftware/invenio-pages/blob/8d544d72fb4c22b7134c521f435add0abed42544/invenio_pages/views.py#L92-L111
inveniosoftware/invenio-pages
invenio_pages/views.py
_add_url_rule
def _add_url_rule(url_or_urls): """Register URL rule to application URL map.""" old = current_app._got_first_request # This is bit of cheating to overcome @flask.app.setupmethod decorator. current_app._got_first_request = False if isinstance(url_or_urls, six.string_types): url_or_urls = [url_or_urls] map(lambda url: current_app.add_url_rule(url, 'invenio_pages.view', view), url_or_urls) current_app._got_first_request = old
python
def _add_url_rule(url_or_urls): """Register URL rule to application URL map.""" old = current_app._got_first_request # This is bit of cheating to overcome @flask.app.setupmethod decorator. current_app._got_first_request = False if isinstance(url_or_urls, six.string_types): url_or_urls = [url_or_urls] map(lambda url: current_app.add_url_rule(url, 'invenio_pages.view', view), url_or_urls) current_app._got_first_request = old
[ "def", "_add_url_rule", "(", "url_or_urls", ")", ":", "old", "=", "current_app", ".", "_got_first_request", "# This is bit of cheating to overcome @flask.app.setupmethod decorator.", "current_app", ".", "_got_first_request", "=", "False", "if", "isinstance", "(", "url_or_urls...
Register URL rule to application URL map.
[ "Register", "URL", "rule", "to", "application", "URL", "map", "." ]
train
https://github.com/inveniosoftware/invenio-pages/blob/8d544d72fb4c22b7134c521f435add0abed42544/invenio_pages/views.py#L114-L123
PGower/PyCanvas
pycanvas/apis/collaborations.py
CollaborationsAPI.list_members_of_collaboration
def list_members_of_collaboration(self, id, include=None): """ List members of a collaboration. List the collaborators of a given collaboration """ path = {} data = {} params = {} # REQUIRED - PATH - id """ID""" path["id"] = id # OPTIONAL - include """- "collaborator_lti_id": Optional information to include with each member. Represents an identifier to be used for the member in an LTI context. - "avatar_image_url": Optional information to include with each member. The url for the avatar of a collaborator with type 'user'.""" if include is not None: self._validate_enum(include, ["collaborator_lti_id", "avatar_image_url"]) params["include"] = include self.logger.debug("GET /api/v1/collaborations/{id}/members with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("GET", "/api/v1/collaborations/{id}/members".format(**path), data=data, params=params, all_pages=True)
python
def list_members_of_collaboration(self, id, include=None): """ List members of a collaboration. List the collaborators of a given collaboration """ path = {} data = {} params = {} # REQUIRED - PATH - id """ID""" path["id"] = id # OPTIONAL - include """- "collaborator_lti_id": Optional information to include with each member. Represents an identifier to be used for the member in an LTI context. - "avatar_image_url": Optional information to include with each member. The url for the avatar of a collaborator with type 'user'.""" if include is not None: self._validate_enum(include, ["collaborator_lti_id", "avatar_image_url"]) params["include"] = include self.logger.debug("GET /api/v1/collaborations/{id}/members with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("GET", "/api/v1/collaborations/{id}/members".format(**path), data=data, params=params, all_pages=True)
[ "def", "list_members_of_collaboration", "(", "self", ",", "id", ",", "include", "=", "None", ")", ":", "path", "=", "{", "}", "data", "=", "{", "}", "params", "=", "{", "}", "# REQUIRED - PATH - id\r", "\"\"\"ID\"\"\"", "path", "[", "\"id\"", "]", "=", "...
List members of a collaboration. List the collaborators of a given collaboration
[ "List", "members", "of", "a", "collaboration", ".", "List", "the", "collaborators", "of", "a", "given", "collaboration" ]
train
https://github.com/PGower/PyCanvas/blob/68520005382b440a1e462f9df369f54d364e21e8/pycanvas/apis/collaborations.py#L61-L85
PGower/PyCanvas
pycanvas/apis/discussion_topics.py
DiscussionTopicsAPI.list_discussion_topics_courses
def list_discussion_topics_courses(self, course_id, exclude_context_module_locked_topics=None, include=None, only_announcements=None, order_by=None, scope=None, search_term=None): """ List discussion topics. Returns the paginated list of discussion topics for this course or group. """ path = {} data = {} params = {} # REQUIRED - PATH - course_id """ID""" path["course_id"] = course_id # OPTIONAL - include """If "all_dates" is passed, all dates associated with graded discussions' assignments will be included.""" if include is not None: self._validate_enum(include, ["all_dates"]) params["include"] = include # OPTIONAL - order_by """Determines the order of the discussion topic list. Defaults to "position".""" if order_by is not None: self._validate_enum(order_by, ["position", "recent_activity"]) params["order_by"] = order_by # OPTIONAL - scope """Only return discussion topics in the given state(s). Defaults to including all topics. Filtering is done after pagination, so pages may be smaller than requested if topics are filtered. Can pass multiple states as comma separated string.""" if scope is not None: self._validate_enum(scope, ["locked", "unlocked", "pinned", "unpinned"]) params["scope"] = scope # OPTIONAL - only_announcements """Return announcements instead of discussion topics. Defaults to false""" if only_announcements is not None: params["only_announcements"] = only_announcements # OPTIONAL - search_term """The partial title of the discussion topics to match and return.""" if search_term is not None: params["search_term"] = search_term # OPTIONAL - exclude_context_module_locked_topics """For students, exclude topics that are locked by module progression. 
Defaults to false.""" if exclude_context_module_locked_topics is not None: params["exclude_context_module_locked_topics"] = exclude_context_module_locked_topics self.logger.debug("GET /api/v1/courses/{course_id}/discussion_topics with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("GET", "/api/v1/courses/{course_id}/discussion_topics".format(**path), data=data, params=params, all_pages=True)
python
def list_discussion_topics_courses(self, course_id, exclude_context_module_locked_topics=None, include=None, only_announcements=None, order_by=None, scope=None, search_term=None): """ List discussion topics. Returns the paginated list of discussion topics for this course or group. """ path = {} data = {} params = {} # REQUIRED - PATH - course_id """ID""" path["course_id"] = course_id # OPTIONAL - include """If "all_dates" is passed, all dates associated with graded discussions' assignments will be included.""" if include is not None: self._validate_enum(include, ["all_dates"]) params["include"] = include # OPTIONAL - order_by """Determines the order of the discussion topic list. Defaults to "position".""" if order_by is not None: self._validate_enum(order_by, ["position", "recent_activity"]) params["order_by"] = order_by # OPTIONAL - scope """Only return discussion topics in the given state(s). Defaults to including all topics. Filtering is done after pagination, so pages may be smaller than requested if topics are filtered. Can pass multiple states as comma separated string.""" if scope is not None: self._validate_enum(scope, ["locked", "unlocked", "pinned", "unpinned"]) params["scope"] = scope # OPTIONAL - only_announcements """Return announcements instead of discussion topics. Defaults to false""" if only_announcements is not None: params["only_announcements"] = only_announcements # OPTIONAL - search_term """The partial title of the discussion topics to match and return.""" if search_term is not None: params["search_term"] = search_term # OPTIONAL - exclude_context_module_locked_topics """For students, exclude topics that are locked by module progression. 
Defaults to false.""" if exclude_context_module_locked_topics is not None: params["exclude_context_module_locked_topics"] = exclude_context_module_locked_topics self.logger.debug("GET /api/v1/courses/{course_id}/discussion_topics with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("GET", "/api/v1/courses/{course_id}/discussion_topics".format(**path), data=data, params=params, all_pages=True)
[ "def", "list_discussion_topics_courses", "(", "self", ",", "course_id", ",", "exclude_context_module_locked_topics", "=", "None", ",", "include", "=", "None", ",", "only_announcements", "=", "None", ",", "order_by", "=", "None", ",", "scope", "=", "None", ",", "...
List discussion topics. Returns the paginated list of discussion topics for this course or group.
[ "List", "discussion", "topics", ".", "Returns", "the", "paginated", "list", "of", "discussion", "topics", "for", "this", "course", "or", "group", "." ]
train
https://github.com/PGower/PyCanvas/blob/68520005382b440a1e462f9df369f54d364e21e8/pycanvas/apis/discussion_topics.py#L19-L72
PGower/PyCanvas
pycanvas/apis/discussion_topics.py
DiscussionTopicsAPI.create_new_discussion_topic_courses
def create_new_discussion_topic_courses(self, course_id, allow_rating=None, assignment=None, attachment=None, delayed_post_at=None, discussion_type=None, group_category_id=None, is_announcement=None, lock_at=None, message=None, only_graders_can_rate=None, pinned=None, podcast_enabled=None, podcast_has_student_posts=None, position_after=None, published=None, require_initial_post=None, sort_by_rating=None, title=None): """ Create a new discussion topic. Create an new discussion topic for the course or group. """ path = {} data = {} params = {} # REQUIRED - PATH - course_id """ID""" path["course_id"] = course_id # OPTIONAL - title """no description""" if title is not None: data["title"] = title # OPTIONAL - message """no description""" if message is not None: data["message"] = message # OPTIONAL - discussion_type """The type of discussion. Defaults to side_comment if not value is given. Accepted values are 'side_comment', for discussions that only allow one level of nested comments, and 'threaded' for fully threaded discussions.""" if discussion_type is not None: self._validate_enum(discussion_type, ["side_comment", "threaded"]) data["discussion_type"] = discussion_type # OPTIONAL - published """Whether this topic is published (true) or draft state (false). Only teachers and TAs have the ability to create draft state topics.""" if published is not None: data["published"] = published # OPTIONAL - delayed_post_at """If a timestamp is given, the topic will not be published until that time.""" if delayed_post_at is not None: data["delayed_post_at"] = delayed_post_at # OPTIONAL - lock_at """If a timestamp is given, the topic will be scheduled to lock at the provided timestamp. 
If the timestamp is in the past, the topic will be locked.""" if lock_at is not None: data["lock_at"] = lock_at # OPTIONAL - podcast_enabled """If true, the topic will have an associated podcast feed.""" if podcast_enabled is not None: data["podcast_enabled"] = podcast_enabled # OPTIONAL - podcast_has_student_posts """If true, the podcast will include posts from students as well. Implies podcast_enabled.""" if podcast_has_student_posts is not None: data["podcast_has_student_posts"] = podcast_has_student_posts # OPTIONAL - require_initial_post """If true then a user may not respond to other replies until that user has made an initial reply. Defaults to false.""" if require_initial_post is not None: data["require_initial_post"] = require_initial_post # OPTIONAL - assignment """To create an assignment discussion, pass the assignment parameters as a sub-object. See the {api:AssignmentsApiController#create Create an Assignment API} for the available parameters. The name parameter will be ignored, as it's taken from the discussion title. If you want to make a discussion that was an assignment NOT an assignment, pass set_assignment = false as part of the assignment object""" if assignment is not None: data["assignment"] = assignment # OPTIONAL - is_announcement """If true, this topic is an announcement. It will appear in the announcement's section rather than the discussions section. 
This requires announcment-posting permissions.""" if is_announcement is not None: data["is_announcement"] = is_announcement # OPTIONAL - pinned """If true, this topic will be listed in the "Pinned Discussion" section""" if pinned is not None: data["pinned"] = pinned # OPTIONAL - position_after """By default, discussions are sorted chronologically by creation date, you can pass the id of another topic to have this one show up after the other when they are listed.""" if position_after is not None: data["position_after"] = position_after # OPTIONAL - group_category_id """If present, the topic will become a group discussion assigned to the group.""" if group_category_id is not None: data["group_category_id"] = group_category_id # OPTIONAL - allow_rating """If true, users will be allowed to rate entries.""" if allow_rating is not None: data["allow_rating"] = allow_rating # OPTIONAL - only_graders_can_rate """If true, only graders will be allowed to rate entries.""" if only_graders_can_rate is not None: data["only_graders_can_rate"] = only_graders_can_rate # OPTIONAL - sort_by_rating """If true, entries will be sorted by rating.""" if sort_by_rating is not None: data["sort_by_rating"] = sort_by_rating # OPTIONAL - attachment """A multipart/form-data form-field-style attachment. Attachments larger than 1 kilobyte are subject to quota restrictions.""" if attachment is not None: data["attachment"] = attachment self.logger.debug("POST /api/v1/courses/{course_id}/discussion_topics with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("POST", "/api/v1/courses/{course_id}/discussion_topics".format(**path), data=data, params=params, no_data=True)
python
def create_new_discussion_topic_courses(self, course_id, allow_rating=None, assignment=None, attachment=None, delayed_post_at=None, discussion_type=None, group_category_id=None, is_announcement=None, lock_at=None, message=None, only_graders_can_rate=None, pinned=None, podcast_enabled=None, podcast_has_student_posts=None, position_after=None, published=None, require_initial_post=None, sort_by_rating=None, title=None): """ Create a new discussion topic. Create an new discussion topic for the course or group. """ path = {} data = {} params = {} # REQUIRED - PATH - course_id """ID""" path["course_id"] = course_id # OPTIONAL - title """no description""" if title is not None: data["title"] = title # OPTIONAL - message """no description""" if message is not None: data["message"] = message # OPTIONAL - discussion_type """The type of discussion. Defaults to side_comment if not value is given. Accepted values are 'side_comment', for discussions that only allow one level of nested comments, and 'threaded' for fully threaded discussions.""" if discussion_type is not None: self._validate_enum(discussion_type, ["side_comment", "threaded"]) data["discussion_type"] = discussion_type # OPTIONAL - published """Whether this topic is published (true) or draft state (false). Only teachers and TAs have the ability to create draft state topics.""" if published is not None: data["published"] = published # OPTIONAL - delayed_post_at """If a timestamp is given, the topic will not be published until that time.""" if delayed_post_at is not None: data["delayed_post_at"] = delayed_post_at # OPTIONAL - lock_at """If a timestamp is given, the topic will be scheduled to lock at the provided timestamp. 
If the timestamp is in the past, the topic will be locked.""" if lock_at is not None: data["lock_at"] = lock_at # OPTIONAL - podcast_enabled """If true, the topic will have an associated podcast feed.""" if podcast_enabled is not None: data["podcast_enabled"] = podcast_enabled # OPTIONAL - podcast_has_student_posts """If true, the podcast will include posts from students as well. Implies podcast_enabled.""" if podcast_has_student_posts is not None: data["podcast_has_student_posts"] = podcast_has_student_posts # OPTIONAL - require_initial_post """If true then a user may not respond to other replies until that user has made an initial reply. Defaults to false.""" if require_initial_post is not None: data["require_initial_post"] = require_initial_post # OPTIONAL - assignment """To create an assignment discussion, pass the assignment parameters as a sub-object. See the {api:AssignmentsApiController#create Create an Assignment API} for the available parameters. The name parameter will be ignored, as it's taken from the discussion title. If you want to make a discussion that was an assignment NOT an assignment, pass set_assignment = false as part of the assignment object""" if assignment is not None: data["assignment"] = assignment # OPTIONAL - is_announcement """If true, this topic is an announcement. It will appear in the announcement's section rather than the discussions section. 
This requires announcment-posting permissions.""" if is_announcement is not None: data["is_announcement"] = is_announcement # OPTIONAL - pinned """If true, this topic will be listed in the "Pinned Discussion" section""" if pinned is not None: data["pinned"] = pinned # OPTIONAL - position_after """By default, discussions are sorted chronologically by creation date, you can pass the id of another topic to have this one show up after the other when they are listed.""" if position_after is not None: data["position_after"] = position_after # OPTIONAL - group_category_id """If present, the topic will become a group discussion assigned to the group.""" if group_category_id is not None: data["group_category_id"] = group_category_id # OPTIONAL - allow_rating """If true, users will be allowed to rate entries.""" if allow_rating is not None: data["allow_rating"] = allow_rating # OPTIONAL - only_graders_can_rate """If true, only graders will be allowed to rate entries.""" if only_graders_can_rate is not None: data["only_graders_can_rate"] = only_graders_can_rate # OPTIONAL - sort_by_rating """If true, entries will be sorted by rating.""" if sort_by_rating is not None: data["sort_by_rating"] = sort_by_rating # OPTIONAL - attachment """A multipart/form-data form-field-style attachment. Attachments larger than 1 kilobyte are subject to quota restrictions.""" if attachment is not None: data["attachment"] = attachment self.logger.debug("POST /api/v1/courses/{course_id}/discussion_topics with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("POST", "/api/v1/courses/{course_id}/discussion_topics".format(**path), data=data, params=params, no_data=True)
[ "def", "create_new_discussion_topic_courses", "(", "self", ",", "course_id", ",", "allow_rating", "=", "None", ",", "assignment", "=", "None", ",", "attachment", "=", "None", ",", "delayed_post_at", "=", "None", ",", "discussion_type", "=", "None", ",", "group_c...
Create a new discussion topic. Create an new discussion topic for the course or group.
[ "Create", "a", "new", "discussion", "topic", ".", "Create", "an", "new", "discussion", "topic", "for", "the", "course", "or", "group", "." ]
train
https://github.com/PGower/PyCanvas/blob/68520005382b440a1e462f9df369f54d364e21e8/pycanvas/apis/discussion_topics.py#L129-L251
PGower/PyCanvas
pycanvas/apis/discussion_topics.py
DiscussionTopicsAPI.delete_topic_groups
def delete_topic_groups(self, group_id, topic_id): """ Delete a topic. Deletes the discussion topic. This will also delete the assignment, if it's an assignment discussion. """ path = {} data = {} params = {} # REQUIRED - PATH - group_id """ID""" path["group_id"] = group_id # REQUIRED - PATH - topic_id """ID""" path["topic_id"] = topic_id self.logger.debug("DELETE /api/v1/groups/{group_id}/discussion_topics/{topic_id} with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("DELETE", "/api/v1/groups/{group_id}/discussion_topics/{topic_id}".format(**path), data=data, params=params, no_data=True)
python
def delete_topic_groups(self, group_id, topic_id): """ Delete a topic. Deletes the discussion topic. This will also delete the assignment, if it's an assignment discussion. """ path = {} data = {} params = {} # REQUIRED - PATH - group_id """ID""" path["group_id"] = group_id # REQUIRED - PATH - topic_id """ID""" path["topic_id"] = topic_id self.logger.debug("DELETE /api/v1/groups/{group_id}/discussion_topics/{topic_id} with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("DELETE", "/api/v1/groups/{group_id}/discussion_topics/{topic_id}".format(**path), data=data, params=params, no_data=True)
[ "def", "delete_topic_groups", "(", "self", ",", "group_id", ",", "topic_id", ")", ":", "path", "=", "{", "}", "data", "=", "{", "}", "params", "=", "{", "}", "# REQUIRED - PATH - group_id\r", "\"\"\"ID\"\"\"", "path", "[", "\"group_id\"", "]", "=", "group_id...
Delete a topic. Deletes the discussion topic. This will also delete the assignment, if it's an assignment discussion.
[ "Delete", "a", "topic", ".", "Deletes", "the", "discussion", "topic", ".", "This", "will", "also", "delete", "the", "assignment", "if", "it", "s", "an", "assignment", "discussion", "." ]
train
https://github.com/PGower/PyCanvas/blob/68520005382b440a1e462f9df369f54d364e21e8/pycanvas/apis/discussion_topics.py#L643-L663
PGower/PyCanvas
pycanvas/apis/discussion_topics.py
DiscussionTopicsAPI.reorder_pinned_topics_courses
def reorder_pinned_topics_courses(self, order, course_id): """ Reorder pinned topics. Puts the pinned discussion topics in the specified order. All pinned topics should be included. """ path = {} data = {} params = {} # REQUIRED - PATH - course_id """ID""" path["course_id"] = course_id # REQUIRED - order """The ids of the pinned discussion topics in the desired order. (For example, "order=104,102,103".)""" data["order"] = order self.logger.debug("POST /api/v1/courses/{course_id}/discussion_topics/reorder with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("POST", "/api/v1/courses/{course_id}/discussion_topics/reorder".format(**path), data=data, params=params, no_data=True)
python
def reorder_pinned_topics_courses(self, order, course_id): """ Reorder pinned topics. Puts the pinned discussion topics in the specified order. All pinned topics should be included. """ path = {} data = {} params = {} # REQUIRED - PATH - course_id """ID""" path["course_id"] = course_id # REQUIRED - order """The ids of the pinned discussion topics in the desired order. (For example, "order=104,102,103".)""" data["order"] = order self.logger.debug("POST /api/v1/courses/{course_id}/discussion_topics/reorder with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("POST", "/api/v1/courses/{course_id}/discussion_topics/reorder".format(**path), data=data, params=params, no_data=True)
[ "def", "reorder_pinned_topics_courses", "(", "self", ",", "order", ",", "course_id", ")", ":", "path", "=", "{", "}", "data", "=", "{", "}", "params", "=", "{", "}", "# REQUIRED - PATH - course_id\r", "\"\"\"ID\"\"\"", "path", "[", "\"course_id\"", "]", "=", ...
Reorder pinned topics. Puts the pinned discussion topics in the specified order. All pinned topics should be included.
[ "Reorder", "pinned", "topics", ".", "Puts", "the", "pinned", "discussion", "topics", "in", "the", "specified", "order", ".", "All", "pinned", "topics", "should", "be", "included", "." ]
train
https://github.com/PGower/PyCanvas/blob/68520005382b440a1e462f9df369f54d364e21e8/pycanvas/apis/discussion_topics.py#L665-L686
PGower/PyCanvas
pycanvas/apis/discussion_topics.py
DiscussionTopicsAPI.reorder_pinned_topics_groups
def reorder_pinned_topics_groups(self, order, group_id): """ Reorder pinned topics. Puts the pinned discussion topics in the specified order. All pinned topics should be included. """ path = {} data = {} params = {} # REQUIRED - PATH - group_id """ID""" path["group_id"] = group_id # REQUIRED - order """The ids of the pinned discussion topics in the desired order. (For example, "order=104,102,103".)""" data["order"] = order self.logger.debug("POST /api/v1/groups/{group_id}/discussion_topics/reorder with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("POST", "/api/v1/groups/{group_id}/discussion_topics/reorder".format(**path), data=data, params=params, no_data=True)
python
def reorder_pinned_topics_groups(self, order, group_id): """ Reorder pinned topics. Puts the pinned discussion topics in the specified order. All pinned topics should be included. """ path = {} data = {} params = {} # REQUIRED - PATH - group_id """ID""" path["group_id"] = group_id # REQUIRED - order """The ids of the pinned discussion topics in the desired order. (For example, "order=104,102,103".)""" data["order"] = order self.logger.debug("POST /api/v1/groups/{group_id}/discussion_topics/reorder with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("POST", "/api/v1/groups/{group_id}/discussion_topics/reorder".format(**path), data=data, params=params, no_data=True)
[ "def", "reorder_pinned_topics_groups", "(", "self", ",", "order", ",", "group_id", ")", ":", "path", "=", "{", "}", "data", "=", "{", "}", "params", "=", "{", "}", "# REQUIRED - PATH - group_id\r", "\"\"\"ID\"\"\"", "path", "[", "\"group_id\"", "]", "=", "gr...
Reorder pinned topics. Puts the pinned discussion topics in the specified order. All pinned topics should be included.
[ "Reorder", "pinned", "topics", ".", "Puts", "the", "pinned", "discussion", "topics", "in", "the", "specified", "order", ".", "All", "pinned", "topics", "should", "be", "included", "." ]
train
https://github.com/PGower/PyCanvas/blob/68520005382b440a1e462f9df369f54d364e21e8/pycanvas/apis/discussion_topics.py#L688-L709
PGower/PyCanvas
pycanvas/apis/discussion_topics.py
DiscussionTopicsAPI.post_entry_courses
def post_entry_courses(self, topic_id, course_id, attachment=None, message=None): """ Post an entry. Create a new entry in a discussion topic. Returns a json representation of the created entry (see documentation for 'entries' method) on success. """ path = {} data = {} params = {} # REQUIRED - PATH - course_id """ID""" path["course_id"] = course_id # REQUIRED - PATH - topic_id """ID""" path["topic_id"] = topic_id # OPTIONAL - message """The body of the entry.""" if message is not None: data["message"] = message # OPTIONAL - attachment """a multipart/form-data form-field-style attachment. Attachments larger than 1 kilobyte are subject to quota restrictions.""" if attachment is not None: data["attachment"] = attachment self.logger.debug("POST /api/v1/courses/{course_id}/discussion_topics/{topic_id}/entries with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("POST", "/api/v1/courses/{course_id}/discussion_topics/{topic_id}/entries".format(**path), data=data, params=params, no_data=True)
python
def post_entry_courses(self, topic_id, course_id, attachment=None, message=None): """ Post an entry. Create a new entry in a discussion topic. Returns a json representation of the created entry (see documentation for 'entries' method) on success. """ path = {} data = {} params = {} # REQUIRED - PATH - course_id """ID""" path["course_id"] = course_id # REQUIRED - PATH - topic_id """ID""" path["topic_id"] = topic_id # OPTIONAL - message """The body of the entry.""" if message is not None: data["message"] = message # OPTIONAL - attachment """a multipart/form-data form-field-style attachment. Attachments larger than 1 kilobyte are subject to quota restrictions.""" if attachment is not None: data["attachment"] = attachment self.logger.debug("POST /api/v1/courses/{course_id}/discussion_topics/{topic_id}/entries with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("POST", "/api/v1/courses/{course_id}/discussion_topics/{topic_id}/entries".format(**path), data=data, params=params, no_data=True)
[ "def", "post_entry_courses", "(", "self", ",", "topic_id", ",", "course_id", ",", "attachment", "=", "None", ",", "message", "=", "None", ")", ":", "path", "=", "{", "}", "data", "=", "{", "}", "params", "=", "{", "}", "# REQUIRED - PATH - course_id\r", ...
Post an entry. Create a new entry in a discussion topic. Returns a json representation of the created entry (see documentation for 'entries' method) on success.
[ "Post", "an", "entry", ".", "Create", "a", "new", "entry", "in", "a", "discussion", "topic", ".", "Returns", "a", "json", "representation", "of", "the", "created", "entry", "(", "see", "documentation", "for", "entries", "method", ")", "on", "success", "." ...
train
https://github.com/PGower/PyCanvas/blob/68520005382b440a1e462f9df369f54d364e21e8/pycanvas/apis/discussion_topics.py#L981-L1013
PGower/PyCanvas
pycanvas/apis/discussion_topics.py
DiscussionTopicsAPI.post_reply_groups
def post_reply_groups(self, group_id, topic_id, entry_id, attachment=None, message=None): """ Post a reply. Add a reply to an entry in a discussion topic. Returns a json representation of the created reply (see documentation for 'replies' method) on success. May require (depending on the topic) that the user has posted in the topic. If it is required, and the user has not posted, will respond with a 403 Forbidden status and the body 'require_initial_post'. """ path = {} data = {} params = {} # REQUIRED - PATH - group_id """ID""" path["group_id"] = group_id # REQUIRED - PATH - topic_id """ID""" path["topic_id"] = topic_id # REQUIRED - PATH - entry_id """ID""" path["entry_id"] = entry_id # OPTIONAL - message """The body of the entry.""" if message is not None: data["message"] = message # OPTIONAL - attachment """a multipart/form-data form-field-style attachment. Attachments larger than 1 kilobyte are subject to quota restrictions.""" if attachment is not None: data["attachment"] = attachment self.logger.debug("POST /api/v1/groups/{group_id}/discussion_topics/{topic_id}/entries/{entry_id}/replies with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("POST", "/api/v1/groups/{group_id}/discussion_topics/{topic_id}/entries/{entry_id}/replies".format(**path), data=data, params=params, no_data=True)
python
def post_reply_groups(self, group_id, topic_id, entry_id, attachment=None, message=None): """ Post a reply. Add a reply to an entry in a discussion topic. Returns a json representation of the created reply (see documentation for 'replies' method) on success. May require (depending on the topic) that the user has posted in the topic. If it is required, and the user has not posted, will respond with a 403 Forbidden status and the body 'require_initial_post'. """ path = {} data = {} params = {} # REQUIRED - PATH - group_id """ID""" path["group_id"] = group_id # REQUIRED - PATH - topic_id """ID""" path["topic_id"] = topic_id # REQUIRED - PATH - entry_id """ID""" path["entry_id"] = entry_id # OPTIONAL - message """The body of the entry.""" if message is not None: data["message"] = message # OPTIONAL - attachment """a multipart/form-data form-field-style attachment. Attachments larger than 1 kilobyte are subject to quota restrictions.""" if attachment is not None: data["attachment"] = attachment self.logger.debug("POST /api/v1/groups/{group_id}/discussion_topics/{topic_id}/entries/{entry_id}/replies with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("POST", "/api/v1/groups/{group_id}/discussion_topics/{topic_id}/entries/{entry_id}/replies".format(**path), data=data, params=params, no_data=True)
[ "def", "post_reply_groups", "(", "self", ",", "group_id", ",", "topic_id", ",", "entry_id", ",", "attachment", "=", "None", ",", "message", "=", "None", ")", ":", "path", "=", "{", "}", "data", "=", "{", "}", "params", "=", "{", "}", "# REQUIRED - PATH...
Post a reply. Add a reply to an entry in a discussion topic. Returns a json representation of the created reply (see documentation for 'replies' method) on success. May require (depending on the topic) that the user has posted in the topic. If it is required, and the user has not posted, will respond with a 403 Forbidden status and the body 'require_initial_post'.
[ "Post", "a", "reply", ".", "Add", "a", "reply", "to", "an", "entry", "in", "a", "discussion", "topic", ".", "Returns", "a", "json", "representation", "of", "the", "created", "reply", "(", "see", "documentation", "for", "replies", "method", ")", "on", "su...
train
https://github.com/PGower/PyCanvas/blob/68520005382b440a1e462f9df369f54d364e21e8/pycanvas/apis/discussion_topics.py#L1160-L1201
PGower/PyCanvas
pycanvas/apis/discussion_topics.py
DiscussionTopicsAPI.rate_entry_courses
def rate_entry_courses(self, topic_id, entry_id, course_id, rating=None): """ Rate entry. Rate a discussion entry. On success, the response will be 204 No Content with an empty body. """ path = {} data = {} params = {} # REQUIRED - PATH - course_id """ID""" path["course_id"] = course_id # REQUIRED - PATH - topic_id """ID""" path["topic_id"] = topic_id # REQUIRED - PATH - entry_id """ID""" path["entry_id"] = entry_id # OPTIONAL - rating """A rating to set on this entry. Only 0 and 1 are accepted.""" if rating is not None: data["rating"] = rating self.logger.debug("POST /api/v1/courses/{course_id}/discussion_topics/{topic_id}/entries/{entry_id}/rating with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("POST", "/api/v1/courses/{course_id}/discussion_topics/{topic_id}/entries/{entry_id}/rating".format(**path), data=data, params=params, no_data=True)
python
def rate_entry_courses(self, topic_id, entry_id, course_id, rating=None): """ Rate entry. Rate a discussion entry. On success, the response will be 204 No Content with an empty body. """ path = {} data = {} params = {} # REQUIRED - PATH - course_id """ID""" path["course_id"] = course_id # REQUIRED - PATH - topic_id """ID""" path["topic_id"] = topic_id # REQUIRED - PATH - entry_id """ID""" path["entry_id"] = entry_id # OPTIONAL - rating """A rating to set on this entry. Only 0 and 1 are accepted.""" if rating is not None: data["rating"] = rating self.logger.debug("POST /api/v1/courses/{course_id}/discussion_topics/{topic_id}/entries/{entry_id}/rating with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("POST", "/api/v1/courses/{course_id}/discussion_topics/{topic_id}/entries/{entry_id}/rating".format(**path), data=data, params=params, no_data=True)
[ "def", "rate_entry_courses", "(", "self", ",", "topic_id", ",", "entry_id", ",", "course_id", ",", "rating", "=", "None", ")", ":", "path", "=", "{", "}", "data", "=", "{", "}", "params", "=", "{", "}", "# REQUIRED - PATH - course_id\r", "\"\"\"ID\"\"\"", ...
Rate entry. Rate a discussion entry. On success, the response will be 204 No Content with an empty body.
[ "Rate", "entry", ".", "Rate", "a", "discussion", "entry", ".", "On", "success", "the", "response", "will", "be", "204", "No", "Content", "with", "an", "empty", "body", "." ]
train
https://github.com/PGower/PyCanvas/blob/68520005382b440a1e462f9df369f54d364e21e8/pycanvas/apis/discussion_topics.py#L1693-L1723
coagulant/critics
critics/commands.py
cli
def cli(**settings): """Notify about new reviews in AppStore and Google Play in slack. Launch command using supervisor or using screen/tmux/etc. Reviews are fetched for multiple apps and languages in --beat=300 interval. """ setup_logging(settings) settings = setup_languages(settings) channels = setup_channel_map(settings) app = CriticApp(**dict(settings, channels=channels)) if settings['sentry_dsn']: app.sentry_client = Client(settings['sentry_dsn']) logger.debug('Errors are reported to %s' % settings['sentry_dsn']) else: app.sentry_client = None if settings['version']: click.echo('Version %s' % critics.__version__) return if not (settings['ios'] or settings['android']): click.echo('Please choose either --ios or --android') return loop = tornado.ioloop.IOLoop.instance() if app.load_model(): logger.debug('Model loaded OK, not skipping notify on first run') notify = True else: notify = False if settings['ios']: logger.info('Tracking IOS apps: %s', ', '.join(settings['ios'])) itunes = tornado.ioloop.PeriodicCallback(partial(app.poll_store, 'ios'), 1000 * settings['beat'], loop) itunes.start() if settings['android']: logger.info('Tracking Android apps: %s', ', '.join(settings['android'])) google_play = tornado.ioloop.PeriodicCallback(partial(app.poll_store, 'android'), 1000 * settings['beat'], loop) google_play.start() echo_channel_map(channels) if settings['ios']: app.poll_store('ios', notify=notify) if settings['android']: app.poll_store('android', notify=notify) if settings['stats']: port = int(settings['stats']) logger.debug('Serving metrics server on port %s' % port) start_http_server(port) if settings['daemonize']: loop.start()
python
def cli(**settings): """Notify about new reviews in AppStore and Google Play in slack. Launch command using supervisor or using screen/tmux/etc. Reviews are fetched for multiple apps and languages in --beat=300 interval. """ setup_logging(settings) settings = setup_languages(settings) channels = setup_channel_map(settings) app = CriticApp(**dict(settings, channels=channels)) if settings['sentry_dsn']: app.sentry_client = Client(settings['sentry_dsn']) logger.debug('Errors are reported to %s' % settings['sentry_dsn']) else: app.sentry_client = None if settings['version']: click.echo('Version %s' % critics.__version__) return if not (settings['ios'] or settings['android']): click.echo('Please choose either --ios or --android') return loop = tornado.ioloop.IOLoop.instance() if app.load_model(): logger.debug('Model loaded OK, not skipping notify on first run') notify = True else: notify = False if settings['ios']: logger.info('Tracking IOS apps: %s', ', '.join(settings['ios'])) itunes = tornado.ioloop.PeriodicCallback(partial(app.poll_store, 'ios'), 1000 * settings['beat'], loop) itunes.start() if settings['android']: logger.info('Tracking Android apps: %s', ', '.join(settings['android'])) google_play = tornado.ioloop.PeriodicCallback(partial(app.poll_store, 'android'), 1000 * settings['beat'], loop) google_play.start() echo_channel_map(channels) if settings['ios']: app.poll_store('ios', notify=notify) if settings['android']: app.poll_store('android', notify=notify) if settings['stats']: port = int(settings['stats']) logger.debug('Serving metrics server on port %s' % port) start_http_server(port) if settings['daemonize']: loop.start()
[ "def", "cli", "(", "*", "*", "settings", ")", ":", "setup_logging", "(", "settings", ")", "settings", "=", "setup_languages", "(", "settings", ")", "channels", "=", "setup_channel_map", "(", "settings", ")", "app", "=", "CriticApp", "(", "*", "*", "dict", ...
Notify about new reviews in AppStore and Google Play in slack. Launch command using supervisor or using screen/tmux/etc. Reviews are fetched for multiple apps and languages in --beat=300 interval.
[ "Notify", "about", "new", "reviews", "in", "AppStore", "and", "Google", "Play", "in", "slack", "." ]
train
https://github.com/coagulant/critics/blob/506643f79c5adac8200b078780c5d1c4eb7cc95c/critics/commands.py#L40-L95
karel-brinda/rnftools
rnftools/mishmash/Source.py
Source.create_fa
def create_fa(self): """Create a FASTA file with extracted sequences. """ if self._seqs is None: os.symlink(self._fa0_fn, self._fa_fn) else: in_seqs = pyfaidx.Fasta(self._fa0_fn) with open(self._fa_fn, "w+") as g: for seq_desc in self._seqs: x = in_seqs[seq_desc] name, seq = x.name, str(x) g.write(">" + name + "\n") n = 80 seq_split = "\n".join([seq[i:i + n] for i in range(0, len(seq), n)]) g.write(seq_split + "\n")
python
def create_fa(self): """Create a FASTA file with extracted sequences. """ if self._seqs is None: os.symlink(self._fa0_fn, self._fa_fn) else: in_seqs = pyfaidx.Fasta(self._fa0_fn) with open(self._fa_fn, "w+") as g: for seq_desc in self._seqs: x = in_seqs[seq_desc] name, seq = x.name, str(x) g.write(">" + name + "\n") n = 80 seq_split = "\n".join([seq[i:i + n] for i in range(0, len(seq), n)]) g.write(seq_split + "\n")
[ "def", "create_fa", "(", "self", ")", ":", "if", "self", ".", "_seqs", "is", "None", ":", "os", ".", "symlink", "(", "self", ".", "_fa0_fn", ",", "self", ".", "_fa_fn", ")", "else", ":", "in_seqs", "=", "pyfaidx", ".", "Fasta", "(", "self", ".", ...
Create a FASTA file with extracted sequences.
[ "Create", "a", "FASTA", "file", "with", "extracted", "sequences", "." ]
train
https://github.com/karel-brinda/rnftools/blob/25510798606fbc803a622a1abfcecf06d00d47a9/rnftools/mishmash/Source.py#L142-L158
karel-brinda/rnftools
rnftools/mishmash/Source.py
Source.recode_sam_reads
def recode_sam_reads( sam_fn, fastq_rnf_fo, fai_fo, genome_id, number_of_read_tuples=10**9, simulator_name=None, allow_unmapped=False, ): """Transform a SAM file to RNF-compatible FASTQ. Args: sam_fn (str): SAM/BAM file - file name. fastq_rnf_fo (str): Output FASTQ file - file object. fai_fo (str): FAI index of the reference genome - file object. genome_id (int): Genome ID for RNF. number_of_read_tuples (int): Expected number of read tuples (to set width of read tuple id). simulator_name (str): Name of the simulator. Used for comment in read tuple name. allow_unmapped (bool): Allow unmapped reads. Raises: NotImplementedError """ fai_index = rnftools.utils.FaIdx(fai_fo) # last_read_tuple_name=[] read_tuple_id_width = len(format(number_of_read_tuples, 'x')) fq_creator = rnftools.rnfformat.FqCreator( fastq_fo=fastq_rnf_fo, read_tuple_id_width=read_tuple_id_width, genome_id_width=2, chr_id_width=fai_index.chr_id_width, coor_width=fai_index.coor_width, info_reads_in_tuple=True, info_simulator=simulator_name, ) # todo: check if clipping corrections is well implemented cigar_reg_shift = re.compile("([0-9]+)([MDNP=X])") # todo: other upac codes reverse_complement_dict = { "A": "T", "T": "A", "C": "G", "G": "C", "N": "N", } read_tuple_id = 0 last_read_tuple_name = None with pysam.AlignmentFile( sam_fn, check_header=False, ) as samfile: for alignment in samfile: if alignment.query_name != last_read_tuple_name and last_read_tuple_name is not None: read_tuple_id += 1 last_read_tuple_name = alignment.query_name if alignment.is_unmapped: rnftools.utils.error( "SAM files used for conversion should not contain unaligned segments. 
" "This condition is broken by read tuple " "'{}' in file '{}'.".format(alignment.query_name, sam_fn), program="RNFtools", subprogram="MIShmash", exception=NotImplementedError, ) if alignment.is_reverse: direction = "R" bases = "".join([reverse_complement_dict[nucl] for nucl in alignment.seq[::-1]]) qualities = str(alignment.qual[::-1]) else: direction = "F" bases = alignment.seq[:] qualities = str(alignment.qual[:]) # todo: are chromosomes in bam sorted correctly (the same order as in FASTA)? if fai_index.dict_chr_ids != {}: chr_id = fai_index.dict_chr_ids[samfile.getrname(alignment.reference_id)] else: chr_id = "0" left = int(alignment.reference_start) + 1 right = left - 1 for (steps, operation) in cigar_reg_shift.findall(alignment.cigarstring): right += int(steps) segment = rnftools.rnfformat.Segment( genome_id=genome_id, chr_id=chr_id, direction=direction, left=left, right=right, ) fq_creator.add_read( read_tuple_id=read_tuple_id, bases=bases, qualities=qualities, segments=[segment], ) fq_creator.flush_read_tuple()
python
def recode_sam_reads( sam_fn, fastq_rnf_fo, fai_fo, genome_id, number_of_read_tuples=10**9, simulator_name=None, allow_unmapped=False, ): """Transform a SAM file to RNF-compatible FASTQ. Args: sam_fn (str): SAM/BAM file - file name. fastq_rnf_fo (str): Output FASTQ file - file object. fai_fo (str): FAI index of the reference genome - file object. genome_id (int): Genome ID for RNF. number_of_read_tuples (int): Expected number of read tuples (to set width of read tuple id). simulator_name (str): Name of the simulator. Used for comment in read tuple name. allow_unmapped (bool): Allow unmapped reads. Raises: NotImplementedError """ fai_index = rnftools.utils.FaIdx(fai_fo) # last_read_tuple_name=[] read_tuple_id_width = len(format(number_of_read_tuples, 'x')) fq_creator = rnftools.rnfformat.FqCreator( fastq_fo=fastq_rnf_fo, read_tuple_id_width=read_tuple_id_width, genome_id_width=2, chr_id_width=fai_index.chr_id_width, coor_width=fai_index.coor_width, info_reads_in_tuple=True, info_simulator=simulator_name, ) # todo: check if clipping corrections is well implemented cigar_reg_shift = re.compile("([0-9]+)([MDNP=X])") # todo: other upac codes reverse_complement_dict = { "A": "T", "T": "A", "C": "G", "G": "C", "N": "N", } read_tuple_id = 0 last_read_tuple_name = None with pysam.AlignmentFile( sam_fn, check_header=False, ) as samfile: for alignment in samfile: if alignment.query_name != last_read_tuple_name and last_read_tuple_name is not None: read_tuple_id += 1 last_read_tuple_name = alignment.query_name if alignment.is_unmapped: rnftools.utils.error( "SAM files used for conversion should not contain unaligned segments. 
" "This condition is broken by read tuple " "'{}' in file '{}'.".format(alignment.query_name, sam_fn), program="RNFtools", subprogram="MIShmash", exception=NotImplementedError, ) if alignment.is_reverse: direction = "R" bases = "".join([reverse_complement_dict[nucl] for nucl in alignment.seq[::-1]]) qualities = str(alignment.qual[::-1]) else: direction = "F" bases = alignment.seq[:] qualities = str(alignment.qual[:]) # todo: are chromosomes in bam sorted correctly (the same order as in FASTA)? if fai_index.dict_chr_ids != {}: chr_id = fai_index.dict_chr_ids[samfile.getrname(alignment.reference_id)] else: chr_id = "0" left = int(alignment.reference_start) + 1 right = left - 1 for (steps, operation) in cigar_reg_shift.findall(alignment.cigarstring): right += int(steps) segment = rnftools.rnfformat.Segment( genome_id=genome_id, chr_id=chr_id, direction=direction, left=left, right=right, ) fq_creator.add_read( read_tuple_id=read_tuple_id, bases=bases, qualities=qualities, segments=[segment], ) fq_creator.flush_read_tuple()
[ "def", "recode_sam_reads", "(", "sam_fn", ",", "fastq_rnf_fo", ",", "fai_fo", ",", "genome_id", ",", "number_of_read_tuples", "=", "10", "**", "9", ",", "simulator_name", "=", "None", ",", "allow_unmapped", "=", "False", ",", ")", ":", "fai_index", "=", "rnf...
Transform a SAM file to RNF-compatible FASTQ. Args: sam_fn (str): SAM/BAM file - file name. fastq_rnf_fo (str): Output FASTQ file - file object. fai_fo (str): FAI index of the reference genome - file object. genome_id (int): Genome ID for RNF. number_of_read_tuples (int): Expected number of read tuples (to set width of read tuple id). simulator_name (str): Name of the simulator. Used for comment in read tuple name. allow_unmapped (bool): Allow unmapped reads. Raises: NotImplementedError
[ "Transform", "a", "SAM", "file", "to", "RNF", "-", "compatible", "FASTQ", "." ]
train
https://github.com/karel-brinda/rnftools/blob/25510798606fbc803a622a1abfcecf06d00d47a9/rnftools/mishmash/Source.py#L161-L265
20c/vodka
vodka/instance.py
instantiate
def instantiate(config): """ instantiate all registered vodka applications Args: config (dict or MungeConfig): configuration object """ for handle, cfg in list(config["apps"].items()): if not cfg.get("enabled", True): continue app = get_application(handle) instances[app.handle] = app(cfg)
python
def instantiate(config): """ instantiate all registered vodka applications Args: config (dict or MungeConfig): configuration object """ for handle, cfg in list(config["apps"].items()): if not cfg.get("enabled", True): continue app = get_application(handle) instances[app.handle] = app(cfg)
[ "def", "instantiate", "(", "config", ")", ":", "for", "handle", ",", "cfg", "in", "list", "(", "config", "[", "\"apps\"", "]", ".", "items", "(", ")", ")", ":", "if", "not", "cfg", ".", "get", "(", "\"enabled\"", ",", "True", ")", ":", "continue", ...
instantiate all registered vodka applications Args: config (dict or MungeConfig): configuration object
[ "instantiate", "all", "registered", "vodka", "applications" ]
train
https://github.com/20c/vodka/blob/9615148ac6560298453704bb5246b35b66b3339c/vodka/instance.py#L23-L36
Sanji-IO/sanji
sanji/router.py
compile_resource
def compile_resource(resource): """ Return compiled regex for resource matching """ return re.compile("^" + trim_resource(re.sub(r":(\w+)", r"(?P<\1>[\w-]+?)", resource)) + r"(\?(?P<querystring>.*))?$")
python
def compile_resource(resource): """ Return compiled regex for resource matching """ return re.compile("^" + trim_resource(re.sub(r":(\w+)", r"(?P<\1>[\w-]+?)", resource)) + r"(\?(?P<querystring>.*))?$")
[ "def", "compile_resource", "(", "resource", ")", ":", "return", "re", ".", "compile", "(", "\"^\"", "+", "trim_resource", "(", "re", ".", "sub", "(", "r\":(\\w+)\"", ",", "r\"(?P<\\1>[\\w-]+?)\"", ",", "resource", ")", ")", "+", "r\"(\\?(?P<querystring>.*))?$\""...
Return compiled regex for resource matching
[ "Return", "compiled", "regex", "for", "resource", "matching" ]
train
https://github.com/Sanji-IO/sanji/blob/5c54cc2772bdfeae3337f785de1957237b828b34/sanji/router.py#L9-L14
Sanji-IO/sanji
sanji/router.py
Route.create_handler_func
def create_handler_func(self, method): """ create_handler_func """ def _handler(callback, schema=None): """ _handler """ # reentrant default is False [POST, DELETE, PUT] reentrant = False if method == "get": reentrant = True self.handlers.append({ "method": method, "callback": callback, "schema": schema, "reentrant": reentrant }) return self return _handler
python
def create_handler_func(self, method): """ create_handler_func """ def _handler(callback, schema=None): """ _handler """ # reentrant default is False [POST, DELETE, PUT] reentrant = False if method == "get": reentrant = True self.handlers.append({ "method": method, "callback": callback, "schema": schema, "reentrant": reentrant }) return self return _handler
[ "def", "create_handler_func", "(", "self", ",", "method", ")", ":", "def", "_handler", "(", "callback", ",", "schema", "=", "None", ")", ":", "\"\"\"\n _handler\n \"\"\"", "# reentrant default is False [POST, DELETE, PUT]", "reentrant", "=", "False",...
create_handler_func
[ "create_handler_func" ]
train
https://github.com/Sanji-IO/sanji/blob/5c54cc2772bdfeae3337f785de1957237b828b34/sanji/router.py#L30-L51
Sanji-IO/sanji
sanji/router.py
Route.dispatch
def dispatch(self, message): """ dispatch """ handlers = [] for handler in self.handlers: if handler["method"] != message.method: continue handlers.append(handler) return handlers
python
def dispatch(self, message): """ dispatch """ handlers = [] for handler in self.handlers: if handler["method"] != message.method: continue handlers.append(handler) return handlers
[ "def", "dispatch", "(", "self", ",", "message", ")", ":", "handlers", "=", "[", "]", "for", "handler", "in", "self", ".", "handlers", ":", "if", "handler", "[", "\"method\"", "]", "!=", "message", ".", "method", ":", "continue", "handlers", ".", "appen...
dispatch
[ "dispatch" ]
train
https://github.com/Sanji-IO/sanji/blob/5c54cc2772bdfeae3337f785de1957237b828b34/sanji/router.py#L53-L63
Sanji-IO/sanji
sanji/router.py
Router.route
def route(self, resource): """ route """ route = self.routes.get(resource, Route(resource)) self.routes.update({resource: route}) return route
python
def route(self, resource): """ route """ route = self.routes.get(resource, Route(resource)) self.routes.update({resource: route}) return route
[ "def", "route", "(", "self", ",", "resource", ")", ":", "route", "=", "self", ".", "routes", ".", "get", "(", "resource", ",", "Route", "(", "resource", ")", ")", "self", ".", "routes", ".", "update", "(", "{", "resource", ":", "route", "}", ")", ...
route
[ "route" ]
train
https://github.com/Sanji-IO/sanji/blob/5c54cc2772bdfeae3337f785de1957237b828b34/sanji/router.py#L82-L88
Sanji-IO/sanji
sanji/router.py
Router.create_route_func
def create_route_func(self, method): """ create_route_func """ def _route(resource, handler, schema=None): """ _route """ route = self.routes.get(resource, Route(resource)) route.__getattribute__(method)(handler, schema) self.routes.update({resource: route}) return self return _route
python
def create_route_func(self, method): """ create_route_func """ def _route(resource, handler, schema=None): """ _route """ route = self.routes.get(resource, Route(resource)) route.__getattribute__(method)(handler, schema) self.routes.update({resource: route}) return self return _route
[ "def", "create_route_func", "(", "self", ",", "method", ")", ":", "def", "_route", "(", "resource", ",", "handler", ",", "schema", "=", "None", ")", ":", "\"\"\"\n _route\n \"\"\"", "route", "=", "self", ".", "routes", ".", "get", "(", ...
create_route_func
[ "create_route_func" ]
train
https://github.com/Sanji-IO/sanji/blob/5c54cc2772bdfeae3337f785de1957237b828b34/sanji/router.py#L90-L103
Sanji-IO/sanji
sanji/router.py
Router.dispatch
def dispatch(self, message): """ dispatch """ results = [] # match routes for resource, route in self.routes.items(): __message = message.match(route) if __message is None: continue route_result = route.dispatch(__message) if len(route_result) == 0: continue results.append({ "handlers": route_result, "message": __message }) return results
python
def dispatch(self, message): """ dispatch """ results = [] # match routes for resource, route in self.routes.items(): __message = message.match(route) if __message is None: continue route_result = route.dispatch(__message) if len(route_result) == 0: continue results.append({ "handlers": route_result, "message": __message }) return results
[ "def", "dispatch", "(", "self", ",", "message", ")", ":", "results", "=", "[", "]", "# match routes", "for", "resource", ",", "route", "in", "self", ".", "routes", ".", "items", "(", ")", ":", "__message", "=", "message", ".", "match", "(", "route", ...
dispatch
[ "dispatch" ]
train
https://github.com/Sanji-IO/sanji/blob/5c54cc2772bdfeae3337f785de1957237b828b34/sanji/router.py#L105-L125
20c/vodka
vodka/data/__init__.py
handle
def handle(data_type, data, data_id=None, caller=None): """ execute all data handlers on the specified data according to data type Args: data_type (str): data type handle data (dict or list): data Kwargs: data_id (str): can be used to differentiate between different data sets of the same data type. If not specified will default to the data type caller (object): if specified, holds the object or function that is trying to handle data Returns: dict or list - data after handlers have been executed on it """ if not data_id: data_id = data_type # instantiate handlers for data type if they havent been yet if data_id not in _handlers: _handlers[data_id] = dict( [(h.handle, h) for h in handlers.instantiate_for_data_type(data_type, data_id=data_id)]) for handler in list(_handlers[data_id].values()): try: data = handler(data, caller=caller) except Exception as inst: vodka.log.error("Data handler '%s' failed with error" % handler) vodka.log.error(traceback.format_exc()) return data
python
def handle(data_type, data, data_id=None, caller=None): """ execute all data handlers on the specified data according to data type Args: data_type (str): data type handle data (dict or list): data Kwargs: data_id (str): can be used to differentiate between different data sets of the same data type. If not specified will default to the data type caller (object): if specified, holds the object or function that is trying to handle data Returns: dict or list - data after handlers have been executed on it """ if not data_id: data_id = data_type # instantiate handlers for data type if they havent been yet if data_id not in _handlers: _handlers[data_id] = dict( [(h.handle, h) for h in handlers.instantiate_for_data_type(data_type, data_id=data_id)]) for handler in list(_handlers[data_id].values()): try: data = handler(data, caller=caller) except Exception as inst: vodka.log.error("Data handler '%s' failed with error" % handler) vodka.log.error(traceback.format_exc()) return data
[ "def", "handle", "(", "data_type", ",", "data", ",", "data_id", "=", "None", ",", "caller", "=", "None", ")", ":", "if", "not", "data_id", ":", "data_id", "=", "data_type", "# instantiate handlers for data type if they havent been yet", "if", "data_id", "not", "...
execute all data handlers on the specified data according to data type Args: data_type (str): data type handle data (dict or list): data Kwargs: data_id (str): can be used to differentiate between different data sets of the same data type. If not specified will default to the data type caller (object): if specified, holds the object or function that is trying to handle data Returns: dict or list - data after handlers have been executed on it
[ "execute", "all", "data", "handlers", "on", "the", "specified", "data", "according", "to", "data", "type" ]
train
https://github.com/20c/vodka/blob/9615148ac6560298453704bb5246b35b66b3339c/vodka/data/__init__.py#L7-L41
theonion/django-bulbs
bulbs/reading_list/mixins.py
ReadingListMixin.validate_query
def validate_query(self, query): """Confirm query exists given common filters.""" if query is None: return query query = self.update_reading_list(query) return query
python
def validate_query(self, query): """Confirm query exists given common filters.""" if query is None: return query query = self.update_reading_list(query) return query
[ "def", "validate_query", "(", "self", ",", "query", ")", ":", "if", "query", "is", "None", ":", "return", "query", "query", "=", "self", ".", "update_reading_list", "(", "query", ")", "return", "query" ]
Confirm query exists given common filters.
[ "Confirm", "query", "exists", "given", "common", "filters", "." ]
train
https://github.com/theonion/django-bulbs/blob/0c0e6e3127a7dc487b96677fab95cacd2b3806da/bulbs/reading_list/mixins.py#L55-L60
theonion/django-bulbs
bulbs/reading_list/mixins.py
ReadingListMixin.get_validated_augment_query
def get_validated_augment_query(self, augment_query=None): """ Common rules for reading list augmentation hierarchy. 1. Sponsored Content. 2. Video Content. """ augment_query = self.validate_query(augment_query) # Given an invalid query, reach for a Sponsored query. if not augment_query: augment_query = self.validate_query(Content.search_objects.sponsored()) # Given an invalid Sponsored query, reach for a Video query. if not augment_query: reading_list_config = getattr(settings, "READING_LIST_CONFIG", {}) excluded_channel_ids = reading_list_config.get("excluded_channel_ids", []) augment_query = self.validate_query(Content.search_objects.evergreen_video( excluded_channel_ids=excluded_channel_ids )) return augment_query
python
def get_validated_augment_query(self, augment_query=None): """ Common rules for reading list augmentation hierarchy. 1. Sponsored Content. 2. Video Content. """ augment_query = self.validate_query(augment_query) # Given an invalid query, reach for a Sponsored query. if not augment_query: augment_query = self.validate_query(Content.search_objects.sponsored()) # Given an invalid Sponsored query, reach for a Video query. if not augment_query: reading_list_config = getattr(settings, "READING_LIST_CONFIG", {}) excluded_channel_ids = reading_list_config.get("excluded_channel_ids", []) augment_query = self.validate_query(Content.search_objects.evergreen_video( excluded_channel_ids=excluded_channel_ids )) return augment_query
[ "def", "get_validated_augment_query", "(", "self", ",", "augment_query", "=", "None", ")", ":", "augment_query", "=", "self", ".", "validate_query", "(", "augment_query", ")", "# Given an invalid query, reach for a Sponsored query.", "if", "not", "augment_query", ":", "...
Common rules for reading list augmentation hierarchy. 1. Sponsored Content. 2. Video Content.
[ "Common", "rules", "for", "reading", "list", "augmentation", "hierarchy", "." ]
train
https://github.com/theonion/django-bulbs/blob/0c0e6e3127a7dc487b96677fab95cacd2b3806da/bulbs/reading_list/mixins.py#L62-L83
theonion/django-bulbs
bulbs/reading_list/mixins.py
ReadingListMixin.augment_reading_list
def augment_reading_list(self, primary_query, augment_query=None, reverse_negate=False): """Apply injected logic for slicing reading lists with additional content.""" primary_query = self.validate_query(primary_query) augment_query = self.get_validated_augment_query(augment_query=augment_query) try: # We use this for cases like recent where queries are vague. if reverse_negate: primary_query = primary_query.filter(NegateQueryFilter(augment_query)) else: augment_query = augment_query.filter(NegateQueryFilter(primary_query)) augment_query = randomize_es(augment_query) return FirstSlotSlicer(primary_query, augment_query) except TransportError: return primary_query
python
def augment_reading_list(self, primary_query, augment_query=None, reverse_negate=False): """Apply injected logic for slicing reading lists with additional content.""" primary_query = self.validate_query(primary_query) augment_query = self.get_validated_augment_query(augment_query=augment_query) try: # We use this for cases like recent where queries are vague. if reverse_negate: primary_query = primary_query.filter(NegateQueryFilter(augment_query)) else: augment_query = augment_query.filter(NegateQueryFilter(primary_query)) augment_query = randomize_es(augment_query) return FirstSlotSlicer(primary_query, augment_query) except TransportError: return primary_query
[ "def", "augment_reading_list", "(", "self", ",", "primary_query", ",", "augment_query", "=", "None", ",", "reverse_negate", "=", "False", ")", ":", "primary_query", "=", "self", ".", "validate_query", "(", "primary_query", ")", "augment_query", "=", "self", ".",...
Apply injected logic for slicing reading lists with additional content.
[ "Apply", "injected", "logic", "for", "slicing", "reading", "lists", "with", "additional", "content", "." ]
train
https://github.com/theonion/django-bulbs/blob/0c0e6e3127a7dc487b96677fab95cacd2b3806da/bulbs/reading_list/mixins.py#L85-L99
theonion/django-bulbs
bulbs/reading_list/mixins.py
ReadingListMixin.update_reading_list
def update_reading_list(self, reading_list): """Generic behaviors for reading lists before being rendered.""" # remove the current piece of content from the query. reading_list = reading_list.filter( ~es_filter.Ids(values=[self.id]) ) # remove excluded document types from the query. reading_list_config = getattr(settings, "READING_LIST_CONFIG", {}) excluded_doc_types = reading_list_config.get("excluded_doc_types", []) for obj in excluded_doc_types: reading_list = reading_list.filter(~es_filter.Type(value=obj)) return reading_list
python
def update_reading_list(self, reading_list): """Generic behaviors for reading lists before being rendered.""" # remove the current piece of content from the query. reading_list = reading_list.filter( ~es_filter.Ids(values=[self.id]) ) # remove excluded document types from the query. reading_list_config = getattr(settings, "READING_LIST_CONFIG", {}) excluded_doc_types = reading_list_config.get("excluded_doc_types", []) for obj in excluded_doc_types: reading_list = reading_list.filter(~es_filter.Type(value=obj)) return reading_list
[ "def", "update_reading_list", "(", "self", ",", "reading_list", ")", ":", "# remove the current piece of content from the query.", "reading_list", "=", "reading_list", ".", "filter", "(", "~", "es_filter", ".", "Ids", "(", "values", "=", "[", "self", ".", "id", "]...
Generic behaviors for reading lists before being rendered.
[ "Generic", "behaviors", "for", "reading", "lists", "before", "being", "rendered", "." ]
train
https://github.com/theonion/django-bulbs/blob/0c0e6e3127a7dc487b96677fab95cacd2b3806da/bulbs/reading_list/mixins.py#L117-L131
theonion/django-bulbs
bulbs/reading_list/mixins.py
ReadingListMixin.get_reading_list_context
def get_reading_list_context(self, **kwargs): """Returns the context dictionary for a given reading list.""" reading_list = None context = { "name": "", "content": reading_list, "targeting": {}, "videos": [] } if self.reading_list_identifier == "popular": reading_list = popular_content() context.update({"name": self.reading_list_identifier}) # Popular is augmented. reading_list = self.augment_reading_list(reading_list) context.update({"content": reading_list}) return context if self.reading_list_identifier.startswith("specialcoverage"): special_coverage = SpecialCoverage.objects.get_by_identifier( self.reading_list_identifier ) reading_list = special_coverage.get_content().query( SponsoredBoost(field_name="tunic_campaign_id") ).sort("_score", "-published") context["targeting"]["dfp_specialcoverage"] = special_coverage.slug if special_coverage.tunic_campaign_id: context["tunic_campaign_id"] = special_coverage.tunic_campaign_id context["targeting"].update({ "dfp_campaign_id": special_coverage.tunic_campaign_id }) # We do not augment sponsored special coverage lists. reading_list = self.update_reading_list(reading_list) else: reading_list = self.augment_reading_list(reading_list) context.update({ "name": special_coverage.name, "videos": special_coverage.videos, "content": reading_list }) return context if self.reading_list_identifier.startswith("section"): section = Section.objects.get_by_identifier(self.reading_list_identifier) reading_list = section.get_content() reading_list = self.augment_reading_list(reading_list) context.update({ "name": section.name, "content": reading_list }) return context reading_list = Content.search_objects.search() reading_list = self.augment_reading_list(reading_list, reverse_negate=True) context.update({ "name": "Recent News", "content": reading_list }) return context
python
def get_reading_list_context(self, **kwargs): """Returns the context dictionary for a given reading list.""" reading_list = None context = { "name": "", "content": reading_list, "targeting": {}, "videos": [] } if self.reading_list_identifier == "popular": reading_list = popular_content() context.update({"name": self.reading_list_identifier}) # Popular is augmented. reading_list = self.augment_reading_list(reading_list) context.update({"content": reading_list}) return context if self.reading_list_identifier.startswith("specialcoverage"): special_coverage = SpecialCoverage.objects.get_by_identifier( self.reading_list_identifier ) reading_list = special_coverage.get_content().query( SponsoredBoost(field_name="tunic_campaign_id") ).sort("_score", "-published") context["targeting"]["dfp_specialcoverage"] = special_coverage.slug if special_coverage.tunic_campaign_id: context["tunic_campaign_id"] = special_coverage.tunic_campaign_id context["targeting"].update({ "dfp_campaign_id": special_coverage.tunic_campaign_id }) # We do not augment sponsored special coverage lists. reading_list = self.update_reading_list(reading_list) else: reading_list = self.augment_reading_list(reading_list) context.update({ "name": special_coverage.name, "videos": special_coverage.videos, "content": reading_list }) return context if self.reading_list_identifier.startswith("section"): section = Section.objects.get_by_identifier(self.reading_list_identifier) reading_list = section.get_content() reading_list = self.augment_reading_list(reading_list) context.update({ "name": section.name, "content": reading_list }) return context reading_list = Content.search_objects.search() reading_list = self.augment_reading_list(reading_list, reverse_negate=True) context.update({ "name": "Recent News", "content": reading_list }) return context
[ "def", "get_reading_list_context", "(", "self", ",", "*", "*", "kwargs", ")", ":", "reading_list", "=", "None", "context", "=", "{", "\"name\"", ":", "\"\"", ",", "\"content\"", ":", "reading_list", ",", "\"targeting\"", ":", "{", "}", ",", "\"videos\"", "...
Returns the context dictionary for a given reading list.
[ "Returns", "the", "context", "dictionary", "for", "a", "given", "reading", "list", "." ]
train
https://github.com/theonion/django-bulbs/blob/0c0e6e3127a7dc487b96677fab95cacd2b3806da/bulbs/reading_list/mixins.py#L133-L192
jonbeebe/frontmatter
frontmatter/__init__.py
Frontmatter.read_file
def read_file(cls, path): """Reads file at path and returns dict with separated frontmatter. See read() for more info on dict return value. """ with open(path, encoding="utf-8") as file: file_contents = file.read() return cls.read(file_contents)
python
def read_file(cls, path): """Reads file at path and returns dict with separated frontmatter. See read() for more info on dict return value. """ with open(path, encoding="utf-8") as file: file_contents = file.read() return cls.read(file_contents)
[ "def", "read_file", "(", "cls", ",", "path", ")", ":", "with", "open", "(", "path", ",", "encoding", "=", "\"utf-8\"", ")", "as", "file", ":", "file_contents", "=", "file", ".", "read", "(", ")", "return", "cls", ".", "read", "(", "file_contents", ")...
Reads file at path and returns dict with separated frontmatter. See read() for more info on dict return value.
[ "Reads", "file", "at", "path", "and", "returns", "dict", "with", "separated", "frontmatter", ".", "See", "read", "()", "for", "more", "info", "on", "dict", "return", "value", "." ]
train
https://github.com/jonbeebe/frontmatter/blob/68bfd0f76bd4ddeb60fc7c28320db03490c9a516/frontmatter/__init__.py#L12-L18
jonbeebe/frontmatter
frontmatter/__init__.py
Frontmatter.read
def read(cls, string): """Returns dict with separated frontmatter from string. Returned dict keys: attributes -- extracted YAML attributes in dict form. body -- string contents below the YAML separators frontmatter -- string representation of YAML """ fmatter = "" body = "" result = cls._regex.search(string) if result: fmatter = result.group(1) body = result.group(2) return { "attributes": yaml.load(fmatter), "body": body, "frontmatter": fmatter, }
python
def read(cls, string): """Returns dict with separated frontmatter from string. Returned dict keys: attributes -- extracted YAML attributes in dict form. body -- string contents below the YAML separators frontmatter -- string representation of YAML """ fmatter = "" body = "" result = cls._regex.search(string) if result: fmatter = result.group(1) body = result.group(2) return { "attributes": yaml.load(fmatter), "body": body, "frontmatter": fmatter, }
[ "def", "read", "(", "cls", ",", "string", ")", ":", "fmatter", "=", "\"\"", "body", "=", "\"\"", "result", "=", "cls", ".", "_regex", ".", "search", "(", "string", ")", "if", "result", ":", "fmatter", "=", "result", ".", "group", "(", "1", ")", "...
Returns dict with separated frontmatter from string. Returned dict keys: attributes -- extracted YAML attributes in dict form. body -- string contents below the YAML separators frontmatter -- string representation of YAML
[ "Returns", "dict", "with", "separated", "frontmatter", "from", "string", "." ]
train
https://github.com/jonbeebe/frontmatter/blob/68bfd0f76bd4ddeb60fc7c28320db03490c9a516/frontmatter/__init__.py#L21-L40
LIVVkit/LIVVkit
livvkit/components/performance.py
run_suite
def run_suite(case, config, summary): """ Run the full suite of performance tests """ config["name"] = case timing_data = dict() model_dir = os.path.join(livvkit.model_dir, config['data_dir'], case) bench_dir = os.path.join(livvkit.bench_dir, config['data_dir'], case) plot_dir = os.path.join(livvkit.output_dir, "performance", "imgs") model_cases = functions.collect_cases(model_dir) bench_cases = functions.collect_cases(bench_dir) functions.mkdir_p(plot_dir) # Generate all of the timing data for subcase in sorted(model_cases): bench_subcases = bench_cases[subcase] if subcase in bench_cases else [] timing_data[subcase] = dict() for mcase in model_cases[subcase]: config["case"] = "-".join([subcase, mcase]) bpath = (os.path.join(bench_dir, subcase, mcase.replace("-", os.path.sep)) if mcase in bench_subcases else None) mpath = os.path.join(model_dir, subcase, mcase.replace("-", os.path.sep)) timing_data[subcase][mcase] = _analyze_case(mpath, bpath, config) # Create scaling and timing breakdown plots weak_data = weak_scaling(timing_data, config['scaling_var'], config['weak_scaling_points']) strong_data = strong_scaling(timing_data, config['scaling_var'], config['strong_scaling_points']) timing_plots = [ generate_scaling_plot(weak_data, "Weak scaling for " + case.capitalize(), "runtime (s)", "", os.path.join(plot_dir, case + "_weak_scaling.png") ), weak_scaling_efficiency_plot(weak_data, "Weak scaling efficiency for " + case.capitalize(), "Parallel efficiency (% of linear)", "", os.path.join(plot_dir, case + "_weak_scaling_efficiency.png") ), generate_scaling_plot(strong_data, "Strong scaling for " + case.capitalize(), "Runtime (s)", "", os.path.join(plot_dir, case + "_strong_scaling.png") ), strong_scaling_efficiency_plot(strong_data, "Strong scaling efficiency for " + case.capitalize(), "Parallel efficiency (% of linear)", "", os.path.join(plot_dir, case + "_strong_scaling_efficiency.png") ), ] timing_plots = timing_plots + \ 
[generate_timing_breakdown_plot(timing_data[s], config['scaling_var'], "Timing breakdown for " + case.capitalize()+" "+s, "", os.path.join(plot_dir, case+"_"+s+"_timing_breakdown.png") ) for s in sorted(six.iterkeys(timing_data), key=functions.sort_scale)] # Build an image gallery and write the results el = [ elements.gallery("Performance Plots", timing_plots) ] result = elements.page(case, config["description"], element_list=el) summary[case] = _summarize_result(timing_data, config) _print_result(case, summary) functions.create_page_from_template("performance.html", os.path.join(livvkit.index_dir, "performance", case + ".html")) functions.write_json(result, os.path.join(livvkit.output_dir, "performance"), case + ".json")
python
def run_suite(case, config, summary): """ Run the full suite of performance tests """ config["name"] = case timing_data = dict() model_dir = os.path.join(livvkit.model_dir, config['data_dir'], case) bench_dir = os.path.join(livvkit.bench_dir, config['data_dir'], case) plot_dir = os.path.join(livvkit.output_dir, "performance", "imgs") model_cases = functions.collect_cases(model_dir) bench_cases = functions.collect_cases(bench_dir) functions.mkdir_p(plot_dir) # Generate all of the timing data for subcase in sorted(model_cases): bench_subcases = bench_cases[subcase] if subcase in bench_cases else [] timing_data[subcase] = dict() for mcase in model_cases[subcase]: config["case"] = "-".join([subcase, mcase]) bpath = (os.path.join(bench_dir, subcase, mcase.replace("-", os.path.sep)) if mcase in bench_subcases else None) mpath = os.path.join(model_dir, subcase, mcase.replace("-", os.path.sep)) timing_data[subcase][mcase] = _analyze_case(mpath, bpath, config) # Create scaling and timing breakdown plots weak_data = weak_scaling(timing_data, config['scaling_var'], config['weak_scaling_points']) strong_data = strong_scaling(timing_data, config['scaling_var'], config['strong_scaling_points']) timing_plots = [ generate_scaling_plot(weak_data, "Weak scaling for " + case.capitalize(), "runtime (s)", "", os.path.join(plot_dir, case + "_weak_scaling.png") ), weak_scaling_efficiency_plot(weak_data, "Weak scaling efficiency for " + case.capitalize(), "Parallel efficiency (% of linear)", "", os.path.join(plot_dir, case + "_weak_scaling_efficiency.png") ), generate_scaling_plot(strong_data, "Strong scaling for " + case.capitalize(), "Runtime (s)", "", os.path.join(plot_dir, case + "_strong_scaling.png") ), strong_scaling_efficiency_plot(strong_data, "Strong scaling efficiency for " + case.capitalize(), "Parallel efficiency (% of linear)", "", os.path.join(plot_dir, case + "_strong_scaling_efficiency.png") ), ] timing_plots = timing_plots + \ 
[generate_timing_breakdown_plot(timing_data[s], config['scaling_var'], "Timing breakdown for " + case.capitalize()+" "+s, "", os.path.join(plot_dir, case+"_"+s+"_timing_breakdown.png") ) for s in sorted(six.iterkeys(timing_data), key=functions.sort_scale)] # Build an image gallery and write the results el = [ elements.gallery("Performance Plots", timing_plots) ] result = elements.page(case, config["description"], element_list=el) summary[case] = _summarize_result(timing_data, config) _print_result(case, summary) functions.create_page_from_template("performance.html", os.path.join(livvkit.index_dir, "performance", case + ".html")) functions.write_json(result, os.path.join(livvkit.output_dir, "performance"), case + ".json")
[ "def", "run_suite", "(", "case", ",", "config", ",", "summary", ")", ":", "config", "[", "\"name\"", "]", "=", "case", "timing_data", "=", "dict", "(", ")", "model_dir", "=", "os", ".", "path", ".", "join", "(", "livvkit", ".", "model_dir", ",", "con...
Run the full suite of performance tests
[ "Run", "the", "full", "suite", "of", "performance", "tests" ]
train
https://github.com/LIVVkit/LIVVkit/blob/680120cd437e408673e62e535fc0a246c7fc17db/livvkit/components/performance.py#L49-L121
LIVVkit/LIVVkit
livvkit/components/performance.py
_analyze_case
def _analyze_case(model_dir, bench_dir, config): """ Generates statistics from the timing summaries """ model_timings = set(glob.glob(os.path.join(model_dir, "*" + config["timing_ext"]))) if bench_dir is not None: bench_timings = set(glob.glob(os.path.join(bench_dir, "*" + config["timing_ext"]))) else: bench_timings = set() if not len(model_timings): return dict() model_stats = generate_timing_stats(model_timings, config['timing_vars']) bench_stats = generate_timing_stats(bench_timings, config['timing_vars']) return dict(model=model_stats, bench=bench_stats)
python
def _analyze_case(model_dir, bench_dir, config): """ Generates statistics from the timing summaries """ model_timings = set(glob.glob(os.path.join(model_dir, "*" + config["timing_ext"]))) if bench_dir is not None: bench_timings = set(glob.glob(os.path.join(bench_dir, "*" + config["timing_ext"]))) else: bench_timings = set() if not len(model_timings): return dict() model_stats = generate_timing_stats(model_timings, config['timing_vars']) bench_stats = generate_timing_stats(bench_timings, config['timing_vars']) return dict(model=model_stats, bench=bench_stats)
[ "def", "_analyze_case", "(", "model_dir", ",", "bench_dir", ",", "config", ")", ":", "model_timings", "=", "set", "(", "glob", ".", "glob", "(", "os", ".", "path", ".", "join", "(", "model_dir", ",", "\"*\"", "+", "config", "[", "\"timing_ext\"", "]", ...
Generates statistics from the timing summaries
[ "Generates", "statistics", "from", "the", "timing", "summaries" ]
train
https://github.com/LIVVkit/LIVVkit/blob/680120cd437e408673e62e535fc0a246c7fc17db/livvkit/components/performance.py#L124-L135
LIVVkit/LIVVkit
livvkit/components/performance.py
_print_result
def _print_result(case, summary): """ Show some statistics from the run """ for case, case_data in summary.items(): for dof, data in case_data.items(): print(" " + case + " " + dof) print(" -------------------") for header, val in data.items(): print(" " + header + " : " + str(val)) print("")
python
def _print_result(case, summary): """ Show some statistics from the run """ for case, case_data in summary.items(): for dof, data in case_data.items(): print(" " + case + " " + dof) print(" -------------------") for header, val in data.items(): print(" " + header + " : " + str(val)) print("")
[ "def", "_print_result", "(", "case", ",", "summary", ")", ":", "for", "case", ",", "case_data", "in", "summary", ".", "items", "(", ")", ":", "for", "dof", ",", "data", "in", "case_data", ".", "items", "(", ")", ":", "print", "(", "\" \"", "+", ...
Show some statistics from the run
[ "Show", "some", "statistics", "from", "the", "run" ]
train
https://github.com/LIVVkit/LIVVkit/blob/680120cd437e408673e62e535fc0a246c7fc17db/livvkit/components/performance.py#L139-L147
LIVVkit/LIVVkit
livvkit/components/performance.py
_summarize_result
def _summarize_result(result, config): """ Trim out some data to return for the index page """ timing_var = config['scaling_var'] summary = LIVVDict() for size, res in result.items(): proc_counts = [] bench_times = [] model_times = [] for proc, data in res.items(): proc_counts.append(int(proc[1:])) try: bench_times.append(data['bench'][timing_var]['mean']) except KeyError: pass try: model_times.append(data['model'][timing_var]['mean']) except KeyError: pass if model_times != [] and bench_times != []: time_diff = np.mean(model_times)/np.mean(bench_times) else: time_diff = 'NA' summary[size]['Proc. Counts'] = ", ".join([str(x) for x in sorted(proc_counts)]) summary[size]['Mean Time Diff (% of benchmark)'] = time_diff return summary
python
def _summarize_result(result, config): """ Trim out some data to return for the index page """ timing_var = config['scaling_var'] summary = LIVVDict() for size, res in result.items(): proc_counts = [] bench_times = [] model_times = [] for proc, data in res.items(): proc_counts.append(int(proc[1:])) try: bench_times.append(data['bench'][timing_var]['mean']) except KeyError: pass try: model_times.append(data['model'][timing_var]['mean']) except KeyError: pass if model_times != [] and bench_times != []: time_diff = np.mean(model_times)/np.mean(bench_times) else: time_diff = 'NA' summary[size]['Proc. Counts'] = ", ".join([str(x) for x in sorted(proc_counts)]) summary[size]['Mean Time Diff (% of benchmark)'] = time_diff return summary
[ "def", "_summarize_result", "(", "result", ",", "config", ")", ":", "timing_var", "=", "config", "[", "'scaling_var'", "]", "summary", "=", "LIVVDict", "(", ")", "for", "size", ",", "res", "in", "result", ".", "items", "(", ")", ":", "proc_counts", "=", ...
Trim out some data to return for the index page
[ "Trim", "out", "some", "data", "to", "return", "for", "the", "index", "page" ]
train
https://github.com/LIVVkit/LIVVkit/blob/680120cd437e408673e62e535fc0a246c7fc17db/livvkit/components/performance.py#L150-L174
LIVVkit/LIVVkit
livvkit/components/performance.py
generate_timing_stats
def generate_timing_stats(file_list, var_list): """ Parse all of the timing files, and generate some statistics about the run. Args: file_list: A list of timing files to parse var_list: A list of variables to look for in the timing file Returns: A dict containing values that have the form: [mean, min, max, mean, standard deviation] """ timing_result = dict() timing_summary = dict() for file in file_list: timing_result[file] = functions.parse_gptl(file, var_list) for var in var_list: var_time = [] for f, data in timing_result.items(): try: var_time.append(data[var]) except: continue if len(var_time): timing_summary[var] = {'mean': np.mean(var_time), 'max': np.max(var_time), 'min': np.min(var_time), 'std': np.std(var_time)} return timing_summary
python
def generate_timing_stats(file_list, var_list): """ Parse all of the timing files, and generate some statistics about the run. Args: file_list: A list of timing files to parse var_list: A list of variables to look for in the timing file Returns: A dict containing values that have the form: [mean, min, max, mean, standard deviation] """ timing_result = dict() timing_summary = dict() for file in file_list: timing_result[file] = functions.parse_gptl(file, var_list) for var in var_list: var_time = [] for f, data in timing_result.items(): try: var_time.append(data[var]) except: continue if len(var_time): timing_summary[var] = {'mean': np.mean(var_time), 'max': np.max(var_time), 'min': np.min(var_time), 'std': np.std(var_time)} return timing_summary
[ "def", "generate_timing_stats", "(", "file_list", ",", "var_list", ")", ":", "timing_result", "=", "dict", "(", ")", "timing_summary", "=", "dict", "(", ")", "for", "file", "in", "file_list", ":", "timing_result", "[", "file", "]", "=", "functions", ".", "...
Parse all of the timing files, and generate some statistics about the run. Args: file_list: A list of timing files to parse var_list: A list of variables to look for in the timing file Returns: A dict containing values that have the form: [mean, min, max, mean, standard deviation]
[ "Parse", "all", "of", "the", "timing", "files", "and", "generate", "some", "statistics", "about", "the", "run", "." ]
train
https://github.com/LIVVkit/LIVVkit/blob/680120cd437e408673e62e535fc0a246c7fc17db/livvkit/components/performance.py#L185-L214
LIVVkit/LIVVkit
livvkit/components/performance.py
weak_scaling
def weak_scaling(timing_stats, scaling_var, data_points): """ Generate data for plotting weak scaling. The data points keep a constant amount of work per processor for each data point. Args: timing_stats: the result of the generate_timing_stats function scaling_var: the variable to select from the timing_stats dictionary (can be provided in configurations via the 'scaling_var' key) data_points: the list of size and processor counts to use as data (can be provided in configurations via the 'weak_scaling_points' key) Returns: A dict of the form: {'bench' : {'mins' : [], 'means' : [], 'maxs' : []}, 'model' : {'mins' : [], 'means' : [], 'maxs' : []}, 'proc_counts' : []} """ timing_data = dict() proc_counts = [] bench_means = [] bench_mins = [] bench_maxs = [] model_means = [] model_mins = [] model_maxs = [] for point in data_points: size = point[0] proc = point[1] try: model_data = timing_stats[size][proc]['model'][scaling_var] bench_data = timing_stats[size][proc]['bench'][scaling_var] except KeyError: continue proc_counts.append(proc) model_means.append(model_data['mean']) model_mins.append(model_data['min']) model_maxs.append(model_data['max']) bench_means.append(bench_data['mean']) bench_mins.append(bench_data['min']) bench_maxs.append(bench_data['max']) timing_data['bench'] = dict(mins=bench_mins, means=bench_means, maxs=bench_maxs) timing_data['model'] = dict(mins=model_mins, means=model_means, maxs=model_maxs) timing_data['proc_counts'] = [int(pc[1:]) for pc in proc_counts] return timing_data
python
def weak_scaling(timing_stats, scaling_var, data_points): """ Generate data for plotting weak scaling. The data points keep a constant amount of work per processor for each data point. Args: timing_stats: the result of the generate_timing_stats function scaling_var: the variable to select from the timing_stats dictionary (can be provided in configurations via the 'scaling_var' key) data_points: the list of size and processor counts to use as data (can be provided in configurations via the 'weak_scaling_points' key) Returns: A dict of the form: {'bench' : {'mins' : [], 'means' : [], 'maxs' : []}, 'model' : {'mins' : [], 'means' : [], 'maxs' : []}, 'proc_counts' : []} """ timing_data = dict() proc_counts = [] bench_means = [] bench_mins = [] bench_maxs = [] model_means = [] model_mins = [] model_maxs = [] for point in data_points: size = point[0] proc = point[1] try: model_data = timing_stats[size][proc]['model'][scaling_var] bench_data = timing_stats[size][proc]['bench'][scaling_var] except KeyError: continue proc_counts.append(proc) model_means.append(model_data['mean']) model_mins.append(model_data['min']) model_maxs.append(model_data['max']) bench_means.append(bench_data['mean']) bench_mins.append(bench_data['min']) bench_maxs.append(bench_data['max']) timing_data['bench'] = dict(mins=bench_mins, means=bench_means, maxs=bench_maxs) timing_data['model'] = dict(mins=model_mins, means=model_means, maxs=model_maxs) timing_data['proc_counts'] = [int(pc[1:]) for pc in proc_counts] return timing_data
[ "def", "weak_scaling", "(", "timing_stats", ",", "scaling_var", ",", "data_points", ")", ":", "timing_data", "=", "dict", "(", ")", "proc_counts", "=", "[", "]", "bench_means", "=", "[", "]", "bench_mins", "=", "[", "]", "bench_maxs", "=", "[", "]", "mod...
Generate data for plotting weak scaling. The data points keep a constant amount of work per processor for each data point. Args: timing_stats: the result of the generate_timing_stats function scaling_var: the variable to select from the timing_stats dictionary (can be provided in configurations via the 'scaling_var' key) data_points: the list of size and processor counts to use as data (can be provided in configurations via the 'weak_scaling_points' key) Returns: A dict of the form: {'bench' : {'mins' : [], 'means' : [], 'maxs' : []}, 'model' : {'mins' : [], 'means' : [], 'maxs' : []}, 'proc_counts' : []}
[ "Generate", "data", "for", "plotting", "weak", "scaling", ".", "The", "data", "points", "keep", "a", "constant", "amount", "of", "work", "per", "processor", "for", "each", "data", "point", "." ]
train
https://github.com/LIVVkit/LIVVkit/blob/680120cd437e408673e62e535fc0a246c7fc17db/livvkit/components/performance.py#L217-L261
LIVVkit/LIVVkit
livvkit/components/performance.py
generate_scaling_plot
def generate_scaling_plot(timing_data, title, ylabel, description, plot_file): """ Generate a scaling plot. Args: timing_data: data returned from a `*_scaling` method title: the title of the plot ylabel: the y-axis label of the plot description: a description of the plot plot_file: the file to write out to Returns: an image element containing the plot file and metadata """ proc_counts = timing_data['proc_counts'] if len(proc_counts) > 2: plt.figure(figsize=(10, 8), dpi=150) plt.title(title) plt.xlabel("Number of processors") plt.ylabel(ylabel) for case, case_color in zip(['bench', 'model'], ['#91bfdb', '#fc8d59']): case_data = timing_data[case] means = case_data['means'] mins = case_data['mins'] maxs = case_data['maxs'] plt.fill_between(proc_counts, mins, maxs, facecolor=case_color, alpha=0.5) plt.plot(proc_counts, means, 'o-', color=case_color, label=case) plt.legend(loc='best') else: plt.figure(figsize=(5, 3)) plt.axis('off') plt.text(0.4, 0.8, "ERROR:") plt.text(0.0, 0.6, "Not enough data points to draw scaling plot") plt.text(0.0, 0.44, "To generate this data rerun BATS with the") plt.text(0.0, 0.36, "performance option enabled.") if livvkit.publish: plt.savefig(os.path.splitext(plot_file)[0]+'.eps', dpi=600) plt.savefig(plot_file) plt.close() return elements.image(title, description, os.path.basename(plot_file))
python
def generate_scaling_plot(timing_data, title, ylabel, description, plot_file): """ Generate a scaling plot. Args: timing_data: data returned from a `*_scaling` method title: the title of the plot ylabel: the y-axis label of the plot description: a description of the plot plot_file: the file to write out to Returns: an image element containing the plot file and metadata """ proc_counts = timing_data['proc_counts'] if len(proc_counts) > 2: plt.figure(figsize=(10, 8), dpi=150) plt.title(title) plt.xlabel("Number of processors") plt.ylabel(ylabel) for case, case_color in zip(['bench', 'model'], ['#91bfdb', '#fc8d59']): case_data = timing_data[case] means = case_data['means'] mins = case_data['mins'] maxs = case_data['maxs'] plt.fill_between(proc_counts, mins, maxs, facecolor=case_color, alpha=0.5) plt.plot(proc_counts, means, 'o-', color=case_color, label=case) plt.legend(loc='best') else: plt.figure(figsize=(5, 3)) plt.axis('off') plt.text(0.4, 0.8, "ERROR:") plt.text(0.0, 0.6, "Not enough data points to draw scaling plot") plt.text(0.0, 0.44, "To generate this data rerun BATS with the") plt.text(0.0, 0.36, "performance option enabled.") if livvkit.publish: plt.savefig(os.path.splitext(plot_file)[0]+'.eps', dpi=600) plt.savefig(plot_file) plt.close() return elements.image(title, description, os.path.basename(plot_file))
[ "def", "generate_scaling_plot", "(", "timing_data", ",", "title", ",", "ylabel", ",", "description", ",", "plot_file", ")", ":", "proc_counts", "=", "timing_data", "[", "'proc_counts'", "]", "if", "len", "(", "proc_counts", ")", ">", "2", ":", "plt", ".", ...
Generate a scaling plot. Args: timing_data: data returned from a `*_scaling` method title: the title of the plot ylabel: the y-axis label of the plot description: a description of the plot plot_file: the file to write out to Returns: an image element containing the plot file and metadata
[ "Generate", "a", "scaling", "plot", "." ]
train
https://github.com/LIVVkit/LIVVkit/blob/680120cd437e408673e62e535fc0a246c7fc17db/livvkit/components/performance.py#L312-L354
LIVVkit/LIVVkit
livvkit/components/performance.py
generate_timing_breakdown_plot
def generate_timing_breakdown_plot(timing_stats, scaling_var, title, description, plot_file): """ Description Args: timing_stats: a dictionary of the form {proc_count : {model||bench : { var : { stat : val }}}} scaling_var: the variable that accounts for the total runtime title: the title of the plot description: the description of the plot plot_file: the file to write the plot out to Returns: an image element containing the plot file and metadata """ # noinspection PyProtectedMember cmap_data = colormaps._viridis_data n_subplots = len(six.viewkeys(timing_stats)) fig, ax = plt.subplots(1, n_subplots+1, figsize=(3*(n_subplots+2), 5)) for plot_num, p_count in enumerate( sorted(six.iterkeys(timing_stats), key=functions.sort_processor_counts)): case_data = timing_stats[p_count] all_timers = set(six.iterkeys(case_data['model'])) | set(six.iterkeys(case_data['bench'])) all_timers = sorted(list(all_timers), reverse=True) cmap_stride = int(len(cmap_data)/(len(all_timers)+1)) colors = {all_timers[i]: cmap_data[i*cmap_stride] for i in range(len(all_timers))} sub_ax = plt.subplot(1, n_subplots+1, plot_num+1) sub_ax.set_title(p_count) sub_ax.set_ylabel('Runtime (s)') for case, var_data in case_data.items(): if case == 'bench': bar_num = 2 else: bar_num = 1 offset = 0 if var_data != {}: for var in sorted(six.iterkeys(var_data), reverse=True): if var != scaling_var: plt.bar(bar_num, var_data[var]['mean'], 0.8, bottom=offset, color=colors[var], label=(var if bar_num == 1 else '_none')) offset += var_data[var]['mean'] plt.bar(bar_num, var_data[scaling_var]['mean']-offset, 0.8, bottom=offset, color=colors[scaling_var], label=(scaling_var if bar_num == 1 else '_none')) sub_ax.set_xticks([1.4, 2.4]) sub_ax.set_xticklabels(('test', 'bench')) plt.legend(loc=6, bbox_to_anchor=(1.05, 0.5)) plt.tight_layout() sub_ax = plt.subplot(1, n_subplots+1, n_subplots+1) hid_bar = plt.bar(1, 100) for group in hid_bar: group.set_visible(False) sub_ax.set_visible(False) if livvkit.publish: 
plt.savefig(os.path.splitext(plot_file)[0]+'.eps', dpi=600) plt.savefig(plot_file) plt.close() return elements.image(title, description, os.path.basename(plot_file))
python
def generate_timing_breakdown_plot(timing_stats, scaling_var, title, description, plot_file): """ Description Args: timing_stats: a dictionary of the form {proc_count : {model||bench : { var : { stat : val }}}} scaling_var: the variable that accounts for the total runtime title: the title of the plot description: the description of the plot plot_file: the file to write the plot out to Returns: an image element containing the plot file and metadata """ # noinspection PyProtectedMember cmap_data = colormaps._viridis_data n_subplots = len(six.viewkeys(timing_stats)) fig, ax = plt.subplots(1, n_subplots+1, figsize=(3*(n_subplots+2), 5)) for plot_num, p_count in enumerate( sorted(six.iterkeys(timing_stats), key=functions.sort_processor_counts)): case_data = timing_stats[p_count] all_timers = set(six.iterkeys(case_data['model'])) | set(six.iterkeys(case_data['bench'])) all_timers = sorted(list(all_timers), reverse=True) cmap_stride = int(len(cmap_data)/(len(all_timers)+1)) colors = {all_timers[i]: cmap_data[i*cmap_stride] for i in range(len(all_timers))} sub_ax = plt.subplot(1, n_subplots+1, plot_num+1) sub_ax.set_title(p_count) sub_ax.set_ylabel('Runtime (s)') for case, var_data in case_data.items(): if case == 'bench': bar_num = 2 else: bar_num = 1 offset = 0 if var_data != {}: for var in sorted(six.iterkeys(var_data), reverse=True): if var != scaling_var: plt.bar(bar_num, var_data[var]['mean'], 0.8, bottom=offset, color=colors[var], label=(var if bar_num == 1 else '_none')) offset += var_data[var]['mean'] plt.bar(bar_num, var_data[scaling_var]['mean']-offset, 0.8, bottom=offset, color=colors[scaling_var], label=(scaling_var if bar_num == 1 else '_none')) sub_ax.set_xticks([1.4, 2.4]) sub_ax.set_xticklabels(('test', 'bench')) plt.legend(loc=6, bbox_to_anchor=(1.05, 0.5)) plt.tight_layout() sub_ax = plt.subplot(1, n_subplots+1, n_subplots+1) hid_bar = plt.bar(1, 100) for group in hid_bar: group.set_visible(False) sub_ax.set_visible(False) if livvkit.publish: 
plt.savefig(os.path.splitext(plot_file)[0]+'.eps', dpi=600) plt.savefig(plot_file) plt.close() return elements.image(title, description, os.path.basename(plot_file))
[ "def", "generate_timing_breakdown_plot", "(", "timing_stats", ",", "scaling_var", ",", "title", ",", "description", ",", "plot_file", ")", ":", "# noinspection PyProtectedMember", "cmap_data", "=", "colormaps", ".", "_viridis_data", "n_subplots", "=", "len", "(", "six...
Description Args: timing_stats: a dictionary of the form {proc_count : {model||bench : { var : { stat : val }}}} scaling_var: the variable that accounts for the total runtime title: the title of the plot description: the description of the plot plot_file: the file to write the plot out to Returns: an image element containing the plot file and metadata
[ "Description" ]
train
https://github.com/LIVVkit/LIVVkit/blob/680120cd437e408673e62e535fc0a246c7fc17db/livvkit/components/performance.py#L405-L468
BDNYC/astrodbkit
astrodbkit/astrocat.py
Catalog.add_source
def add_source(self, ra, dec, flag='', radius=10*q.arcsec): """ Add a source to the catalog manually and find data in existing catalogs Parameters ---------- ra: astropy.units.quantity.Quantity The RA of the source dec: astropy.units.quantity.Quantity The Dec of the source flag: str A flag for the source radius: float The cross match radius for the list of catalogs """ # Get the id id = int(len(self.catalog)+1) # Check the coordinates ra = ra.to(q.deg) dec = dec.to(q.deg) datasets = 0 # Search the catalogs for this source for cat_name,params in self.catalogs.items(): self.Vizier_query(params['cat_loc'], cat_name, ra, dec, radius, ra_col=params['ra_col'], dec_col=params['dec_col'], append=True, group=False) # Add the source to the catalog self.catalog = self.catalog.append([id, ra.value, dec.value, flag, datasets], ignore_index=True)
python
def add_source(self, ra, dec, flag='', radius=10*q.arcsec): """ Add a source to the catalog manually and find data in existing catalogs Parameters ---------- ra: astropy.units.quantity.Quantity The RA of the source dec: astropy.units.quantity.Quantity The Dec of the source flag: str A flag for the source radius: float The cross match radius for the list of catalogs """ # Get the id id = int(len(self.catalog)+1) # Check the coordinates ra = ra.to(q.deg) dec = dec.to(q.deg) datasets = 0 # Search the catalogs for this source for cat_name,params in self.catalogs.items(): self.Vizier_query(params['cat_loc'], cat_name, ra, dec, radius, ra_col=params['ra_col'], dec_col=params['dec_col'], append=True, group=False) # Add the source to the catalog self.catalog = self.catalog.append([id, ra.value, dec.value, flag, datasets], ignore_index=True)
[ "def", "add_source", "(", "self", ",", "ra", ",", "dec", ",", "flag", "=", "''", ",", "radius", "=", "10", "*", "q", ".", "arcsec", ")", ":", "# Get the id", "id", "=", "int", "(", "len", "(", "self", ".", "catalog", ")", "+", "1", ")", "# Chec...
Add a source to the catalog manually and find data in existing catalogs Parameters ---------- ra: astropy.units.quantity.Quantity The RA of the source dec: astropy.units.quantity.Quantity The Dec of the source flag: str A flag for the source radius: float The cross match radius for the list of catalogs
[ "Add", "a", "source", "to", "the", "catalog", "manually", "and", "find", "data", "in", "existing", "catalogs", "Parameters", "----------", "ra", ":", "astropy", ".", "units", ".", "quantity", ".", "Quantity", "The", "RA", "of", "the", "source", "dec", ":",...
train
https://github.com/BDNYC/astrodbkit/blob/02c03c5e91aa7c7b0f3b5fa95bcf71e33ffcee09/astrodbkit/astrocat.py#L52-L80
BDNYC/astrodbkit
astrodbkit/astrocat.py
Catalog.delete_source
def delete_source(self, id): """ Delete a source from the catalog Parameters ---------- id: int The id of the source in the catalog """ # Set the index self.catalog.set_index('id') # Exclude the unwanted source self.catalog = self.catalog[self.catalog.id!=id] # Remove the records from the catalogs for cat_name in self.catalogs: new_cat = getattr(self, cat_name)[getattr(self, cat_name).source_id!=id] print('{} records removed from {} catalog'.format(int(len(getattr(self, cat_name))-len(new_cat)), cat_name)) setattr(self, cat_name, new_cat)
python
def delete_source(self, id): """ Delete a source from the catalog Parameters ---------- id: int The id of the source in the catalog """ # Set the index self.catalog.set_index('id') # Exclude the unwanted source self.catalog = self.catalog[self.catalog.id!=id] # Remove the records from the catalogs for cat_name in self.catalogs: new_cat = getattr(self, cat_name)[getattr(self, cat_name).source_id!=id] print('{} records removed from {} catalog'.format(int(len(getattr(self, cat_name))-len(new_cat)), cat_name)) setattr(self, cat_name, new_cat)
[ "def", "delete_source", "(", "self", ",", "id", ")", ":", "# Set the index", "self", ".", "catalog", ".", "set_index", "(", "'id'", ")", "# Exclude the unwanted source", "self", ".", "catalog", "=", "self", ".", "catalog", "[", "self", ".", "catalog", ".", ...
Delete a source from the catalog Parameters ---------- id: int The id of the source in the catalog
[ "Delete", "a", "source", "from", "the", "catalog", "Parameters", "----------", "id", ":", "int", "The", "id", "of", "the", "source", "in", "the", "catalog" ]
train
https://github.com/BDNYC/astrodbkit/blob/02c03c5e91aa7c7b0f3b5fa95bcf71e33ffcee09/astrodbkit/astrocat.py#L82-L101
BDNYC/astrodbkit
astrodbkit/astrocat.py
Catalog.ingest_data
def ingest_data(self, data, cat_name, id_col, ra_col='_RAJ2000', dec_col='_DEJ2000', cat_loc='', append=False, count=-1): """ Ingest a data file and regroup sources Parameters ---------- data: str, pandas.DataFrame, astropy.table.Table The path to the exported VizieR data or the data table cat_name: str The name of the added catalog id_col: str The name of the column containing the unique ids ra_col: str The name of the RA column dec_col: str The name of the DEC column cat_loc: str The location of the original catalog data append: bool Append the catalog rather than replace count: int The number of table rows to add (This is mainly for testing purposes) """ # Check if the catalog is already ingested if not append and cat_name in self.catalogs: print('Catalog {} already ingested.'.format(cat_name)) else: if isinstance(data, str): cat_loc = cat_loc or data data = pd.read_csv(data, sep='\t', comment='#', engine='python')[:count] elif isinstance(data, pd.core.frame.DataFrame): cat_loc = cat_loc or type(data) elif isinstance(data, (at.QTable, at.Table)): cat_loc = cat_loc or type(data) data = pd.DataFrame(list(data), columns=data.colnames) else: print("Sorry, but I cannot read that data. Try an ascii file cat_loc, astropy table, or pandas data frame.") return # Make sure ra and dec are decimal degrees if isinstance(data[ra_col][0], str): crds = coord.SkyCoord(ra=data[ra_col], dec=data[dec_col], unit=(q.hour, q.deg), frame='icrs') data.insert(0,'dec', crds.dec) data.insert(0,'ra', crds.ra) elif isinstance(data[ra_col][0], float): data.rename(columns={ra_col:'ra', dec_col:'dec'}, inplace=True) else: print("I can't read the RA and DEC of the input data. 
Please try again.") return # Change some names try: last = len(getattr(self, cat_name)) if append else 0 data.insert(0,'catID', ['{}_{}'.format(cat_name,n+1) for n in range(last,last+len(data))]) data.insert(0,'dec_corr', data['dec']) data.insert(0,'ra_corr', data['ra']) data.insert(0,'source_id', np.nan) print('Ingesting {} rows from {} catalog...'.format(len(data),cat_name)) # Save the raw data as an attribute if append: setattr(self, cat_name, getattr(self, cat_name).append(data, ignore_index=True)) else: setattr(self, cat_name, data) # Update the history self.history += "\n{}: Catalog {} ingested.".format(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"),cat_name) self.catalogs.update({cat_name:{'cat_loc':cat_loc, 'id_col':id_col, 'ra_col':ra_col, 'dec_col':dec_col}}) except AttributeError: print("No catalog named '{}'. Set 'append=False' to create it.".format(cat_name))
python
def ingest_data(self, data, cat_name, id_col, ra_col='_RAJ2000', dec_col='_DEJ2000', cat_loc='', append=False, count=-1): """ Ingest a data file and regroup sources Parameters ---------- data: str, pandas.DataFrame, astropy.table.Table The path to the exported VizieR data or the data table cat_name: str The name of the added catalog id_col: str The name of the column containing the unique ids ra_col: str The name of the RA column dec_col: str The name of the DEC column cat_loc: str The location of the original catalog data append: bool Append the catalog rather than replace count: int The number of table rows to add (This is mainly for testing purposes) """ # Check if the catalog is already ingested if not append and cat_name in self.catalogs: print('Catalog {} already ingested.'.format(cat_name)) else: if isinstance(data, str): cat_loc = cat_loc or data data = pd.read_csv(data, sep='\t', comment='#', engine='python')[:count] elif isinstance(data, pd.core.frame.DataFrame): cat_loc = cat_loc or type(data) elif isinstance(data, (at.QTable, at.Table)): cat_loc = cat_loc or type(data) data = pd.DataFrame(list(data), columns=data.colnames) else: print("Sorry, but I cannot read that data. Try an ascii file cat_loc, astropy table, or pandas data frame.") return # Make sure ra and dec are decimal degrees if isinstance(data[ra_col][0], str): crds = coord.SkyCoord(ra=data[ra_col], dec=data[dec_col], unit=(q.hour, q.deg), frame='icrs') data.insert(0,'dec', crds.dec) data.insert(0,'ra', crds.ra) elif isinstance(data[ra_col][0], float): data.rename(columns={ra_col:'ra', dec_col:'dec'}, inplace=True) else: print("I can't read the RA and DEC of the input data. 
Please try again.") return # Change some names try: last = len(getattr(self, cat_name)) if append else 0 data.insert(0,'catID', ['{}_{}'.format(cat_name,n+1) for n in range(last,last+len(data))]) data.insert(0,'dec_corr', data['dec']) data.insert(0,'ra_corr', data['ra']) data.insert(0,'source_id', np.nan) print('Ingesting {} rows from {} catalog...'.format(len(data),cat_name)) # Save the raw data as an attribute if append: setattr(self, cat_name, getattr(self, cat_name).append(data, ignore_index=True)) else: setattr(self, cat_name, data) # Update the history self.history += "\n{}: Catalog {} ingested.".format(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"),cat_name) self.catalogs.update({cat_name:{'cat_loc':cat_loc, 'id_col':id_col, 'ra_col':ra_col, 'dec_col':dec_col}}) except AttributeError: print("No catalog named '{}'. Set 'append=False' to create it.".format(cat_name))
[ "def", "ingest_data", "(", "self", ",", "data", ",", "cat_name", ",", "id_col", ",", "ra_col", "=", "'_RAJ2000'", ",", "dec_col", "=", "'_DEJ2000'", ",", "cat_loc", "=", "''", ",", "append", "=", "False", ",", "count", "=", "-", "1", ")", ":", "# Che...
Ingest a data file and regroup sources Parameters ---------- data: str, pandas.DataFrame, astropy.table.Table The path to the exported VizieR data or the data table cat_name: str The name of the added catalog id_col: str The name of the column containing the unique ids ra_col: str The name of the RA column dec_col: str The name of the DEC column cat_loc: str The location of the original catalog data append: bool Append the catalog rather than replace count: int The number of table rows to add (This is mainly for testing purposes)
[ "Ingest", "a", "data", "file", "and", "regroup", "sources", "Parameters", "----------", "data", ":", "str", "pandas", ".", "DataFrame", "astropy", ".", "table", ".", "Table", "The", "path", "to", "the", "exported", "VizieR", "data", "or", "the", "data", "t...
train
https://github.com/BDNYC/astrodbkit/blob/02c03c5e91aa7c7b0f3b5fa95bcf71e33ffcee09/astrodbkit/astrocat.py#L103-L186
BDNYC/astrodbkit
astrodbkit/astrocat.py
Catalog.inventory
def inventory(self, source_id): """ Look at the inventory for a given source Parameters ---------- source_id: int The id of the source to inspect """ if self.n_sources==0: print('Please run group_sources() to create the catalog first.') else: if source_id>self.n_sources or source_id<1 or not isinstance(source_id, int): print('Please enter an integer between 1 and',self.n_sources) else: print('Source:') print(at.Table.from_pandas(self.catalog[self.catalog['id']==source_id]).pprint()) for cat_name in self.catalogs: cat = getattr(self, cat_name) rows = cat[cat['source_id']==source_id] if not rows.empty: print('\n{}:'.format(cat_name)) at.Table.from_pandas(rows).pprint()
python
def inventory(self, source_id): """ Look at the inventory for a given source Parameters ---------- source_id: int The id of the source to inspect """ if self.n_sources==0: print('Please run group_sources() to create the catalog first.') else: if source_id>self.n_sources or source_id<1 or not isinstance(source_id, int): print('Please enter an integer between 1 and',self.n_sources) else: print('Source:') print(at.Table.from_pandas(self.catalog[self.catalog['id']==source_id]).pprint()) for cat_name in self.catalogs: cat = getattr(self, cat_name) rows = cat[cat['source_id']==source_id] if not rows.empty: print('\n{}:'.format(cat_name)) at.Table.from_pandas(rows).pprint()
[ "def", "inventory", "(", "self", ",", "source_id", ")", ":", "if", "self", ".", "n_sources", "==", "0", ":", "print", "(", "'Please run group_sources() to create the catalog first.'", ")", "else", ":", "if", "source_id", ">", "self", ".", "n_sources", "or", "s...
Look at the inventory for a given source Parameters ---------- source_id: int The id of the source to inspect
[ "Look", "at", "the", "inventory", "for", "a", "given", "source", "Parameters", "----------", "source_id", ":", "int", "The", "id", "of", "the", "source", "to", "inspect" ]
train
https://github.com/BDNYC/astrodbkit/blob/02c03c5e91aa7c7b0f3b5fa95bcf71e33ffcee09/astrodbkit/astrocat.py#L188-L214
BDNYC/astrodbkit
astrodbkit/astrocat.py
Catalog._catalog_check
def _catalog_check(self, cat_name, append=False): """ Check to see if the name of the ingested catalog is valid Parameters ---------- cat_name: str The name of the catalog in the Catalog object append: bool Append the catalog rather than replace Returns ------- bool True if good catalog name else False """ good = True # Make sure the attribute name is good if cat_name[0].isdigit(): print("No names beginning with numbers please!") good = False # Make sure catalog is unique if not append and cat_name in self.catalogs: print("Catalog {} already ingested. Set 'append=True' to add more records.".format(cat_name)) good = False return good
python
def _catalog_check(self, cat_name, append=False): """ Check to see if the name of the ingested catalog is valid Parameters ---------- cat_name: str The name of the catalog in the Catalog object append: bool Append the catalog rather than replace Returns ------- bool True if good catalog name else False """ good = True # Make sure the attribute name is good if cat_name[0].isdigit(): print("No names beginning with numbers please!") good = False # Make sure catalog is unique if not append and cat_name in self.catalogs: print("Catalog {} already ingested. Set 'append=True' to add more records.".format(cat_name)) good = False return good
[ "def", "_catalog_check", "(", "self", ",", "cat_name", ",", "append", "=", "False", ")", ":", "good", "=", "True", "# Make sure the attribute name is good", "if", "cat_name", "[", "0", "]", ".", "isdigit", "(", ")", ":", "print", "(", "\"No names beginning wit...
Check to see if the name of the ingested catalog is valid Parameters ---------- cat_name: str The name of the catalog in the Catalog object append: bool Append the catalog rather than replace Returns ------- bool True if good catalog name else False
[ "Check", "to", "see", "if", "the", "name", "of", "the", "ingested", "catalog", "is", "valid", "Parameters", "----------", "cat_name", ":", "str", "The", "name", "of", "the", "catalog", "in", "the", "Catalog", "object", "append", ":", "bool", "Append", "the...
train
https://github.com/BDNYC/astrodbkit/blob/02c03c5e91aa7c7b0f3b5fa95bcf71e33ffcee09/astrodbkit/astrocat.py#L216-L244
BDNYC/astrodbkit
astrodbkit/astrocat.py
Catalog.SDSS_spectra_query
def SDSS_spectra_query(self, cat_name, ra, dec, radius, group=True, **kwargs): """ Use astroquery to search SDSS for sources within a search cone Parameters ---------- cat_name: str A name for the imported catalog (e.g. '2MASS') ra: astropy.units.quantity.Quantity The RA of the center of the cone search dec: astropy.units.quantity.Quantity The Dec of the center of the cone search radius: astropy.units.quantity.Quantity The radius of the cone search """ # Verify the cat_name if self._catalog_check(cat_name): # Prep the current catalog as an astropy.QTable tab = at.Table.from_pandas(self.catalog) # Cone search Vizier print("Searching SDSS for sources within {} of ({}, {}). Please be patient...".format(viz_cat, radius, ra, dec)) crds = coord.SkyCoord(ra=ra, dec=dec, frame='icrs') try: data = SDSS.query_region(crds, spectro=True, radius=radius) except: print("No data found in SDSS within {} of ({}, {}).".format(viz_cat, radius, ra, dec)) return # Ingest the data self.ingest_data(data, cat_name, 'id', ra_col=ra_col, dec_col=dec_col) # Regroup if len(self.catalogs)>1 and group: self.group_sources(self.xmatch_radius)
python
def SDSS_spectra_query(self, cat_name, ra, dec, radius, group=True, **kwargs): """ Use astroquery to search SDSS for sources within a search cone Parameters ---------- cat_name: str A name for the imported catalog (e.g. '2MASS') ra: astropy.units.quantity.Quantity The RA of the center of the cone search dec: astropy.units.quantity.Quantity The Dec of the center of the cone search radius: astropy.units.quantity.Quantity The radius of the cone search """ # Verify the cat_name if self._catalog_check(cat_name): # Prep the current catalog as an astropy.QTable tab = at.Table.from_pandas(self.catalog) # Cone search Vizier print("Searching SDSS for sources within {} of ({}, {}). Please be patient...".format(viz_cat, radius, ra, dec)) crds = coord.SkyCoord(ra=ra, dec=dec, frame='icrs') try: data = SDSS.query_region(crds, spectro=True, radius=radius) except: print("No data found in SDSS within {} of ({}, {}).".format(viz_cat, radius, ra, dec)) return # Ingest the data self.ingest_data(data, cat_name, 'id', ra_col=ra_col, dec_col=dec_col) # Regroup if len(self.catalogs)>1 and group: self.group_sources(self.xmatch_radius)
[ "def", "SDSS_spectra_query", "(", "self", ",", "cat_name", ",", "ra", ",", "dec", ",", "radius", ",", "group", "=", "True", ",", "*", "*", "kwargs", ")", ":", "# Verify the cat_name", "if", "self", ".", "_catalog_check", "(", "cat_name", ")", ":", "# Pre...
Use astroquery to search SDSS for sources within a search cone Parameters ---------- cat_name: str A name for the imported catalog (e.g. '2MASS') ra: astropy.units.quantity.Quantity The RA of the center of the cone search dec: astropy.units.quantity.Quantity The Dec of the center of the cone search radius: astropy.units.quantity.Quantity The radius of the cone search
[ "Use", "astroquery", "to", "search", "SDSS", "for", "sources", "within", "a", "search", "cone", "Parameters", "----------", "cat_name", ":", "str", "A", "name", "for", "the", "imported", "catalog", "(", "e", ".", "g", ".", "2MASS", ")", "ra", ":", "astro...
train
https://github.com/BDNYC/astrodbkit/blob/02c03c5e91aa7c7b0f3b5fa95bcf71e33ffcee09/astrodbkit/astrocat.py#L246-L281
BDNYC/astrodbkit
astrodbkit/astrocat.py
Catalog.Vizier_query
def Vizier_query(self, viz_cat, cat_name, ra, dec, radius, ra_col='RAJ2000', dec_col='DEJ2000', columns=["**"], append=False, group=True, **kwargs): """ Use astroquery to search a catalog for sources within a search cone Parameters ---------- viz_cat: str The catalog string from Vizier (e.g. 'II/246' for 2MASS PSC) cat_name: str A name for the imported catalog (e.g. '2MASS') ra: astropy.units.quantity.Quantity The RA of the center of the cone search dec: astropy.units.quantity.Quantity The Dec of the center of the cone search radius: astropy.units.quantity.Quantity The radius of the cone search ra_col: str The name of the RA column in the raw catalog dec_col: str The name of the Dec column in the raw catalog columns: sequence The list of columns to pass to astroquery append: bool Append the catalog rather than replace """ # Verify the cat_name if self._catalog_check(cat_name, append=append): # Cone search Vizier print("Searching {} for sources within {} of ({}, {}). Please be patient...".format(viz_cat, radius, ra, dec)) crds = coord.SkyCoord(ra=ra, dec=dec, frame='icrs') V = Vizier(columns=columns, **kwargs) V.ROW_LIMIT = -1 try: data = V.query_region(crds, radius=radius, catalog=viz_cat)[0] except: print("No data found in {} within {} of ({}, {}).".format(viz_cat, radius, ra, dec)) return # Ingest the data self.ingest_data(data, cat_name, 'id', ra_col=ra_col, dec_col=dec_col, cat_loc=viz_cat, append=append) # Regroup if len(self.catalogs)>1 and group: self.group_sources(self.xmatch_radius)
python
def Vizier_query(self, viz_cat, cat_name, ra, dec, radius, ra_col='RAJ2000', dec_col='DEJ2000', columns=["**"], append=False, group=True, **kwargs): """ Use astroquery to search a catalog for sources within a search cone Parameters ---------- viz_cat: str The catalog string from Vizier (e.g. 'II/246' for 2MASS PSC) cat_name: str A name for the imported catalog (e.g. '2MASS') ra: astropy.units.quantity.Quantity The RA of the center of the cone search dec: astropy.units.quantity.Quantity The Dec of the center of the cone search radius: astropy.units.quantity.Quantity The radius of the cone search ra_col: str The name of the RA column in the raw catalog dec_col: str The name of the Dec column in the raw catalog columns: sequence The list of columns to pass to astroquery append: bool Append the catalog rather than replace """ # Verify the cat_name if self._catalog_check(cat_name, append=append): # Cone search Vizier print("Searching {} for sources within {} of ({}, {}). Please be patient...".format(viz_cat, radius, ra, dec)) crds = coord.SkyCoord(ra=ra, dec=dec, frame='icrs') V = Vizier(columns=columns, **kwargs) V.ROW_LIMIT = -1 try: data = V.query_region(crds, radius=radius, catalog=viz_cat)[0] except: print("No data found in {} within {} of ({}, {}).".format(viz_cat, radius, ra, dec)) return # Ingest the data self.ingest_data(data, cat_name, 'id', ra_col=ra_col, dec_col=dec_col, cat_loc=viz_cat, append=append) # Regroup if len(self.catalogs)>1 and group: self.group_sources(self.xmatch_radius)
[ "def", "Vizier_query", "(", "self", ",", "viz_cat", ",", "cat_name", ",", "ra", ",", "dec", ",", "radius", ",", "ra_col", "=", "'RAJ2000'", ",", "dec_col", "=", "'DEJ2000'", ",", "columns", "=", "[", "\"**\"", "]", ",", "append", "=", "False", ",", "...
Use astroquery to search a catalog for sources within a search cone Parameters ---------- viz_cat: str The catalog string from Vizier (e.g. 'II/246' for 2MASS PSC) cat_name: str A name for the imported catalog (e.g. '2MASS') ra: astropy.units.quantity.Quantity The RA of the center of the cone search dec: astropy.units.quantity.Quantity The Dec of the center of the cone search radius: astropy.units.quantity.Quantity The radius of the cone search ra_col: str The name of the RA column in the raw catalog dec_col: str The name of the Dec column in the raw catalog columns: sequence The list of columns to pass to astroquery append: bool Append the catalog rather than replace
[ "Use", "astroquery", "to", "search", "a", "catalog", "for", "sources", "within", "a", "search", "cone", "Parameters", "----------", "viz_cat", ":", "str", "The", "catalog", "string", "from", "Vizier", "(", "e", ".", "g", ".", "II", "/", "246", "for", "2M...
train
https://github.com/BDNYC/astrodbkit/blob/02c03c5e91aa7c7b0f3b5fa95bcf71e33ffcee09/astrodbkit/astrocat.py#L283-L328
BDNYC/astrodbkit
astrodbkit/astrocat.py
Catalog.Vizier_xmatch
def Vizier_xmatch(self, viz_cat, cat_name, ra_col='_RAJ2000', dec_col='_DEJ2000', radius='', group=True): """ Use astroquery to pull in and cross match a catalog with sources in self.catalog Parameters ---------- viz_cat: str The catalog string from Vizier (e.g. 'II/246' for 2MASS PSC) cat_name: str A name for the imported catalog (e.g. '2MASS') radius: astropy.units.quantity.Quantity The matching radius """ # Make sure sources have been grouped if self.catalog.empty: print('Please run group_sources() before cross matching.') return if self._catalog_check(cat_name): # Verify the cat_name viz_cat = "vizier:{}".format(viz_cat) # Prep the current catalog as an astropy.QTable tab = at.Table.from_pandas(self.catalog) # Crossmatch with Vizier print("Cross matching {} sources with {} catalog. Please be patient...".format(len(tab), viz_cat)) data = XMatch.query(cat1=tab, cat2=viz_cat, max_distance=radius or self.xmatch_radius*q.deg, colRA1='ra', colDec1='dec', colRA2=ra_col, colDec2=dec_col) # Ingest the data self.ingest_data(data, cat_name, 'id', ra_col=ra_col, dec_col=dec_col) # Regroup if group: self.group_sources(self.xmatch_radius)
python
def Vizier_xmatch(self, viz_cat, cat_name, ra_col='_RAJ2000', dec_col='_DEJ2000', radius='', group=True): """ Use astroquery to pull in and cross match a catalog with sources in self.catalog Parameters ---------- viz_cat: str The catalog string from Vizier (e.g. 'II/246' for 2MASS PSC) cat_name: str A name for the imported catalog (e.g. '2MASS') radius: astropy.units.quantity.Quantity The matching radius """ # Make sure sources have been grouped if self.catalog.empty: print('Please run group_sources() before cross matching.') return if self._catalog_check(cat_name): # Verify the cat_name viz_cat = "vizier:{}".format(viz_cat) # Prep the current catalog as an astropy.QTable tab = at.Table.from_pandas(self.catalog) # Crossmatch with Vizier print("Cross matching {} sources with {} catalog. Please be patient...".format(len(tab), viz_cat)) data = XMatch.query(cat1=tab, cat2=viz_cat, max_distance=radius or self.xmatch_radius*q.deg, colRA1='ra', colDec1='dec', colRA2=ra_col, colDec2=dec_col) # Ingest the data self.ingest_data(data, cat_name, 'id', ra_col=ra_col, dec_col=dec_col) # Regroup if group: self.group_sources(self.xmatch_radius)
[ "def", "Vizier_xmatch", "(", "self", ",", "viz_cat", ",", "cat_name", ",", "ra_col", "=", "'_RAJ2000'", ",", "dec_col", "=", "'_DEJ2000'", ",", "radius", "=", "''", ",", "group", "=", "True", ")", ":", "# Make sure sources have been grouped", "if", "self", "...
Use astroquery to pull in and cross match a catalog with sources in self.catalog Parameters ---------- viz_cat: str The catalog string from Vizier (e.g. 'II/246' for 2MASS PSC) cat_name: str A name for the imported catalog (e.g. '2MASS') radius: astropy.units.quantity.Quantity The matching radius
[ "Use", "astroquery", "to", "pull", "in", "and", "cross", "match", "a", "catalog", "with", "sources", "in", "self", ".", "catalog", "Parameters", "----------", "viz_cat", ":", "str", "The", "catalog", "string", "from", "Vizier", "(", "e", ".", "g", ".", "...
train
https://github.com/BDNYC/astrodbkit/blob/02c03c5e91aa7c7b0f3b5fa95bcf71e33ffcee09/astrodbkit/astrocat.py#L330-L365
BDNYC/astrodbkit
astrodbkit/astrocat.py
Catalog.group_sources
def group_sources(self, radius='', plot=False): """ Calculate the centers of the point clusters given the radius and minimum number of points Parameters ---------- coords: array-like The list of (x,y) coordinates of all clicks radius: int The distance threshold in degrees for cluster membership [default of 0.36 arcseconds] Returns ------- np.ndarray An array of the cluster centers """ if len(self.catalogs)==0: print("No catalogs to start grouping! Add one with the ingest_data() method first.") else: # Gather the catalogs print('Grouping sources from the following catalogs:',list(self.catalogs.keys())) cats = pd.concat([getattr(self, cat_name) for cat_name in self.catalogs]) # Clear the source grouping cats['oncID'] = np.nan cats['oncflag'] = '' self.xmatch_radius = radius if isinstance(radius,(float,int)) else self.xmatch_radius # Make a list of the coordinates of each catalog row coords = cats[['ra_corr','dec_corr']].values # Perform DBSCAN to find clusters db = DBSCAN(eps=self.xmatch_radius, min_samples=1, n_jobs=-1).fit(coords) # Group the sources core_samples_mask = np.zeros_like(db.labels_, dtype=bool) core_samples_mask[db.core_sample_indices_] = True source_ids = db.labels_+1 unique_source_ids = list(set(source_ids)) self.n_sources = len(unique_source_ids) # Get the average coordinates of all clusters unique_coords = np.asarray([np.mean(coords[source_ids==id], axis=0) for id in list(set(source_ids))]) # Generate a source catalog self.catalog = pd.DataFrame(columns=('id','ra','dec','flag','datasets')) self.catalog['id'] = unique_source_ids self.catalog[['ra','dec']] = unique_coords self.catalog['flag'] = [None]*len(unique_source_ids) # self.catalog['flag'] = ['d{}'.format(i) if i>1 else '' for i in Counter(source_ids).values()] self.catalog['datasets'] = Counter(source_ids).values() # Update history self.history += "\n{}: Catalog grouped with radius {} arcsec.".format(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"), self.xmatch_radius) # Update the 
source_ids in each catalog cats['source_id'] = source_ids for cat_name in self.catalogs: # Get the source_ids for the catalog cat_source_ids = cats.loc[cats['catID'].str.startswith(cat_name)]['source_id'] # Get the catalog cat = getattr(self, cat_name) # Update the source_ids and put it back cat['source_id'] = cat_source_ids setattr(self, cat_name, cat) del cat, cat_source_ids del cats # Plot it if plot: plt.figure() plt.title('{} clusters for {} sources'.format(self.n_sources,len(coords))) colors = [plt.cm.Spectral(each) for each in np.linspace(0, 1, self.n_sources)] for k, col in zip(unique_source_ids, colors): class_member_mask = (source_ids == k) xy = coords[class_member_mask & core_samples_mask] marker = 'o' if len(xy)==1: col = [0,0,0,1] marker = '+' plt.plot(xy[:, 0], xy[:, 1], color=tuple(col), marker=marker, markerfacecolor=tuple(col))
python
def group_sources(self, radius='', plot=False): """ Calculate the centers of the point clusters given the radius and minimum number of points Parameters ---------- coords: array-like The list of (x,y) coordinates of all clicks radius: int The distance threshold in degrees for cluster membership [default of 0.36 arcseconds] Returns ------- np.ndarray An array of the cluster centers """ if len(self.catalogs)==0: print("No catalogs to start grouping! Add one with the ingest_data() method first.") else: # Gather the catalogs print('Grouping sources from the following catalogs:',list(self.catalogs.keys())) cats = pd.concat([getattr(self, cat_name) for cat_name in self.catalogs]) # Clear the source grouping cats['oncID'] = np.nan cats['oncflag'] = '' self.xmatch_radius = radius if isinstance(radius,(float,int)) else self.xmatch_radius # Make a list of the coordinates of each catalog row coords = cats[['ra_corr','dec_corr']].values # Perform DBSCAN to find clusters db = DBSCAN(eps=self.xmatch_radius, min_samples=1, n_jobs=-1).fit(coords) # Group the sources core_samples_mask = np.zeros_like(db.labels_, dtype=bool) core_samples_mask[db.core_sample_indices_] = True source_ids = db.labels_+1 unique_source_ids = list(set(source_ids)) self.n_sources = len(unique_source_ids) # Get the average coordinates of all clusters unique_coords = np.asarray([np.mean(coords[source_ids==id], axis=0) for id in list(set(source_ids))]) # Generate a source catalog self.catalog = pd.DataFrame(columns=('id','ra','dec','flag','datasets')) self.catalog['id'] = unique_source_ids self.catalog[['ra','dec']] = unique_coords self.catalog['flag'] = [None]*len(unique_source_ids) # self.catalog['flag'] = ['d{}'.format(i) if i>1 else '' for i in Counter(source_ids).values()] self.catalog['datasets'] = Counter(source_ids).values() # Update history self.history += "\n{}: Catalog grouped with radius {} arcsec.".format(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"), self.xmatch_radius) # Update the 
source_ids in each catalog cats['source_id'] = source_ids for cat_name in self.catalogs: # Get the source_ids for the catalog cat_source_ids = cats.loc[cats['catID'].str.startswith(cat_name)]['source_id'] # Get the catalog cat = getattr(self, cat_name) # Update the source_ids and put it back cat['source_id'] = cat_source_ids setattr(self, cat_name, cat) del cat, cat_source_ids del cats # Plot it if plot: plt.figure() plt.title('{} clusters for {} sources'.format(self.n_sources,len(coords))) colors = [plt.cm.Spectral(each) for each in np.linspace(0, 1, self.n_sources)] for k, col in zip(unique_source_ids, colors): class_member_mask = (source_ids == k) xy = coords[class_member_mask & core_samples_mask] marker = 'o' if len(xy)==1: col = [0,0,0,1] marker = '+' plt.plot(xy[:, 0], xy[:, 1], color=tuple(col), marker=marker, markerfacecolor=tuple(col))
[ "def", "group_sources", "(", "self", ",", "radius", "=", "''", ",", "plot", "=", "False", ")", ":", "if", "len", "(", "self", ".", "catalogs", ")", "==", "0", ":", "print", "(", "\"No catalogs to start grouping! Add one with the ingest_data() method first.\"", "...
Calculate the centers of the point clusters given the radius and minimum number of points Parameters ---------- coords: array-like The list of (x,y) coordinates of all clicks radius: int The distance threshold in degrees for cluster membership [default of 0.36 arcseconds] Returns ------- np.ndarray An array of the cluster centers
[ "Calculate", "the", "centers", "of", "the", "point", "clusters", "given", "the", "radius", "and", "minimum", "number", "of", "points" ]
train
https://github.com/BDNYC/astrodbkit/blob/02c03c5e91aa7c7b0f3b5fa95bcf71e33ffcee09/astrodbkit/astrocat.py#L367-L460
BDNYC/astrodbkit
astrodbkit/astrocat.py
Catalog.drop_catalog
def drop_catalog(self, cat_name): """ Remove an imported catalog from the Dataset object Parameters ---------- cat_name: str The name given to the catalog """ # Delete the name and data self.catalogs.pop(cat_name) delattr(self, cat_name) # Update history print("Deleted {} catalog.".format(cat_name)) self.history += "\n{}: Deleted {} catalog.".format(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"), cat_name)
python
def drop_catalog(self, cat_name): """ Remove an imported catalog from the Dataset object Parameters ---------- cat_name: str The name given to the catalog """ # Delete the name and data self.catalogs.pop(cat_name) delattr(self, cat_name) # Update history print("Deleted {} catalog.".format(cat_name)) self.history += "\n{}: Deleted {} catalog.".format(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"), cat_name)
[ "def", "drop_catalog", "(", "self", ",", "cat_name", ")", ":", "# Delete the name and data", "self", ".", "catalogs", ".", "pop", "(", "cat_name", ")", "delattr", "(", "self", ",", "cat_name", ")", "# Update history", "print", "(", "\"Deleted {} catalog.\"", "."...
Remove an imported catalog from the Dataset object Parameters ---------- cat_name: str The name given to the catalog
[ "Remove", "an", "imported", "catalog", "from", "the", "Dataset", "object", "Parameters", "----------", "cat_name", ":", "str", "The", "name", "given", "to", "the", "catalog" ]
train
https://github.com/BDNYC/astrodbkit/blob/02c03c5e91aa7c7b0f3b5fa95bcf71e33ffcee09/astrodbkit/astrocat.py#L468-L483
BDNYC/astrodbkit
astrodbkit/astrocat.py
Catalog.load
def load(self, path): """ Load the catalog from file Parameters ---------- path: str The path to the file """ # Get the object DB = joblib.load(path) # Load the attributes self.catalog = DB.catalog self.n_sources = DB.n_sources self.name = DB.name self.history = DB.history del DB
python
def load(self, path): """ Load the catalog from file Parameters ---------- path: str The path to the file """ # Get the object DB = joblib.load(path) # Load the attributes self.catalog = DB.catalog self.n_sources = DB.n_sources self.name = DB.name self.history = DB.history del DB
[ "def", "load", "(", "self", ",", "path", ")", ":", "# Get the object", "DB", "=", "joblib", ".", "load", "(", "path", ")", "# Load the attributes", "self", ".", "catalog", "=", "DB", ".", "catalog", "self", ".", "n_sources", "=", "DB", ".", "n_sources", ...
Load the catalog from file Parameters ---------- path: str The path to the file
[ "Load", "the", "catalog", "from", "file", "Parameters", "----------", "path", ":", "str", "The", "path", "to", "the", "file" ]
train
https://github.com/BDNYC/astrodbkit/blob/02c03c5e91aa7c7b0f3b5fa95bcf71e33ffcee09/astrodbkit/astrocat.py#L485-L503
BDNYC/astrodbkit
astrodbkit/astrocat.py
Catalog.correct_offsets
def correct_offsets(self, cat_name, truth='ACS'): """ Function to determine systematic, linear offsets between catalogs FUTURE -- do this with TweakReg, which also accounts for rotation/scaling See thread at https://github.com/spacetelescope/drizzlepac/issues/77 Parameters ---------- cat_name: str Name of catalog to correct truth: str The catalog to measure against """ # Must be grouped! if not self.xmatch_radius: print("Please run group_sources() before running correct_offsets().") else: # First, remove any previous catalog correction self.catalog.loc[self.catalog['cat_name']==cat_name, 'ra_corr'] = self.catalog.loc[self.catalog['cat_name']==cat_name, '_RAJ2000'] self.catalog.loc[self.catalog['cat_name']==cat_name, 'dec_corr'] = self.catalog.loc[self.catalog['cat_name']==cat_name, '_DEJ2000'] # Copy the catalog onc_gr = self.catalog.copy() # restrict to one-to-one matches, sort by oncID so that matches are paired o2o_new = onc_gr.loc[(onc_gr['oncflag'].str.contains('o')) & (onc_gr['cat_name'] == cat_name) ,:].sort_values('oncID') o2o_old = onc_gr.loc[(onc_gr['oncID'].isin(o2o_new['oncID']) & (onc_gr['cat_name'] == truth)), :].sort_values('oncID') # get coords c_o2o_new = SkyCoord(o2o_new.loc[o2o_new['cat_name'] == cat_name, 'ra_corr'],\ o2o_new.loc[o2o_new['cat_name'] == cat_name, 'dec_corr'], unit='degree') c_o2o_old = SkyCoord(o2o_old.loc[o2o_old['cat_name'] == truth, 'ra_corr'],\ o2o_old.loc[o2o_old['cat_name'] == truth, 'dec_corr'], unit='degree') print(len(c_o2o_old), 'one-to-one matches found!') if len(c_o2o_old)>0: delta_ra = [] delta_dec = [] for i in range(len(c_o2o_old)): # offsets FROM ACS TO new catalog ri, di = c_o2o_old[i].spherical_offsets_to(c_o2o_new[i]) delta_ra.append(ri.arcsecond) delta_dec.append(di.arcsecond) progress_meter((i+1)*100./len(c_o2o_old)) delta_ra = np.array(delta_ra) delta_dec = np.array(delta_dec) print('\n') # fit a gaussian mu_ra, std_ra = norm.fit(delta_ra) mu_dec, std_dec = norm.fit(delta_dec) # Fix precision mu_ra = 
round(mu_ra, 6) mu_dec = round(mu_dec, 6) # Update the coordinates of the appropriate sources print('Shifting {} sources by {}" in RA and {}" in Dec...'.format(cat_name,mu_ra,mu_dec)) self.catalog.loc[self.catalog['cat_name']==cat_name, 'ra_corr'] += mu_ra self.catalog.loc[self.catalog['cat_name']==cat_name, 'dec_corr'] += mu_dec # Update history now = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S") self.history += "\n{}: {} sources shifted by {} deg in RA and {} deg in Declination.".format(now, cat_name, mu_ra, mu_dec) # Regroup the sources since many have moved self.group_sources(self.xmatch_radius) else: print('Cannot correct offsets in {} sources.'.format(cat_name))
python
def correct_offsets(self, cat_name, truth='ACS'): """ Function to determine systematic, linear offsets between catalogs FUTURE -- do this with TweakReg, which also accounts for rotation/scaling See thread at https://github.com/spacetelescope/drizzlepac/issues/77 Parameters ---------- cat_name: str Name of catalog to correct truth: str The catalog to measure against """ # Must be grouped! if not self.xmatch_radius: print("Please run group_sources() before running correct_offsets().") else: # First, remove any previous catalog correction self.catalog.loc[self.catalog['cat_name']==cat_name, 'ra_corr'] = self.catalog.loc[self.catalog['cat_name']==cat_name, '_RAJ2000'] self.catalog.loc[self.catalog['cat_name']==cat_name, 'dec_corr'] = self.catalog.loc[self.catalog['cat_name']==cat_name, '_DEJ2000'] # Copy the catalog onc_gr = self.catalog.copy() # restrict to one-to-one matches, sort by oncID so that matches are paired o2o_new = onc_gr.loc[(onc_gr['oncflag'].str.contains('o')) & (onc_gr['cat_name'] == cat_name) ,:].sort_values('oncID') o2o_old = onc_gr.loc[(onc_gr['oncID'].isin(o2o_new['oncID']) & (onc_gr['cat_name'] == truth)), :].sort_values('oncID') # get coords c_o2o_new = SkyCoord(o2o_new.loc[o2o_new['cat_name'] == cat_name, 'ra_corr'],\ o2o_new.loc[o2o_new['cat_name'] == cat_name, 'dec_corr'], unit='degree') c_o2o_old = SkyCoord(o2o_old.loc[o2o_old['cat_name'] == truth, 'ra_corr'],\ o2o_old.loc[o2o_old['cat_name'] == truth, 'dec_corr'], unit='degree') print(len(c_o2o_old), 'one-to-one matches found!') if len(c_o2o_old)>0: delta_ra = [] delta_dec = [] for i in range(len(c_o2o_old)): # offsets FROM ACS TO new catalog ri, di = c_o2o_old[i].spherical_offsets_to(c_o2o_new[i]) delta_ra.append(ri.arcsecond) delta_dec.append(di.arcsecond) progress_meter((i+1)*100./len(c_o2o_old)) delta_ra = np.array(delta_ra) delta_dec = np.array(delta_dec) print('\n') # fit a gaussian mu_ra, std_ra = norm.fit(delta_ra) mu_dec, std_dec = norm.fit(delta_dec) # Fix precision mu_ra = 
round(mu_ra, 6) mu_dec = round(mu_dec, 6) # Update the coordinates of the appropriate sources print('Shifting {} sources by {}" in RA and {}" in Dec...'.format(cat_name,mu_ra,mu_dec)) self.catalog.loc[self.catalog['cat_name']==cat_name, 'ra_corr'] += mu_ra self.catalog.loc[self.catalog['cat_name']==cat_name, 'dec_corr'] += mu_dec # Update history now = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S") self.history += "\n{}: {} sources shifted by {} deg in RA and {} deg in Declination.".format(now, cat_name, mu_ra, mu_dec) # Regroup the sources since many have moved self.group_sources(self.xmatch_radius) else: print('Cannot correct offsets in {} sources.'.format(cat_name))
[ "def", "correct_offsets", "(", "self", ",", "cat_name", ",", "truth", "=", "'ACS'", ")", ":", "# Must be grouped!", "if", "not", "self", ".", "xmatch_radius", ":", "print", "(", "\"Please run group_sources() before running correct_offsets().\"", ")", "else", ":", "#...
Function to determine systematic, linear offsets between catalogs FUTURE -- do this with TweakReg, which also accounts for rotation/scaling See thread at https://github.com/spacetelescope/drizzlepac/issues/77 Parameters ---------- cat_name: str Name of catalog to correct truth: str The catalog to measure against
[ "Function", "to", "determine", "systematic", "linear", "offsets", "between", "catalogs", "FUTURE", "--", "do", "this", "with", "TweakReg", "which", "also", "accounts", "for", "rotation", "/", "scaling", "See", "thread", "at", "https", ":", "//", "github", ".",...
train
https://github.com/BDNYC/astrodbkit/blob/02c03c5e91aa7c7b0f3b5fa95bcf71e33ffcee09/astrodbkit/astrocat.py#L516-L597
inveniosoftware/invenio-pages
invenio_pages/ext.py
_InvenioPagesState.jinja_env
def jinja_env(self): """Create a sandboxed Jinja environment.""" if self._jinja_env is None: self._jinja_env = SandboxedEnvironment( extensions=[ 'jinja2.ext.autoescape', 'jinja2.ext.with_', ], autoescape=True, ) self._jinja_env.globals['url_for'] = url_for # Load whitelisted configuration variables. for var in self.app.config['PAGES_WHITELIST_CONFIG_KEYS']: self._jinja_env.globals[var] = self.app.config.get(var) return self._jinja_env
python
def jinja_env(self): """Create a sandboxed Jinja environment.""" if self._jinja_env is None: self._jinja_env = SandboxedEnvironment( extensions=[ 'jinja2.ext.autoescape', 'jinja2.ext.with_', ], autoescape=True, ) self._jinja_env.globals['url_for'] = url_for # Load whitelisted configuration variables. for var in self.app.config['PAGES_WHITELIST_CONFIG_KEYS']: self._jinja_env.globals[var] = self.app.config.get(var) return self._jinja_env
[ "def", "jinja_env", "(", "self", ")", ":", "if", "self", ".", "_jinja_env", "is", "None", ":", "self", ".", "_jinja_env", "=", "SandboxedEnvironment", "(", "extensions", "=", "[", "'jinja2.ext.autoescape'", ",", "'jinja2.ext.with_'", ",", "]", ",", "autoescape...
Create a sandboxed Jinja environment.
[ "Create", "a", "sandboxed", "Jinja", "environment", "." ]
train
https://github.com/inveniosoftware/invenio-pages/blob/8d544d72fb4c22b7134c521f435add0abed42544/invenio_pages/ext.py#L52-L64
inveniosoftware/invenio-pages
invenio_pages/ext.py
_InvenioPagesState.render_template
def render_template(self, source, **kwargs_context): r"""Render a template string using sandboxed environment. :param source: A string containing the page source. :param \*\*kwargs_context: The context associated with the page. :returns: The rendered template. """ return self.jinja_env.from_string(source).render(kwargs_context)
python
def render_template(self, source, **kwargs_context): r"""Render a template string using sandboxed environment. :param source: A string containing the page source. :param \*\*kwargs_context: The context associated with the page. :returns: The rendered template. """ return self.jinja_env.from_string(source).render(kwargs_context)
[ "def", "render_template", "(", "self", ",", "source", ",", "*", "*", "kwargs_context", ")", ":", "return", "self", ".", "jinja_env", ".", "from_string", "(", "source", ")", ".", "render", "(", "kwargs_context", ")" ]
r"""Render a template string using sandboxed environment. :param source: A string containing the page source. :param \*\*kwargs_context: The context associated with the page. :returns: The rendered template.
[ "r", "Render", "a", "template", "string", "using", "sandboxed", "environment", "." ]
train
https://github.com/inveniosoftware/invenio-pages/blob/8d544d72fb4c22b7134c521f435add0abed42544/invenio_pages/ext.py#L66-L73
inveniosoftware/invenio-pages
invenio_pages/ext.py
InvenioPages.wrap_errorhandler
def wrap_errorhandler(app): """Wrap error handler. :param app: The Flask application. """ try: existing_handler = app.error_handler_spec[None][404][NotFound] except (KeyError, TypeError): existing_handler = None if existing_handler: app.error_handler_spec[None][404][NotFound] = \ lambda error: handle_not_found(error, wrapped=existing_handler) else: app.error_handler_spec.setdefault(None, {}).setdefault(404, {}) app.error_handler_spec[None][404][NotFound] = handle_not_found
python
def wrap_errorhandler(app): """Wrap error handler. :param app: The Flask application. """ try: existing_handler = app.error_handler_spec[None][404][NotFound] except (KeyError, TypeError): existing_handler = None if existing_handler: app.error_handler_spec[None][404][NotFound] = \ lambda error: handle_not_found(error, wrapped=existing_handler) else: app.error_handler_spec.setdefault(None, {}).setdefault(404, {}) app.error_handler_spec[None][404][NotFound] = handle_not_found
[ "def", "wrap_errorhandler", "(", "app", ")", ":", "try", ":", "existing_handler", "=", "app", ".", "error_handler_spec", "[", "None", "]", "[", "404", "]", "[", "NotFound", "]", "except", "(", "KeyError", ",", "TypeError", ")", ":", "existing_handler", "="...
Wrap error handler. :param app: The Flask application.
[ "Wrap", "error", "handler", "." ]
train
https://github.com/inveniosoftware/invenio-pages/blob/8d544d72fb4c22b7134c521f435add0abed42544/invenio_pages/ext.py#L88-L103
inveniosoftware/invenio-pages
invenio_pages/ext.py
InvenioPages.init_app
def init_app(self, app): """Flask application initialization. :param app: The Flask application. :returns: The :class:`invenio_pages.ext.InvenioPages` instance initialized. """ self.init_config(app) self.wrap_errorhandler(app) app.extensions['invenio-pages'] = _InvenioPagesState(app) return app.extensions['invenio-pages']
python
def init_app(self, app): """Flask application initialization. :param app: The Flask application. :returns: The :class:`invenio_pages.ext.InvenioPages` instance initialized. """ self.init_config(app) self.wrap_errorhandler(app) app.extensions['invenio-pages'] = _InvenioPagesState(app) return app.extensions['invenio-pages']
[ "def", "init_app", "(", "self", ",", "app", ")", ":", "self", ".", "init_config", "(", "app", ")", "self", ".", "wrap_errorhandler", "(", "app", ")", "app", ".", "extensions", "[", "'invenio-pages'", "]", "=", "_InvenioPagesState", "(", "app", ")", "retu...
Flask application initialization. :param app: The Flask application. :returns: The :class:`invenio_pages.ext.InvenioPages` instance initialized.
[ "Flask", "application", "initialization", "." ]
train
https://github.com/inveniosoftware/invenio-pages/blob/8d544d72fb4c22b7134c521f435add0abed42544/invenio_pages/ext.py#L105-L117
PGower/PyCanvas
pycanvas/apis/conferences.py
ConferencesAPI.list_conferences_groups
def list_conferences_groups(self, group_id): """ List conferences. Retrieve the list of conferences for this context This API returns a JSON object containing the list of conferences, the key for the list of conferences is "conferences" """ path = {} data = {} params = {} # REQUIRED - PATH - group_id """ID""" path["group_id"] = group_id self.logger.debug("GET /api/v1/groups/{group_id}/conferences with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("GET", "/api/v1/groups/{group_id}/conferences".format(**path), data=data, params=params, all_pages=True)
python
def list_conferences_groups(self, group_id): """ List conferences. Retrieve the list of conferences for this context This API returns a JSON object containing the list of conferences, the key for the list of conferences is "conferences" """ path = {} data = {} params = {} # REQUIRED - PATH - group_id """ID""" path["group_id"] = group_id self.logger.debug("GET /api/v1/groups/{group_id}/conferences with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("GET", "/api/v1/groups/{group_id}/conferences".format(**path), data=data, params=params, all_pages=True)
[ "def", "list_conferences_groups", "(", "self", ",", "group_id", ")", ":", "path", "=", "{", "}", "data", "=", "{", "}", "params", "=", "{", "}", "# REQUIRED - PATH - group_id\r", "\"\"\"ID\"\"\"", "path", "[", "\"group_id\"", "]", "=", "group_id", "self", "....
List conferences. Retrieve the list of conferences for this context This API returns a JSON object containing the list of conferences, the key for the list of conferences is "conferences"
[ "List", "conferences", ".", "Retrieve", "the", "list", "of", "conferences", "for", "this", "context", "This", "API", "returns", "a", "JSON", "object", "containing", "the", "list", "of", "conferences", "the", "key", "for", "the", "list", "of", "conferences", ...
train
https://github.com/PGower/PyCanvas/blob/68520005382b440a1e462f9df369f54d364e21e8/pycanvas/apis/conferences.py#L39-L57
PGower/PyCanvas
pycanvas/apis/quiz_assignment_overrides.py
QuizAssignmentOverridesAPI.retrieve_assignment_overridden_dates_for_quizzes
def retrieve_assignment_overridden_dates_for_quizzes(self, course_id, quiz_assignment_overrides_0_quiz_ids=None): """ Retrieve assignment-overridden dates for quizzes. Retrieve the actual due-at, unlock-at, and available-at dates for quizzes based on the assignment overrides active for the current API user. """ path = {} data = {} params = {} # REQUIRED - PATH - course_id """ID""" path["course_id"] = course_id # OPTIONAL - quiz_assignment_overrides[0][quiz_ids] """An array of quiz IDs. If omitted, overrides for all quizzes available to the operating user will be returned.""" if quiz_assignment_overrides_0_quiz_ids is not None: params["quiz_assignment_overrides[0][quiz_ids]"] = quiz_assignment_overrides_0_quiz_ids self.logger.debug("GET /api/v1/courses/{course_id}/quizzes/assignment_overrides with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("GET", "/api/v1/courses/{course_id}/quizzes/assignment_overrides".format(**path), data=data, params=params, single_item=True)
python
def retrieve_assignment_overridden_dates_for_quizzes(self, course_id, quiz_assignment_overrides_0_quiz_ids=None): """ Retrieve assignment-overridden dates for quizzes. Retrieve the actual due-at, unlock-at, and available-at dates for quizzes based on the assignment overrides active for the current API user. """ path = {} data = {} params = {} # REQUIRED - PATH - course_id """ID""" path["course_id"] = course_id # OPTIONAL - quiz_assignment_overrides[0][quiz_ids] """An array of quiz IDs. If omitted, overrides for all quizzes available to the operating user will be returned.""" if quiz_assignment_overrides_0_quiz_ids is not None: params["quiz_assignment_overrides[0][quiz_ids]"] = quiz_assignment_overrides_0_quiz_ids self.logger.debug("GET /api/v1/courses/{course_id}/quizzes/assignment_overrides with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("GET", "/api/v1/courses/{course_id}/quizzes/assignment_overrides".format(**path), data=data, params=params, single_item=True)
[ "def", "retrieve_assignment_overridden_dates_for_quizzes", "(", "self", ",", "course_id", ",", "quiz_assignment_overrides_0_quiz_ids", "=", "None", ")", ":", "path", "=", "{", "}", "data", "=", "{", "}", "params", "=", "{", "}", "# REQUIRED - PATH - course_id\r", "\...
Retrieve assignment-overridden dates for quizzes. Retrieve the actual due-at, unlock-at, and available-at dates for quizzes based on the assignment overrides active for the current API user.
[ "Retrieve", "assignment", "-", "overridden", "dates", "for", "quizzes", ".", "Retrieve", "the", "actual", "due", "-", "at", "unlock", "-", "at", "and", "available", "-", "at", "dates", "for", "quizzes", "based", "on", "the", "assignment", "overrides", "activ...
train
https://github.com/PGower/PyCanvas/blob/68520005382b440a1e462f9df369f54d364e21e8/pycanvas/apis/quiz_assignment_overrides.py#L19-L41
hbldh/sudokuextract
sudokuextract/ml/knn.py
KNeighborsClassifier.fit
def fit(self, X, y): """Fit the model using X as training data and y as target values""" self._data = X self._classes = np.unique(y) self._labels = y self._is_fitted = True
python
def fit(self, X, y): """Fit the model using X as training data and y as target values""" self._data = X self._classes = np.unique(y) self._labels = y self._is_fitted = True
[ "def", "fit", "(", "self", ",", "X", ",", "y", ")", ":", "self", ".", "_data", "=", "X", "self", ".", "_classes", "=", "np", ".", "unique", "(", "y", ")", "self", ".", "_labels", "=", "y", "self", ".", "_is_fitted", "=", "True" ]
Fit the model using X as training data and y as target values
[ "Fit", "the", "model", "using", "X", "as", "training", "data", "and", "y", "as", "target", "values" ]
train
https://github.com/hbldh/sudokuextract/blob/0dff3b46b9896a8bedfc474c61a089e7901f720c/sudokuextract/ml/knn.py#L90-L96
hbldh/sudokuextract
sudokuextract/ml/knn.py
KNeighborsClassifier.predict
def predict(self, X): """Predict the class labels for the provided data Parameters ---------- X : array-like, shape (n_query, n_features). Test samples. Returns ------- y : array of shape [n_samples] Class labels for each data sample. """ # TODO: Make classification of multiple samples a bit more effective... if X.ndim > 1 and X.shape[1] != 1: out = [] for x in X: out += self.predict(x) return out X = X.flatten() if self.metric == 'minkowski': dists = np.sum(np.abs(self._data - X) ** self.p, axis=1) else: # TODO: Implement other metrics. raise ValueError("Only Minkowski distance metric implemented...") argument = np.argsort(dists) labels = self._labels[argument[:self.n_neighbors]] if self.weights == 'distance': weights = 1 / dists[argument[:self.n_neighbors]] out = np.zeros((len(self._classes), ), 'float') for i, c in enumerate(self._classes): out[i] = np.sum(weights[labels == c]) out /= np.sum(out) y_pred = self._labels[np.argmax(out)] else: y_pred, _ = mode(labels) return y_pred.tolist()
python
def predict(self, X): """Predict the class labels for the provided data Parameters ---------- X : array-like, shape (n_query, n_features). Test samples. Returns ------- y : array of shape [n_samples] Class labels for each data sample. """ # TODO: Make classification of multiple samples a bit more effective... if X.ndim > 1 and X.shape[1] != 1: out = [] for x in X: out += self.predict(x) return out X = X.flatten() if self.metric == 'minkowski': dists = np.sum(np.abs(self._data - X) ** self.p, axis=1) else: # TODO: Implement other metrics. raise ValueError("Only Minkowski distance metric implemented...") argument = np.argsort(dists) labels = self._labels[argument[:self.n_neighbors]] if self.weights == 'distance': weights = 1 / dists[argument[:self.n_neighbors]] out = np.zeros((len(self._classes), ), 'float') for i, c in enumerate(self._classes): out[i] = np.sum(weights[labels == c]) out /= np.sum(out) y_pred = self._labels[np.argmax(out)] else: y_pred, _ = mode(labels) return y_pred.tolist()
[ "def", "predict", "(", "self", ",", "X", ")", ":", "# TODO: Make classification of multiple samples a bit more effective...", "if", "X", ".", "ndim", ">", "1", "and", "X", ".", "shape", "[", "1", "]", "!=", "1", ":", "out", "=", "[", "]", "for", "x", "in...
Predict the class labels for the provided data Parameters ---------- X : array-like, shape (n_query, n_features). Test samples. Returns ------- y : array of shape [n_samples] Class labels for each data sample.
[ "Predict", "the", "class", "labels", "for", "the", "provided", "data" ]
train
https://github.com/hbldh/sudokuextract/blob/0dff3b46b9896a8bedfc474c61a089e7901f720c/sudokuextract/ml/knn.py#L98-L139
lobeck/flask-bower
flask_bower/__init__.py
replaced_url_for
def replaced_url_for(endpoint, filename=None, **values): """ This function acts as "replacement" for the default url_for() and intercepts if it is a request for bower assets If the file is not available in bower, the result is passed to flasks url_for(). This is useful - but not recommended - for "overlaying" the static directory (see README.rst). """ lookup_result = overlay_url_for(endpoint, filename, **values) if lookup_result is not None: return lookup_result return url_for(endpoint, filename=filename, **values)
python
def replaced_url_for(endpoint, filename=None, **values): """ This function acts as "replacement" for the default url_for() and intercepts if it is a request for bower assets If the file is not available in bower, the result is passed to flasks url_for(). This is useful - but not recommended - for "overlaying" the static directory (see README.rst). """ lookup_result = overlay_url_for(endpoint, filename, **values) if lookup_result is not None: return lookup_result return url_for(endpoint, filename=filename, **values)
[ "def", "replaced_url_for", "(", "endpoint", ",", "filename", "=", "None", ",", "*", "*", "values", ")", ":", "lookup_result", "=", "overlay_url_for", "(", "endpoint", ",", "filename", ",", "*", "*", "values", ")", "if", "lookup_result", "is", "not", "None"...
This function acts as "replacement" for the default url_for() and intercepts if it is a request for bower assets If the file is not available in bower, the result is passed to flasks url_for(). This is useful - but not recommended - for "overlaying" the static directory (see README.rst).
[ "This", "function", "acts", "as", "replacement", "for", "the", "default", "url_for", "()", "and", "intercepts", "if", "it", "is", "a", "request", "for", "bower", "assets" ]
train
https://github.com/lobeck/flask-bower/blob/3ebe08a0931d07e82cb57998db3390d2b5921444/flask_bower/__init__.py#L41-L53
lobeck/flask-bower
flask_bower/__init__.py
handle_url_error
def handle_url_error(error, endpoint, values): """ Intercept BuildErrors of url_for() using flasks build_error_handler API """ url = overlay_url_for(endpoint, **values) if url is None: exc_type, exc_value, tb = sys.exc_info() if exc_value is error: reraise(exc_type, exc_value, tb) else: raise error # url_for will use this result, instead of raising BuildError. return url
python
def handle_url_error(error, endpoint, values): """ Intercept BuildErrors of url_for() using flasks build_error_handler API """ url = overlay_url_for(endpoint, **values) if url is None: exc_type, exc_value, tb = sys.exc_info() if exc_value is error: reraise(exc_type, exc_value, tb) else: raise error # url_for will use this result, instead of raising BuildError. return url
[ "def", "handle_url_error", "(", "error", ",", "endpoint", ",", "values", ")", ":", "url", "=", "overlay_url_for", "(", "endpoint", ",", "*", "*", "values", ")", "if", "url", "is", "None", ":", "exc_type", ",", "exc_value", ",", "tb", "=", "sys", ".", ...
Intercept BuildErrors of url_for() using flasks build_error_handler API
[ "Intercept", "BuildErrors", "of", "url_for", "()", "using", "flasks", "build_error_handler", "API" ]
train
https://github.com/lobeck/flask-bower/blob/3ebe08a0931d07e82cb57998db3390d2b5921444/flask_bower/__init__.py#L56-L68
lobeck/flask-bower
flask_bower/__init__.py
overlay_url_for
def overlay_url_for(endpoint, filename=None, **values): """ Replace flasks url_for() function to allow usage without template changes If the requested endpoint is static or ending in .static, it tries to serve a bower asset, otherwise it will pass the arguments to flask.url_for() See http://flask.pocoo.org/docs/0.10/api/#flask.url_for """ default_url_for_args = values.copy() if filename: default_url_for_args['filename'] = filename if endpoint == 'static' or endpoint.endswith('.static'): if os.path.sep in filename: filename_parts = filename.split(os.path.sep) component = filename_parts[0] # Using * magic here to expand list filename = os.path.join(*filename_parts[1:]) returned_url = build_url(component, filename, **values) if returned_url is not None: return returned_url return None
python
def overlay_url_for(endpoint, filename=None, **values): """ Replace flasks url_for() function to allow usage without template changes If the requested endpoint is static or ending in .static, it tries to serve a bower asset, otherwise it will pass the arguments to flask.url_for() See http://flask.pocoo.org/docs/0.10/api/#flask.url_for """ default_url_for_args = values.copy() if filename: default_url_for_args['filename'] = filename if endpoint == 'static' or endpoint.endswith('.static'): if os.path.sep in filename: filename_parts = filename.split(os.path.sep) component = filename_parts[0] # Using * magic here to expand list filename = os.path.join(*filename_parts[1:]) returned_url = build_url(component, filename, **values) if returned_url is not None: return returned_url return None
[ "def", "overlay_url_for", "(", "endpoint", ",", "filename", "=", "None", ",", "*", "*", "values", ")", ":", "default_url_for_args", "=", "values", ".", "copy", "(", ")", "if", "filename", ":", "default_url_for_args", "[", "'filename'", "]", "=", "filename", ...
Replace flasks url_for() function to allow usage without template changes If the requested endpoint is static or ending in .static, it tries to serve a bower asset, otherwise it will pass the arguments to flask.url_for() See http://flask.pocoo.org/docs/0.10/api/#flask.url_for
[ "Replace", "flasks", "url_for", "()", "function", "to", "allow", "usage", "without", "template", "changes" ]
train
https://github.com/lobeck/flask-bower/blob/3ebe08a0931d07e82cb57998db3390d2b5921444/flask_bower/__init__.py#L71-L97